Mirror of https://github.com/ynput/ayon-core.git, synced 2025-12-25 05:14:40 +01:00
Merge branch 'feature/OP-4859_cant-assign-shaders-to-the-ass-file' of https://github.com/tokejepsen/pype into feature/OP-4859_cant-assign-shaders-to-the-ass-file
This commit is contained in: commit e04523966f
194 changed files with 7026 additions and 2774 deletions
15  .github/pr-branch-labeler.yml  vendored  Normal file
@ -0,0 +1,15 @@
# Apply label "type: feature" if head matches "feature/*"
'type: feature':
  head: "feature/*"

# Apply label "type: enhancement" if head matches "enhancement/*"
'type: enhancement':
  head: "enhancement/*"

# Apply label "type: bug" if head matches one of "bugfix/*" or "hotfix/*"
'type: bug':
  head: ["bugfix/*", "hotfix/*"]

# Apply label "Bump Minor" if base matches "release/next-minor"
'Bump Minor':
  base: "release/next-minor"
102  .github/pr-glob-labeler.yml  vendored  Normal file
@ -0,0 +1,102 @@
# Add type: unittest label if any changes in tests folders
'type: unittest':
  - '*/*tests*/**/*'

# any changes in documentation structure
'type: documentation':
  - '*/**/*website*/**/*'
  - '*/**/*docs*/**/*'

# hosts triage
'host: Nuke':
  - '*/**/*nuke*'
  - '*/**/*nuke*/**/*'

'host: Photoshop':
  - '*/**/*photoshop*'
  - '*/**/*photoshop*/**/*'

'host: Harmony':
  - '*/**/*harmony*'
  - '*/**/*harmony*/**/*'

'host: UE':
  - '*/**/*unreal*'
  - '*/**/*unreal*/**/*'

'host: Houdini':
  - '*/**/*houdini*'
  - '*/**/*houdini*/**/*'

'host: Maya':
  - '*/**/*maya*'
  - '*/**/*maya*/**/*'

'host: Resolve':
  - '*/**/*resolve*'
  - '*/**/*resolve*/**/*'

'host: Blender':
  - '*/**/*blender*'
  - '*/**/*blender*/**/*'

'host: Hiero':
  - '*/**/*hiero*'
  - '*/**/*hiero*/**/*'

'host: Fusion':
  - '*/**/*fusion*'
  - '*/**/*fusion*/**/*'

'host: Flame':
  - '*/**/*flame*'
  - '*/**/*flame*/**/*'

'host: TrayPublisher':
  - '*/**/*traypublisher*'
  - '*/**/*traypublisher*/**/*'

'host: 3dsmax':
  - '*/**/*max*'
  - '*/**/*max*/**/*'

'host: TV Paint':
  - '*/**/*tvpaint*'
  - '*/**/*tvpaint*/**/*'

'host: CelAction':
  - '*/**/*celaction*'
  - '*/**/*celaction*/**/*'

'host: After Effects':
  - '*/**/*aftereffects*'
  - '*/**/*aftereffects*/**/*'

'host: Substance Painter':
  - '*/**/*substancepainter*'
  - '*/**/*substancepainter*/**/*'

# modules triage
'module: Deadline':
  - '*/**/*deadline*'
  - '*/**/*deadline*/**/*'

'module: RoyalRender':
  - '*/**/*royalrender*'
  - '*/**/*royalrender*/**/*'

'module: Sitesync':
  - '*/**/*sync_server*'
  - '*/**/*sync_server*/**/*'

'module: Ftrack':
  - '*/**/*ftrack*'
  - '*/**/*ftrack*/**/*'

'module: Shotgrid':
  - '*/**/*shotgrid*'
  - '*/**/*shotgrid*/**/*'

'module: Kitsu':
  - '*/**/*kitsu*'
  - '*/**/*kitsu*/**/*'
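
For reference, a rough sketch of how glob patterns like these map a changed file path to labels (illustrative only; the real matching is done by `actions/labeler` in the workflow below, not by this snippet):

```python
from fnmatch import fnmatch

# Hypothetical illustration: map label names to path globs and collect the
# labels whose patterns match a changed path.
LABEL_GLOBS = {
    "host: Maya": ["*/**/*maya*", "*/**/*maya*/**/*"],
    "module: Deadline": ["*/**/*deadline*", "*/**/*deadline*/**/*"],
}


def labels_for(changed_path):
    return [
        label
        for label, patterns in LABEL_GLOBS.items()
        if any(fnmatch(changed_path, pattern) for pattern in patterns)
    ]


print(labels_for("openpype/hosts/maya/api/lib.py"))  # -> ['host: Maya']
```
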
71  .github/workflows/project_actions.yml  vendored  Normal file
@ -0,0 +1,71 @@
name: project-actions

on:
  pull_request_target:
    types: [opened, synchronize, assigned, review_requested]
  pull_request_review:
    types: [submitted]

jobs:
  pr_review_requested:
    name: pr_review_requested
    runs-on: ubuntu-latest
    if: github.event_name == 'pull_request_review' && github.event.review.state == 'changes_requested'
    steps:
      - name: Move PR to 'Change Requested'
        uses: leonsteinhaeuser/project-beta-automations@v2.1.0
        with:
          gh_token: ${{ secrets.YNPUT_BOT_TOKEN }}
          organization: ynput
          project_id: 11
          resource_node_id: ${{ github.event.pull_request.node_id }}
          status_value: Change Requested

  size-label:
    name: pr_size_label
    runs-on: ubuntu-latest
    if: |
      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
      || (github.event_name == 'pull_request' && github.event.action == 'assigned')}}

    steps:
      - name: Add size label
        uses: "pascalgn/size-label-action@v0.4.3"
        env:
          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
          IGNORED: ".gitignore\n*.md\n*.json"
        with:
          sizes: >
            {
              "0": "XS",
              "100": "S",
              "500": "M",
              "1000": "L",
              "1500": "XL",
              "2500": "XXL"
            }

  label_prs_branch:
    name: pr_branch_label
    runs-on: ubuntu-latest
    if: |
      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
      || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
    steps:
      - name: Label PRs - Branch name detection
        uses: ffittschen/pr-branch-labeler@v1
        with:
          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}

  label_prs_globe:
    name: pr_globe_label
    runs-on: ubuntu-latest
    if: |
      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
      || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
    steps:
      - name: Label PRs - Globe detection
        uses: actions/labeler@v4
        with:
          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
          configuration-path: ".github/pr-glob-labeler.yml"
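
A rough sketch of how a `sizes` map like the one above turns a changed-line count into a label (illustrative only; `pascalgn/size-label-action` implements the real logic and label names):

```python
# Hypothetical helper: pick the largest threshold that the change count reaches.
SIZES = {0: "XS", 100: "S", 500: "M", 1000: "L", 1500: "XL", 2500: "XXL"}


def size_label(changed_lines):
    label = "XS"
    for threshold, name in sorted(SIZES.items()):
        if changed_lines >= threshold:
            label = name
    return "size/{}".format(label)


print(size_label(7026 + 2774))  # this commit -> 'size/XXL'
```
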
77  ARCHITECTURE.md  Normal file
@ -0,0 +1,77 @@
# Architecture

OpenPype is a monolithic Python project that bundles several parts. This document tries to give a bird's-eye overview of the project and, to a certain degree, of each of the sub-projects.
The current file structure looks like this:

```
.
├── common - Code in this folder is the backend portion of the Addon distribution logic for the v4 server.
├── docs - Documentation of the source code.
├── igniter - The OpenPype bootstrapper; deals with running version resolution and setting up the connection to the MongoDB.
├── openpype - The actual OpenPype core package.
├── schema - Collection of JSON files describing schematics of objects. This follows Avalon's convention.
├── tests - Integration and unit tests.
├── tools - Convenience scripts to perform common actions (in both bash and ps1).
├── vendor - When using the igniter, it deploys third-party tools in here, such as ffmpeg.
└── website - Source files for https://openpype.io/ which is Docusaurus (https://docusaurus.io/).
```

The core functionality of the pipeline can be found in `igniter` and `openpype`, which in turn rely on the `schema` files. Whenever you build (or download a pre-built) version of OpenPype, these two are bundled in there, and `Igniter` is the entry point.

## Igniter

It's the setup and update tool for OpenPype. Unless you want to package `openpype` separately and deal with all the config manually, this will most likely be your entry point.

```
igniter/
├── bootstrap_repos.py - Module that will find or install OpenPype versions on the system.
├── __init__.py - Igniter entry point.
├── install_dialog.py - Shows the dialog for choosing the central pype repository.
├── install_thread.py - Threading helpers for the install process.
├── __main__.py - Like `__init__.py` ?
├── message_dialog.py - Qt dialog with a message and an "Ok" button.
├── nice_progress_bar.py - Fancy Qt progress bar.
├── splash.txt - ASCII art for the terminal installer.
├── stylesheet.css - Installer Qt styles.
├── terminal_splash.py - Terminal installer animation, relies on `splash.txt`.
├── tools.py - Collection of methods that don't fit in other modules.
├── update_thread.py - Threading helper to update existing OpenPype installs.
├── update_window.py - Qt UI to update OpenPype installs.
├── user_settings.py - Interface for the OpenPype user settings.
└── version.py - Igniter's version number.
```

## OpenPype

This is the main package of the OpenPype logic. It could be loosely described as a combination of [Avalon](https://getavalon.github.io), [Pyblish](https://pyblish.com/) and glue around those, with custom OpenPype-only elements. Things are in the process of being moved around to better prepare for V4, which will be released under a new name, AYON.

```
openpype/
├── client - Interface for the MongoDB.
├── hooks - Hooks to be executed on certain OpenPype applications defined in `openpype.lib.applications`.
├── host - Base class for the different hosts.
├── hosts - Integration with the different DCCs (hosts) using the `host` base class.
├── lib - Libraries that stitch together the package; some have been moved into other parts.
├── modules - OpenPype modules should contain separated logic of a specific kind of implementation, such as the Ftrack connection and its Python API.
├── pipeline - Core of the OpenPype pipeline; handles creation of data, publishing, etc.
├── plugins - Global/core plugins for the loader and publisher tools.
├── resources - Icons, fonts, etc.
├── scripts - Loose scripts that get run by tools/publishers.
├── settings - OpenPype settings interface.
├── style - Qt styling.
├── tests - Unit tests.
├── tools - Core tools, check out https://openpype.io/docs/artist_tools.
├── vendor - Vendoring of required Python packages.
├── widgets - Common reusable Qt widgets.
├── action.py - LEGACY: Pyblish actions, now live in `openpype.pipeline.publish.action`.
├── cli.py - Command line interface, leverages `click`.
├── __init__.py - Sets two constants.
├── __main__.py - Entry point, calls `cli.py`.
├── plugin.py - Pyblish plugins.
├── pype_commands.py - Implementation of OpenPype commands.
└── version.py - Current version number.
```
926  CHANGELOG.md
@ -1,5 +1,931 @@
# Changelog

## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3)

[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3)

### **🆕 New features**

<details>
<summary>Blender: Extract Review <a href="https://github.com/ynput/OpenPype/pull/3616">#3616</a></summary>

<strong>Added Review to Blender.
</strong>This implementation is based on #3508 but made compatible with the current implementation of OpenPype for Blender.

___

</details>

<details>
<summary>Data Exchanges: Point Cloud for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4532">#4532</a></summary>

<strong>Publish PRT format with tyFlow in 3dsmax.
</strong>Publish PRT format with tyFlow in 3dsmax and possibly set up a loader to load the format too.
- [x] creator
- [x] extractor
- [x] validator
- [x] loader

___

</details>

<details>
<summary>Global: persistent staging directory for renders <a href="https://github.com/ynput/OpenPype/pull/4583">#4583</a></summary>

<strong>Allows configuring whether the staging directory (`stagingDir`) should be persistent, with the use of profiles.
</strong>With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, DCCs automatically add a rendering path during the creation stage, which is then used in publishing. One of the key advantages of this feature is that it allows users to take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, this feature allows users to keep their rendered data persistent and use their own infrastructure for regular cleaning. However, it should be noted that some productions may want to use this feature without persistency. Furthermore, there may be a need for retargeting the rendering folder to faster storage, which is also not supported at the moment. It is the studio's responsibility to clean up obsolete folders with data. The location of the folder is configured in `project_anatomy/templates/others` (a 'transient' key is expected, with a 'folder' key; there could be more templates). Which family/task type/subset is applicable is configured in `project_settings/global/tools/publish/transient_dir_profiles`.

___

</details>
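
A minimal sketch of the template-plus-profile idea described above (the template value and helper are hypothetical, not the actual OpenPype implementation):

```python
# Hypothetical sketch: a "transient" anatomy template is formatted per
# instance, and a profile decides whether the result stays after publishing.
TRANSIENT_TEMPLATE = "{root}/{project}/transient/{asset}/{subset}"  # hypothetical value


def resolve_staging_dir(data, persistent=True):
    staging_dir = TRANSIENT_TEMPLATE.format(**data)
    return {"stagingDir": staging_dir, "stagingDir_persistent": persistent}


print(resolve_staging_dir({
    "root": "/fast_storage",
    "project": "demo",
    "asset": "sh010",
    "subset": "renderMain",
}))
```
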

<details>
<summary>Kitsu custom comment template <a href="https://github.com/ynput/OpenPype/pull/4599">#4599</a></summary>

Kitsu allows writing markdown in its comment field. This can be very powerful for delivering dynamic comments with the help of the data from the instance. This feature defaults to off, so the admin has to manually set up the comment field the way they want. I have added a basic example of how the comment can look as the comment field's default value. I also want to add some documentation for this, but that's on its way once the code itself looks good to the reviewers.

___

</details>

<details>
<summary>MaxScene Family <a href="https://github.com/ynput/OpenPype/pull/4615">#4615</a></summary>

Introduction of the Max Scene Family.

___

</details>

### **🚀 Enhancements**

<details>
<summary>Maya: Multiple values on single render attribute - OP-4131 <a href="https://github.com/ynput/OpenPype/pull/4631">#4631</a></summary>

When validating render attributes, this adds support for multiple values. When repairing, the first value in the list is used.

___

</details>
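
As an illustration of the multi-value validation described in #4631 above (a sketch only, with hypothetical attribute names and values, not the plug-in's actual code):

```python
from maya import cmds

# Sketch: an attribute is valid if its value matches any accepted value;
# "repair" falls back to the first accepted value.
ACCEPTED_VALUES = {"defaultResolution.width": [1920, 3840]}  # hypothetical


def is_valid(node_attr, accepted):
    return cmds.getAttr(node_attr) in accepted


def repair(node_attr, accepted):
    if not is_valid(node_attr, accepted):
        cmds.setAttr(node_attr, accepted[0])
```
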

<details>
<summary>Maya: enable 2D Pan/Zoom for playblasts - OP-5213 <a href="https://github.com/ynput/OpenPype/pull/4687">#4687</a></summary>

Setting for enabling 2D Pan/Zoom on reviews.

___

</details>

<details>
<summary>Copy existing or generate new Fusion profile on prelaunch <a href="https://github.com/ynput/OpenPype/pull/4572">#4572</a></summary>

<strong>Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch.
</strong>The idea is to create a copy of the existing Fusion profile, adding an OpenPype menu to the Fusion instance. By default the copy setting is turned off, so no file copying is performed. Instead, a clean Fusion profile is created by Fusion in the predefined folder. The default location is set to `~/.openpype/hosts/fusion/prefs`, to better comply with the other OS platforms. After creating the default profile, some modifications are applied:
- forced Python 3
- forced English interface
- set up OpenPype-specific path maps.

If the `copy_prefs` checkbox is toggled, a copy of the existing Fusion profile folder will be placed in the mentioned location and then altered the same way as described above. The operation is run only once, on the first launch, unless `force_sync [Resync profile on each launch]` is toggled. The English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug).

___

</details>

<details>
<summary>Houdini: Create button open new publisher's "create" tab <a href="https://github.com/ynput/OpenPype/pull/4601">#4601</a></summary>

During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing, because "Create" went to the older creator (which is now completely empty) while the publish button went directly to the publish tab. This resolves that by fixing the Create button to open the new publisher on the Create tab. Also made the publish button enforce going to the "publish" tab for consistency in usage. @antirotor I think changing the Create button's callback was just missed in this commit, or was there a specific reason to not change that around yet?

___

</details>

<details>
<summary>Clockify: refresh and fix the integration <a href="https://github.com/ynput/OpenPype/pull/4607">#4607</a></summary>

Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and currently it is a WIP for making it fully functional. Most functions, such as start and stop timer, and projects sync, are currently working. For the rate-limiting task a new dependency is added: https://pypi.org/project/ratelimiter/

___

</details>

<details>
<summary>Fusion publish existing frames <a href="https://github.com/ynput/OpenPype/pull/4611">#4611</a></summary>

This PR adds the ability to publish existing frames instead of having to re-render all of them for each new publish. I have split the render_locally plugin so the review part is its own plugin now. I also changed the saver creator plugin's label from Saver to Render (saver), as I intend to add a Prerender creator like in Nuke.

___

</details>

<details>
<summary>Resolution settings referenced from DB record for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4652">#4652</a></summary>

- Add a callback for setting the resolution according to the DB after a new scene is created.
- Add a new action into the OpenPype menu which allows the user to reset the resolution in 3dsMax.

___

</details>

<details>
<summary>3dsmax: render instance settings in Publish tab <a href="https://github.com/ynput/OpenPype/pull/4658">#4658</a></summary>

Allows the user to preset the pools, group and use_published settings in the Render Creator in the Max host. The user can set the settings before or after creating an instance in the new publisher.

___

</details>

<details>
<summary>scene length setting referenced from DB record for 3dsMax <a href="https://github.com/ynput/OpenPype/pull/4665">#4665</a></summary>

Sets the timeline length based on the DB record in the 3dsMax host.

___

</details>

<details>
<summary>Publisher: Windows reduce command window pop-ups during Publishing <a href="https://github.com/ynput/OpenPype/pull/4672">#4672</a></summary>

Reduce the command line pop-ups that show on Windows during publishing.

___

</details>

<details>
<summary>Publisher: Explicit save <a href="https://github.com/ynput/OpenPype/pull/4676">#4676</a></summary>

The Publisher has an explicit button to save changes, so a reset can happen without saving any changes. Saving still happens automatically when publishing is started or on publisher window close, but a popup is shown if the host context has changed. The important context was extended with the workfile path (if the host integration supports it) so workfile changes are captured too. In that case a confirmation dialog is shown to the user. All callbacks that may require saving of the context were moved to the main window to be able to handle showing the dialog in one place. Saving changes now returns success, so the rest of the logic is skipped -> publishing won't start when saving of instances fails. Save and reset buttons have shortcuts (Ctrl + S and Ctrl + R).

___

</details>

<details>
<summary>CelAction: conditional workfile parameters from settings <a href="https://github.com/ynput/OpenPype/pull/4677">#4677</a></summary>

Since some productions requested excluding some workfile parameters from the publishing submission, we needed to move them to settings so they can be altered per project.

___

</details>

<details>
<summary>Improve logging of used app + tool envs on application launch <a href="https://github.com/ynput/OpenPype/pull/4682">#4682</a></summary>

Improve logging of what apps + tool environments got loaded for an application launch.

___

</details>

<details>
<summary>Fix name and docstring for Create Workdir Extra Folders prelaunch hook <a href="https://github.com/ynput/OpenPype/pull/4683">#4683</a></summary>

Fix the class name and docstring for the Create Workdir Extra Folders prelaunch hook. The class name and docstring were originally copied from another plug-in and didn't match the plug-in logic. This also fixes potentially seeing this twice in your logs, where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin.

___

</details>

<details>
<summary>Application launch context: Include app group name in logger <a href="https://github.com/ynput/OpenPype/pull/4684">#4684</a></summary>

Clarify better in the logs what app group the ApplicationLaunchContext belongs to and what application is being launched.

___

</details>

<details>
<summary>increment workfile version 3dsmax <a href="https://github.com/ynput/OpenPype/pull/4685">#4685</a></summary>

Increment the workfile version in 3dsmax as in the Blender and Maya hosts.

___

</details>
### **🐛 Bug fixes**

<details>
<summary>Maya: Fix getting non-active model panel. <a href="https://github.com/ynput/OpenPype/pull/2968">#2968</a></summary>

<strong>When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence.
</strong>
___

</details>

<details>
<summary>Maya: Fix broken review publishing. <a href="https://github.com/ynput/OpenPype/pull/4549">#4549</a></summary>

<strong>Resolves #4547
</strong>
___

</details>

<details>
<summary>Maya: Avoid error on right click in Loader if `mtoa` is not loaded <a href="https://github.com/ynput/OpenPype/pull/4616">#4616</a></summary>

Fix an error on right clicking in the Loader when `mtoa` is not a loaded plug-in. Additionally, if `mtoa` isn't loaded, the loader will now load the plug-in before trying to create the Arnold standin.

___

</details>

<details>
<summary>Maya: Fix extract look colorspace detection <a href="https://github.com/ynput/OpenPype/pull/4618">#4618</a></summary>

Fix the logic which guesses the colorspace using the `arnold` Python library.
- Previously it'd error if `mtoa` was not available on the path, so it still required `mtoa` to be available.
- The colorspace-guessing logic doesn't actually require `mtoa` to be loaded, just the `arnold` Python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to get loaded to guess the colorspace.
- The if/else branch was likely not doing what was intended: `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded, so this would only ever be true if it ends up loading the `mtoa` plugin for the first time.
```python
# Tested in Maya 2022.1
print(cmds.loadPlugin("mtoa", quiet=True))
# ['mtoa']
print(cmds.loadPlugin("mtoa", quiet=True))
# None
```

___

</details>

<details>
<summary>Maya: Maya Playblast Options overrides - OP-3847 <a href="https://github.com/ynput/OpenPype/pull/4634">#4634</a></summary>

When publishing a review in Maya, the extractor would fail due to a wrong (long) panel name.

___

</details>

<details>
<summary>Bugfix/op 2834 fix extract playblast <a href="https://github.com/ynput/OpenPype/pull/4701">#4701</a></summary>

Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

___

</details>

<details>
<summary>Bugfix/op 2834 fix extract playblast <a href="https://github.com/ynput/OpenPype/pull/4704">#4704</a></summary>

Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

___

</details>

<details>
<summary>Maya: bug fix for passing zoom settings if review is attached to subset <a href="https://github.com/ynput/OpenPype/pull/4716">#4716</a></summary>

Fix for attaching a review to a subset with the pan/zoom option.

___

</details>

<details>
<summary>Maya: tile assembly fail in draft - OP-4820 <a href="https://github.com/ynput/OpenPype/pull/4416">#4416</a></summary>

<strong>Tile assembly in Deadline was broken.
</strong>The initial bug report revealed other areas of the tile assembly that needed fixing.

___

</details>

<details>
<summary>Maya: Yeti Validate Rig Input - OP-3454 <a href="https://github.com/ynput/OpenPype/pull/4554">#4554</a></summary>

<strong>Fix Yeti Validate Rig Input.
</strong>The existing workflow was broken due to #3297.

___

</details>

<details>
<summary>Scene inventory: Fix code errors when "not found" entries are found <a href="https://github.com/ynput/OpenPype/pull/4594">#4594</a></summary>

Whenever a "NOT FOUND" entry is present, a lot of errors happened in the Scene Inventory:
- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all). Error reported on Discord:
```python
Traceback (most recent call last):
  File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint
    text = self.displayText(
  File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText
    assert isinstance(value, numbers.Integral), (
AssertionError: Version is not integer. "None" <class 'NoneType'>
```
- The right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions.

This PR resolves those.

___

</details>
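
A minimal sketch of the kind of guard described above for the version delegate (illustrative only, not the repository's actual fix):

```python
import numbers


def display_version_text(value):
    # "NOT FOUND" entries carry no numeric version, so avoid asserting and
    # show a placeholder instead of raising inside the paint routine.
    if not isinstance(value, numbers.Integral):
        return "N/A"
    return "v{:03d}".format(value)
```
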

<details>
<summary>Kitsu: Sync OP with zou, make sure value-data is int or float <a href="https://github.com/ynput/OpenPype/pull/4596">#4596</a></summary>

Currently the data zou pulls is a string and not a number, causing some bugs in the pipeline where a numeric value is expected (like `Set frame range` in Fusion).

This PR makes sure each value is set with int() or float() so these bugs can't happen later on.

_(A request to CGWire has also been sent to allow forcing values only for some metadata columns, but currently the user can enter whatever they want in there.)_

___

</details>
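
A minimal sketch of the coercion idea from #4596 above (hypothetical helper, not the integration's actual code):

```python
def to_number(raw):
    """Coerce zou string metadata into int or float before use."""
    try:
        return int(raw)
    except ValueError:
        return float(raw)


print(to_number("1001"))  # -> 1001
print(to_number("25.0"))  # -> 25.0
```
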

<details>
<summary>Max: fix the bug of removing an instance <a href="https://github.com/ynput/OpenPype/pull/4617">#4617</a></summary>

Fix the bug of removing an instance in 3dsMax.

___

</details>

<details>
<summary>Global | Nuke: fixing farm publishing workflow <a href="https://github.com/ynput/OpenPype/pull/4623">#4623</a></summary>

After Nuke adopted the new publisher with new creators, new issues were introduced; those issues are addressed in this PR. For example, publishing of reviewable video files was broken when published via the farm. Local publishing was also fixed.

___

</details>

<details>
<summary>Ftrack: Ftrack additional families filtering <a href="https://github.com/ynput/OpenPype/pull/4633">#4633</a></summary>

The Ftrack family collector makes sure the subset family is also in the instance families for additional families filtering.

___

</details>

<details>
<summary>Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix <a href="https://github.com/ynput/OpenPype/pull/4635">#4635</a></summary>

Sync between hierarchical and non-hierarchical attributes should be fixed and work as expected. The action should sync the values as expected, and the event handler should do so too, and only on newly created entities.

___

</details>

<details>
<summary>bugfix for 3dsmax publishing error <a href="https://github.com/ynput/OpenPype/pull/4637">#4637</a></summary>

Fix the bug of a failing publishing job in 3dsMax.

___

</details>

<details>
<summary>General: Use right validation for ffmpeg executable <a href="https://github.com/ynput/OpenPype/pull/4640">#4640</a></summary>

Use ffmpeg executable validation for ffmpeg executables instead of oiio executable validation. The validation is used as the last possible source of ffmpeg from the `PATH` environment variable, which is an edge case but can cause issues.

___

</details>

<details>
<summary>3dsmax: opening last workfile <a href="https://github.com/ynput/OpenPype/pull/4644">#4644</a></summary>

Supports opening the last saved workfile in the 3dsmax host.

___

</details>

<details>
<summary>Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution <a href="https://github.com/ynput/OpenPype/pull/4647">#4647</a></summary>

This should fix the occasional behavior of the QThread being destroyed before even its worker returns from the `run()` function. After quitting, it should wait for the QThread object to properly close itself.

___

</details>

<details>
<summary>General: Use right plugin class for Collect Comment <a href="https://github.com/ynput/OpenPype/pull/4653">#4653</a></summary>

The Collect Comment plugin is an instance plugin, so it should inherit from `InstancePlugin` instead of `ContextPlugin`.

___

</details>
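
The distinction from #4653 above in pyblish terms (a small sketch, not the actual Collect Comment plugin):

```python
import pyblish.api


class CollectCommentExample(pyblish.api.InstancePlugin):
    """An InstancePlugin receives each instance in process()."""
    order = pyblish.api.CollectorOrder
    label = "Collect Comment (example)"

    def process(self, instance):
        # A ContextPlugin would receive the whole context here instead.
        instance.data.setdefault("comment", "")
```
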

<details>
<summary>Global: add tags field to thumbnail representation <a href="https://github.com/ynput/OpenPype/pull/4660">#4660</a></summary>

The thumbnail representation might be missing the tags field.

___

</details>

<details>
<summary>Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers <a href="https://github.com/ynput/OpenPype/pull/4662">#4662</a></summary>

Fix #4656 by enforcing unique destination transfers in the Integrator. It's now disallowed to queue a destination in the file transaction queue with a new source path during the publish.

___

</details>
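
A sketch of the uniqueness rule from #4662 above (hypothetical helper, not the integrator's actual code):

```python
class FileTransactionError(Exception):
    pass


def queue_transfer(transfers, source, destination):
    """Refuse to queue the same destination twice with a different source."""
    existing = transfers.get(destination)
    if existing is not None and existing != source:
        raise FileTransactionError(
            "Destination already queued from another source: {}".format(destination)
        )
    transfers[destination] = source


transfers = {}
queue_transfer(transfers, "/tmp/render/a.exr", "/proj/publish/v001/a.exr")
```
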

<details>
<summary>Hiero: Creator with correct workfile numeric padding input <a href="https://github.com/ynput/OpenPype/pull/4666">#4666</a></summary>

The creator was showing 99 in the workfile input for a long time, even if users set the default value to 1001 in studio settings. This has been fixed now.

___

</details>

<details>
<summary>Nuke: Nukenodes family instance without frame range <a href="https://github.com/ynput/OpenPype/pull/4669">#4669</a></summary>

No need to add frame range data into `nukenodes` (backdrop) family publishes, since those are timeless.

___

</details>

<details>
<summary>TVPaint: Optional Validation plugins can be de/activated by user <a href="https://github.com/ynput/OpenPype/pull/4674">#4674</a></summary>

Added `OptionalPyblishPluginMixin` to TVPaint plugins that can be optional.

___

</details>
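
The optional-plugin pattern mentioned in #4674 above, roughly (a sketch; the plugin name, body and import path are assumptions, not code from this PR):

```python
import pyblish.api
from openpype.pipeline.publish import OptionalPyblishPluginMixin  # assumed import path


class ValidateExampleTVPaint(pyblish.api.InstancePlugin,
                             OptionalPyblishPluginMixin):
    label = "Validate Example (TVPaint)"  # hypothetical plugin
    order = pyblish.api.ValidatorOrder
    optional = True

    def process(self, instance):
        # Skip silently when the artist disabled the plugin in the publisher.
        if not self.is_active(instance.data):
            return
        # ... validation logic would go here
```
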

<details>
<summary>Kitsu: Slightly less strict with instance data <a href="https://github.com/ynput/OpenPype/pull/4678">#4678</a></summary>

- Allow taking the task name from the context if the asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in its data.
- Allow matching "review" against both `instance.data["family"]` and `instance.data["families"]`, because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint.
- Do not error in Integrate Kitsu Review whenever, for whatever reason, Integrate Kitsu Note did not create a comment; just log a message that it was unable to connect a review.

___

</details>

<details>
<summary>Publisher: Fix reset shortcut sequence <a href="https://github.com/ynput/OpenPype/pull/4694">#4694</a></summary>

Fix a bug created in https://github.com/ynput/OpenPype/pull/4676 where the key sequence was checked using an unsupported method. The check was changed to convert the event into a `QKeySequence` object, which can be compared to the prepared sequence.

___

</details>
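
A sketch of the `QKeySequence` comparison described in #4694 above (illustrative only, not the publisher's exact code):

```python
from qtpy import QtGui

RESET_SEQUENCE = QtGui.QKeySequence("Ctrl+R")


def is_reset_shortcut(event):
    # Build a sequence from the key event and compare it to the prepared one.
    pressed = QtGui.QKeySequence(int(event.modifiers()) | event.key())
    return pressed.matches(RESET_SEQUENCE) == QtGui.QKeySequence.ExactMatch
```
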

<details>
<summary>Refactor _capture <a href="https://github.com/ynput/OpenPype/pull/4702">#4702</a></summary>

Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

___

</details>

<details>
<summary>Hiero: correct container colors if UpToDate <a href="https://github.com/ynput/OpenPype/pull/4708">#4708</a></summary>

Colors on loaded containers now correctly identify the real state of the version: `red` for out of date and `green` for up to date.

___

</details>
### **🔀 Refactored code**

<details>
<summary>Look Assigner: Move Look Assigner tool since it's Maya only <a href="https://github.com/ynput/OpenPype/pull/4604">#4604</a></summary>

Fix #4357: Move the Look Assigner tool to Maya since it's Maya-only.

___

</details>

<details>
<summary>Maya: Remove unused functions from Extract Look <a href="https://github.com/ynput/OpenPype/pull/4671">#4671</a></summary>

Remove unused functions from the Maya Extract Look plug-in.

___

</details>

<details>
<summary>Extract Review code refactor <a href="https://github.com/ynput/OpenPype/pull/3930">#3930</a></summary>

<strong>Trying to reduce complexity of the Extract Review plug-in:
- Re-use profile filtering from lib
- Remove "combination families" additional filtering which supposedly was from OP v2
- Simplify 'formatting' for filling gaps
- Use `legacy_io.Session` over `os.environ`

</strong>
___

</details>

<details>
<summary>Maya: Replace last usages of Qt module <a href="https://github.com/ynput/OpenPype/pull/4610">#4610</a></summary>

Replace the last usages of the `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins.

___

</details>
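
The import change behind #4610 above looks roughly like this:

```python
# Before (Qt.py wrapper):
#   from Qt import QtWidgets, QtCore
# After (qtpy, which also abstracts PySide6):
from qtpy import QtWidgets, QtCore
```
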

<details>
<summary>Update tests and documentation for `ColormanagedPyblishPluginMixin` <a href="https://github.com/ynput/OpenPype/pull/4612">#4612</a></summary>

Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation.

___

</details>

<details>
<summary>Improve logging of used app + tool envs on application launch (minor tweak) <a href="https://github.com/ynput/OpenPype/pull/4686">#4686</a></summary>

Use `app.full_name` for change done in #4682

___

</details>
### **📃 Documentation**

<details>
<summary>Docs/add architecture document <a href="https://github.com/ynput/OpenPype/pull/4344">#4344</a></summary>

<strong>Add `ARCHITECTURE.md` document.
</strong>This document attempts to give a quick overview of the project to help onboarding. It's not extensive documentation, but more of an elevator pitch: one-line descriptions of files/directories and what they attempt to do.

___

</details>

<details>
<summary>Documentation: Tweak grammar and fix some typos <a href="https://github.com/ynput/OpenPype/pull/4613">#4613</a></summary>

This resolves some grammar issues and typos in the documentation. Also fixes the extension of some images in the After Effects docs, which used an uppercase extension even though the files had a lowercase extension.

___

</details>

<details>
<summary>Docs: Fix some minor grammar/typos <a href="https://github.com/ynput/OpenPype/pull/4680">#4680</a></summary>

Typo/grammar fixes in documentation.

___

</details>
### **Merged pull requests**

<details>
<summary>Maya: Implement image file node loader <a href="https://github.com/ynput/OpenPype/pull/4313">#4313</a></summary>

<strong>Implements a loader for loading a texture image into a `file` node in Maya.
</strong>Similar to Maya's Hypershade creation of textures, on load you have the option to choose between three modes of creating:
- Texture
- Projection
- Stencil

These should match what Maya generates if you create those in Maya.
- [x] Load and manage file nodes
- [x] Apply color spaces after #4195
- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically.
- [ ] Add support for animation sequences of UDIM textures using the `<f>.<udim>.exr` path format?

___

</details>

<details>
<summary>Maya Look Assigner: Don't rely on containers for get all assets <a href="https://github.com/ynput/OpenPype/pull/4600">#4600</a></summary>

This resolves #4044 by not actually relying on containers in the scene but instead just relying on finding nodes with `cbId` attributes. As such, imported nodes will also be found and a shader can be assigned (similar to when using get from selection). **Please take into consideration the potential downsides below.** Potential downsides would be:
- If an already loaded look has any dagNodes, say a 3D projection node, then that will also show up as a loaded asset, where previously nodes from loaded looks were ignored.
- If any dag nodes were created locally, they would have gotten `cbId` attributes on scene save, and thus the current asset would almost always show?

___

</details>

<details>
<summary>Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" <a href="https://github.com/ynput/OpenPype/pull/4605">#4605</a></summary>

Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution". This also tweaks it in Houdini from "Reset Frame Range" to "Set Frame Range".

___

</details>

<details>
<summary>Resolve missing OPENPYPE_MONGO in deadline global job preload <a href="https://github.com/ynput/OpenPype/pull/4484">#4484</a></summary>

<strong>In the GlobalJobPreLoad plugin, we propose to replace the SpawnProcess with a subprocess and to pass the environment variables in the parameters, since the SpawnProcess under CentOS Linux does not pass the environment variables.
</strong>In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process when it is under CentOS 7 Linux, and the process gets stuck. We propose to replace it with a subprocess and to pass the variables in the parameters.

___

</details>
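
A minimal sketch of the subprocess approach described in #4484 above (the executable path and arguments are hypothetical, not the plugin's actual code):

```python
import os
import subprocess

# Pass an explicit environment so variables such as OPENPYPE_MONGO reach the
# spawned OpenPype process on CentOS, instead of relying on SpawnProcess.
env = dict(os.environ)

process = subprocess.Popen(
    ["/opt/openpype/openpype_console", "--headless", "publish", "/tmp/job_metadata.json"],  # hypothetical
    env=env,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
)
output, _ = process.communicate()
print(output.decode("utf-8", errors="replace"))
```
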

<details>
<summary>Tests: Added setup_only to tests <a href="https://github.com/ynput/OpenPype/pull/4591">#4591</a></summary>

Allows downloading the test zip, unzipping it and restoring the DB in preparation for a new test.

___

</details>

<details>
<summary>Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) <a href="https://github.com/ynput/OpenPype/pull/4603">#4603</a></summary>

Fix #4429: Do not reset fps or playback timeline on applying or creating render settings.

___

</details>

<details>
<summary>Bump @sideway/formula from 3.0.0 to 3.0.1 in /website <a href="https://github.com/ynput/OpenPype/pull/4609">#4609</a></summary>

Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1.
<details>
<summary>Commits</summary>
<ul>
<li><a href="https://github.com/hapijs/formula/commit/5b44c1bffc38135616fb91d5ad46eaf64f03d23b"><code>5b44c1b</code></a> 3.0.1</li>
<li><a href="https://github.com/hapijs/formula/commit/9fbc20a02d75ae809c37a610a57802cd1b41b3fe"><code>9fbc20a</code></a> chore: better number regex</li>
<li><a href="https://github.com/hapijs/formula/commit/41ae98e0421913b100886adb0107a25d552d9e1a"><code>41ae98e</code></a> Cleanup</li>
<li><a href="https://github.com/hapijs/formula/commit/c59f35ec401e18cead10e0cedfb44291517610b1"><code>c59f35e</code></a> Move to Sideway</li>
<li>See full diff in <a href="https://github.com/sideway/formula/compare/v3.0.0...v3.0.1">compare view</a></li>
</ul>
</details>
<details>
<summary>Maintainer changes</summary>
<p>This version was pushed to npm by <a href="https://www.npmjs.com/~marsup">marsup</a>, a new releaser for <code>@sideway/formula</code> since your current version.</p>
</details>
<br />

[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts).

</details>
___

</details>

<details>
<summary>Update artist_hosts_maya_arnold.md <a href="https://github.com/ynput/OpenPype/pull/4626">#4626</a></summary>

Correct Arnold docs.
___

</details>

<details>
<summary>Maya: Add "Include Parent Hierarchy" option in animation creator plugin <a href="https://github.com/ynput/OpenPype/pull/4645">#4645</a></summary>

Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) the parent hierarchy. This avoids artists having to manually check the option for every animation they create.

___

</details>

<details>
<summary>General: Filter available applications <a href="https://github.com/ynput/OpenPype/pull/4667">#4667</a></summary>

Added an option to filter out applications that don't have a valid executable available in settings from the launcher and Ftrack actions. This option can be disabled in the new settings category `Applications`. The filtering is disabled by default.

___

</details>

<details>
<summary>3dsmax: make sure that startup script executes <a href="https://github.com/ynput/OpenPype/pull/4695">#4695</a></summary>

Fixing reliability of OpenPype startup in 3dsmax.

___

</details>

<details>
<summary>Project Manager: Change minimum frame start/end to '0' <a href="https://github.com/ynput/OpenPype/pull/4719">#4719</a></summary>

The Project Manager can have frame start/end set to `0`.

___

</details>


## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2)

[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2)

@ -14,6 +14,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook):

    # Execute after workfile template copy
    order = 10
    app_groups = [
        "3dsmax",
        "maya",
        "nuke",
        "nukex",

@ -3,10 +3,13 @@ from openpype.lib import PreLaunchHook
from openpype.pipeline.workfile import create_workdir_extra_folders


class AddLastWorkfileToLaunchArgs(PreLaunchHook):
    """Add last workfile path to launch arguments.
class CreateWorkdirExtraFolders(PreLaunchHook):
    """Create extra folders for the work directory.

    Based on setting `project_settings/global/tools/Workfiles/extra_folders`
    profile filtering will decide whether extra folders need to be created in
    the work directory.

    This is not possible to do for all applications the same way.
    """

    # Execute after workfile template copy

@ -7,7 +7,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):

    Nuke is executed "like" python process so it is required to pass
    `CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console.
    At the same time the newly created console won't create it's own stdout
    At the same time the newly created console won't create its own stdout
    and stderr handlers so they should not be redirected to DEVNULL.
    """

@ -18,7 +18,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook):

    def execute(self):
        # Change `creationflags` to CREATE_NEW_CONSOLE
        # - on Windows will nuke create new window using it's console
        # - on Windows nuke will create new window using its console
        # Set `stdout` and `stderr` to None so new created console does not
        # have redirected output to DEVNULL in build
        self.launch_context.kwargs.update({

@ -31,10 +31,13 @@ from .lib import (
    lsattrs,
    read,
    maintained_selection,
    maintained_time,
    get_selection,
    # unique_name,
)

from .capture import capture


__all__ = [
    "install",

@ -56,9 +59,11 @@ __all__ = [

    # Utility functions
    "maintained_selection",
    "maintained_time",
    "lsattr",
    "lsattrs",
    "read",
    "get_selection",
    "capture",
    # "unique_name",
]
278  openpype/hosts/blender/api/capture.py  Normal file
@ -0,0 +1,278 @@
"""Blender Capture
Playblasting with independent viewport, camera and display options
"""
import contextlib
import bpy

from .lib import maintained_time
from .plugin import deselect_all, create_blender_context


def capture(
    camera=None,
    width=None,
    height=None,
    filename=None,
    start_frame=None,
    end_frame=None,
    step_frame=None,
    sound=None,
    isolate=None,
    maintain_aspect_ratio=True,
    overwrite=False,
    image_settings=None,
    display_options=None
):
    """Playblast in an independent window

    Arguments:
        camera (str, optional): Name of camera, defaults to "Camera"
        width (int, optional): Width of output in pixels
        height (int, optional): Height of output in pixels
        filename (str, optional): Name of output file path. Defaults to current
            render output path.
        start_frame (int, optional): Defaults to current start frame.
        end_frame (int, optional): Defaults to current end frame.
        step_frame (int, optional): Defaults to 1.
        sound (str, optional): Specify the sound node to be used during
            playblast. When None (default) no sound will be used.
        isolate (list): List of nodes to isolate upon capturing
        maintain_aspect_ratio (bool, optional): Modify height in order to
            maintain aspect ratio.
        overwrite (bool, optional): Whether or not to overwrite if file
            already exists. If disabled and the file exists, an error will be
            raised.
        image_settings (dict, optional): Supplied image settings for render,
            using `ImageSettings`
        display_options (dict, optional): Supplied display options for render
    """

    scene = bpy.context.scene
    camera = camera or "Camera"

    # Ensure camera exists.
    if camera not in scene.objects and camera != "AUTO":
        raise RuntimeError("Camera does not exist: {0}".format(camera))

    # Ensure resolution.
    if width and height:
        maintain_aspect_ratio = False
    width = width or scene.render.resolution_x
    height = height or scene.render.resolution_y
    if maintain_aspect_ratio:
        ratio = scene.render.resolution_x / scene.render.resolution_y
        height = round(width / ratio)

    # Get frame range.
    if start_frame is None:
        start_frame = scene.frame_start
    if end_frame is None:
        end_frame = scene.frame_end
    if step_frame is None:
        step_frame = 1
    frame_range = (start_frame, end_frame, step_frame)

    if filename is None:
        filename = scene.render.filepath

    render_options = {
        "filepath": "{}.".format(filename.rstrip(".")),
        "resolution_x": width,
        "resolution_y": height,
        "use_overwrite": overwrite,
    }

    with _independent_window() as window:

        applied_view(window, camera, isolate, options=display_options)

        with contextlib.ExitStack() as stack:
            stack.enter_context(maintain_camera(window, camera))
            stack.enter_context(applied_frame_range(window, *frame_range))
            stack.enter_context(applied_render_options(window, render_options))
            stack.enter_context(applied_image_settings(window, image_settings))
            stack.enter_context(maintained_time())

            bpy.ops.render.opengl(
                animation=True,
                render_keyed_only=False,
                sequencer=False,
                write_still=False,
                view_context=True
            )

    return filename


ImageSettings = {
    "file_format": "FFMPEG",
    "color_mode": "RGB",
    "ffmpeg": {
        "format": "QUICKTIME",
        "use_autosplit": False,
        "codec": "H264",
        "constant_rate_factor": "MEDIUM",
        "gopsize": 18,
        "use_max_b_frames": False,
    },
}


def isolate_objects(window, objects):
    """Isolate selection"""
    deselect_all()

    for obj in objects:
        obj.select_set(True)

    context = create_blender_context(selected=objects, window=window)

    bpy.ops.view3d.view_axis(context, type="FRONT")
    bpy.ops.view3d.localview(context)

    deselect_all()


def _apply_options(entity, options):
    for option, value in options.items():
        if isinstance(value, dict):
            _apply_options(getattr(entity, option), value)
        else:
            setattr(entity, option, value)


def applied_view(window, camera, isolate=None, options=None):
    """Apply view options to window."""
    area = window.screen.areas[0]
    space = area.spaces[0]

    area.ui_type = "VIEW_3D"

    meshes = [obj for obj in window.scene.objects if obj.type == "MESH"]

    if camera == "AUTO":
        space.region_3d.view_perspective = "ORTHO"
        isolate_objects(window, isolate or meshes)
    else:
        isolate_objects(window, isolate or meshes)
        space.camera = window.scene.objects.get(camera)
        space.region_3d.view_perspective = "CAMERA"

    if isinstance(options, dict):
        _apply_options(space, options)
    else:
        space.shading.type = "SOLID"
        space.shading.color_type = "MATERIAL"
        space.show_gizmo = False
        space.overlay.show_overlays = False


@contextlib.contextmanager
def applied_frame_range(window, start, end, step):
    """Context manager for setting frame range."""
    # Store current frame range
    current_frame_start = window.scene.frame_start
    current_frame_end = window.scene.frame_end
    current_frame_step = window.scene.frame_step
    # Apply frame range
    window.scene.frame_start = start
    window.scene.frame_end = end
    window.scene.frame_step = step
    try:
        yield
    finally:
        # Restore frame range
        window.scene.frame_start = current_frame_start
        window.scene.frame_end = current_frame_end
        window.scene.frame_step = current_frame_step


@contextlib.contextmanager
def applied_render_options(window, options):
    """Context manager for setting render options."""
    render = window.scene.render

    # Store current settings
    original = {}
    for opt in options.copy():
        try:
            original[opt] = getattr(render, opt)
        except ValueError:
            options.pop(opt)

    # Apply settings
    _apply_options(render, options)

    try:
        yield
    finally:
        # Restore previous settings
        _apply_options(render, original)


@contextlib.contextmanager
def applied_image_settings(window, options):
    """Context manager to override image settings."""

    options = options or ImageSettings.copy()
    ffmpeg = options.pop("ffmpeg", {})
    render = window.scene.render

    # Store current image settings
    original = {}
    for opt in options.copy():
        try:
            original[opt] = getattr(render.image_settings, opt)
        except ValueError:
            options.pop(opt)

    # Store current ffmpeg settings
    original_ffmpeg = {}
    for opt in ffmpeg.copy():
        try:
            original_ffmpeg[opt] = getattr(render.ffmpeg, opt)
        except ValueError:
            ffmpeg.pop(opt)

    # Apply image settings
    for opt, value in options.items():
        setattr(render.image_settings, opt, value)

    # Apply ffmpeg settings
    for opt, value in ffmpeg.items():
        setattr(render.ffmpeg, opt, value)

    try:
        yield
    finally:
        # Restore previous settings
        for opt, value in original.items():
            setattr(render.image_settings, opt, value)
        for opt, value in original_ffmpeg.items():
            setattr(render.ffmpeg, opt, value)


@contextlib.contextmanager
def maintain_camera(window, camera):
    """Context manager to override camera."""
    current_camera = window.scene.camera
    if camera in window.scene.objects:
        window.scene.camera = window.scene.objects.get(camera)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
window.scene.camera = current_camera
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _independent_window():
|
||||
"""Create capture-window context."""
|
||||
context = create_blender_context()
|
||||
current_windows = set(bpy.context.window_manager.windows)
|
||||
bpy.ops.wm.window_new(context)
|
||||
window = list(set(bpy.context.window_manager.windows) - current_windows)[0]
|
||||
context["window"] = window
|
||||
try:
|
||||
yield window
|
||||
finally:
|
||||
bpy.ops.wm.window_close(context)
|
||||
|
|
@ -284,3 +284,13 @@ def maintained_selection():
|
|||
# This could happen if the active node was deleted during the
|
||||
# context.
|
||||
log.exception("Failed to set active object.")
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def maintained_time():
|
||||
"""Maintain current frame during context."""
|
||||
current_time = bpy.context.scene.frame_current
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
bpy.context.scene.frame_current = current_time
|
||||
|
|
|
|||
|
|
@ -84,11 +84,11 @@ class MainThreadItem:
|
|||
self.kwargs = kwargs
|
||||
|
||||
def execute(self):
|
||||
"""Execute callback and store it's result.
|
||||
"""Execute callback and store its result.
|
||||
|
||||
Method must be called from main thread. Item is marked as `done`
|
||||
when callback execution finishes. Store output of callback or exception
|
||||
information when callback raise one.
|
||||
information when callback raises one.
|
||||
"""
|
||||
print("Executing process in main thread")
|
||||
if self.done:
|
||||
|
|
|
|||
|
|
@ -62,7 +62,8 @@ def prepare_data(data, container_name=None):
|
|||
|
||||
|
||||
def create_blender_context(active: Optional[bpy.types.Object] = None,
|
||||
selected: Optional[bpy.types.Object] = None,):
|
||||
selected: Optional[bpy.types.Object] = None,
|
||||
window: Optional[bpy.types.Window] = None):
|
||||
"""Create a new Blender context. If an object is passed as
|
||||
parameter, it is set as selected and active.
|
||||
"""
|
||||
|
|
@ -72,7 +73,9 @@ def create_blender_context(active: Optional[bpy.types.Object] = None,
|
|||
|
||||
override_context = bpy.context.copy()
|
||||
|
||||
for win in bpy.context.window_manager.windows:
|
||||
windows = [window] if window else bpy.context.window_manager.windows
|
||||
|
||||
for win in windows:
|
||||
for area in win.screen.areas:
|
||||
if area.type == 'VIEW_3D':
|
||||
for region in area.regions:
|
||||
|
|
|
|||
47
openpype/hosts/blender/plugins/create/create_review.py
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
"""Create review."""
|
||||
|
||||
import bpy
|
||||
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.hosts.blender.api import plugin, lib, ops
|
||||
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
|
||||
|
||||
|
||||
class CreateReview(plugin.Creator):
|
||||
"""Single baked camera"""
|
||||
|
||||
name = "reviewDefault"
|
||||
label = "Review"
|
||||
family = "review"
|
||||
icon = "video-camera"
|
||||
|
||||
def process(self):
|
||||
""" Run the creator on Blender main thread"""
|
||||
mti = ops.MainThreadItem(self._process)
|
||||
ops.execute_in_main_thread(mti)
|
||||
|
||||
def _process(self):
|
||||
# Get Instance Container or create it if it does not exist
|
||||
instances = bpy.data.collections.get(AVALON_INSTANCES)
|
||||
if not instances:
|
||||
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
|
||||
bpy.context.scene.collection.children.link(instances)
|
||||
|
||||
# Create instance object
|
||||
asset = self.data["asset"]
|
||||
subset = self.data["subset"]
|
||||
name = plugin.asset_name(asset, subset)
|
||||
asset_group = bpy.data.collections.new(name=name)
|
||||
instances.children.link(asset_group)
|
||||
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
|
||||
lib.imprint(asset_group, self.data)
|
||||
|
||||
if (self.options or {}).get("useSelection"):
|
||||
selected = lib.get_selection()
|
||||
for obj in selected:
|
||||
asset_group.objects.link(obj)
|
||||
elif (self.options or {}).get("asset_group"):
|
||||
obj = (self.options or {}).get("asset_group")
|
||||
asset_group.objects.link(obj)
|
||||
|
||||
return asset_group
|
||||
64
openpype/hosts/blender/plugins/publish/collect_review.py
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
import bpy
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
||||
class CollectReview(pyblish.api.InstancePlugin):
|
||||
"""Collect Review data
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.3
|
||||
label = "Collect Review Data"
|
||||
families = ["review"]
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
self.log.debug(f"instance: {instance}")
|
||||
|
||||
# get cameras
|
||||
cameras = [
|
||||
obj
|
||||
for obj in instance
|
||||
if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA"
|
||||
]
|
||||
|
||||
assert len(cameras) == 1, (
|
||||
f"Not a single camera found in extraction: {cameras}"
|
||||
)
|
||||
camera = cameras[0].name
|
||||
self.log.debug(f"camera: {camera}")
|
||||
|
||||
# get isolate objects list from mesh instance members.
|
||||
isolate_objects = [
|
||||
obj
|
||||
for obj in instance
|
||||
if isinstance(obj, bpy.types.Object) and obj.type == "MESH"
|
||||
]
|
||||
|
||||
if not instance.data.get("remove"):
|
||||
|
||||
task = legacy_io.Session.get("AVALON_TASK")
|
||||
|
||||
instance.data.update({
|
||||
"subset": f"{task}Review",
|
||||
"review_camera": camera,
|
||||
"frameStart": instance.context.data["frameStart"],
|
||||
"frameEnd": instance.context.data["frameEnd"],
|
||||
"fps": instance.context.data["fps"],
|
||||
"isolate": isolate_objects,
|
||||
})
|
||||
|
||||
self.log.debug(f"instance data: {instance.data}")
|
||||
|
||||
# TODO : Collect audio
|
||||
audio_tracks = []
|
||||
instance.data["audio"] = []
|
||||
for track in audio_tracks:
|
||||
instance.data["audio"].append(
|
||||
{
|
||||
"offset": track.offset.get(),
|
||||
"filename": track.filename.get(),
|
||||
}
|
||||
)
|
||||
123
openpype/hosts/blender/plugins/publish/extract_playblast.py
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
import os
|
||||
import clique
|
||||
|
||||
import bpy
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from openpype.hosts.blender.api import capture
|
||||
from openpype.hosts.blender.api.lib import maintained_time
|
||||
|
||||
|
||||
class ExtractPlayblast(publish.Extractor):
|
||||
"""
|
||||
Extract viewport playblast.
|
||||
|
||||
Takes the review camera and creates a review image sequence of the viewport
|
||||
capture.
|
||||
"""
|
||||
|
||||
label = "Extract Playblast"
|
||||
hosts = ["blender"]
|
||||
families = ["review"]
|
||||
optional = True
|
||||
order = pyblish.api.ExtractorOrder + 0.01
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info("Extracting capture..")
|
||||
|
||||
self.log.info(instance.data)
|
||||
|
||||
# get scene fps
|
||||
fps = instance.data.get("fps")
|
||||
if fps is None:
|
||||
fps = bpy.context.scene.render.fps
|
||||
instance.data["fps"] = fps
|
||||
|
||||
self.log.info(f"fps: {fps}")
|
||||
|
||||
# If start and end frames cannot be determined,
|
||||
# get them from Blender timeline.
|
||||
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
|
||||
end = instance.data.get("frameEnd", bpy.context.scene.frame_end)
|
||||
|
||||
self.log.info(f"start: {start}, end: {end}")
|
||||
assert end > start, "Invalid time range!"
|
||||
|
||||
# get cameras
|
||||
camera = instance.data("review_camera", None)
|
||||
|
||||
# get isolate objects list
|
||||
isolate = instance.data("isolate", None)
|
||||
|
||||
# get output path
|
||||
stagingdir = self.staging_dir(instance)
|
||||
filename = instance.name
|
||||
path = os.path.join(stagingdir, filename)
|
||||
|
||||
self.log.info(f"Outputting images to {path}")
|
||||
|
||||
project_settings = instance.context.data["project_settings"]["blender"]
|
||||
presets = project_settings["publish"]["ExtractPlayblast"]["presets"]
|
||||
preset = presets.get("default")
|
||||
preset.update({
|
||||
"camera": camera,
|
||||
"start_frame": start,
|
||||
"end_frame": end,
|
||||
"filename": path,
|
||||
"overwrite": True,
|
||||
"isolate": isolate,
|
||||
})
|
||||
preset.setdefault(
|
||||
"image_settings",
|
||||
{
|
||||
"file_format": "PNG",
|
||||
"color_mode": "RGB",
|
||||
"color_depth": "8",
|
||||
"compression": 15,
|
||||
},
|
||||
)
|
||||
|
||||
with maintained_time():
|
||||
path = capture(**preset)
|
||||
|
||||
self.log.debug(f"playblast path {path}")
|
||||
|
||||
collected_files = os.listdir(stagingdir)
|
||||
collections, remainder = clique.assemble(
|
||||
collected_files,
|
||||
patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"],
|
||||
)
|
||||
|
||||
if len(collections) > 1:
|
||||
raise RuntimeError(
|
||||
f"More than one collection found in stagingdir: {stagingdir}"
|
||||
)
|
||||
elif len(collections) == 0:
|
||||
raise RuntimeError(
|
||||
f"No collection found in stagingdir: {stagingdir}"
|
||||
)
|
||||
|
||||
frame_collection = collections[0]
|
||||
|
||||
self.log.info(f"We found collection of interest {frame_collection}")
|
||||
|
||||
instance.data.setdefault("representations", [])
|
||||
|
||||
tags = ["review"]
|
||||
if not instance.data.get("keepImages"):
|
||||
tags.append("delete")
|
||||
|
||||
representation = {
|
||||
"name": "png",
|
||||
"ext": "png",
|
||||
"files": list(frame_collection),
|
||||
"stagingDir": stagingdir,
|
||||
"frameStart": start,
|
||||
"frameEnd": end,
|
||||
"fps": fps,
|
||||
"preview": True,
|
||||
"tags": tags,
|
||||
"camera_name": camera
|
||||
}
|
||||
instance.data["representations"].append(representation)
|
||||
99
openpype/hosts/blender/plugins/publish/extract_thumbnail.py
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
import os
|
||||
import glob
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from openpype.hosts.blender.api import capture
|
||||
from openpype.hosts.blender.api.lib import maintained_time
|
||||
|
||||
import bpy
|
||||
|
||||
|
||||
class ExtractThumbnail(publish.Extractor):
|
||||
"""Extract viewport thumbnail.
|
||||
|
||||
Takes review camera and creates a thumbnail based on viewport
|
||||
capture.
|
||||
|
||||
"""
|
||||
|
||||
label = "Extract Thumbnail"
|
||||
hosts = ["blender"]
|
||||
families = ["review"]
|
||||
order = pyblish.api.ExtractorOrder + 0.01
|
||||
presets = {}
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info("Extracting capture..")
|
||||
|
||||
stagingdir = self.staging_dir(instance)
|
||||
filename = instance.name
|
||||
path = os.path.join(stagingdir, filename)
|
||||
|
||||
self.log.info(f"Outputting images to {path}")
|
||||
|
||||
camera = instance.data.get("review_camera", "AUTO")
|
||||
start = instance.data.get("frameStart", bpy.context.scene.frame_start)
|
||||
family = instance.data.get("family")
|
||||
isolate = instance.data("isolate", None)
|
||||
|
||||
preset = self.presets.get(family, {})
|
||||
|
||||
preset.update({
|
||||
"camera": camera,
|
||||
"start_frame": start,
|
||||
"end_frame": start,
|
||||
"filename": path,
|
||||
"overwrite": True,
|
||||
"isolate": isolate,
|
||||
})
|
||||
preset.setdefault(
|
||||
"image_settings",
|
||||
{
|
||||
"file_format": "JPEG",
|
||||
"color_mode": "RGB",
|
||||
"quality": 100,
|
||||
},
|
||||
)
|
||||
|
||||
with maintained_time():
|
||||
path = capture(**preset)
|
||||
|
||||
thumbnail = os.path.basename(self._fix_output_path(path))
|
||||
|
||||
self.log.info(f"thumbnail: {thumbnail}")
|
||||
|
||||
instance.data.setdefault("representations", [])
|
||||
|
||||
representation = {
|
||||
"name": "thumbnail",
|
||||
"ext": "jpg",
|
||||
"files": thumbnail,
|
||||
"stagingDir": stagingdir,
|
||||
"thumbnail": True
|
||||
}
|
||||
instance.data["representations"].append(representation)
|
||||
|
||||
def _fix_output_path(self, filepath):
|
||||
""""Workaround to return correct filepath.
|
||||
|
||||
The rendered file name may not exactly match the requested path, so we
|
||||
glob for any file extension and assume the most recently modified file is the correct output.
|
||||
|
||||
"""
|
||||
# Catch cancelled playblast
|
||||
if filepath is None:
|
||||
self.log.warning(
|
||||
"Playblast did not result in output path. "
|
||||
"Playblast is probably interrupted."
|
||||
)
|
||||
return None
|
||||
|
||||
if not os.path.exists(filepath):
|
||||
files = glob.glob(f"{filepath}.*.jpg")
|
||||
|
||||
if not files:
|
||||
raise RuntimeError(f"Couldn't find playblast from: {filepath}")
|
||||
filepath = max(files, key=os.path.getmtime)
|
||||
|
||||
return filepath
|
||||
|
|
@ -38,8 +38,9 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
)
|
||||
|
||||
path_to_cli = os.path.join(CELACTION_SCRIPTS_DIR, "publish_cli.py")
|
||||
subproces_args = get_openpype_execute_args("run", path_to_cli)
|
||||
openpype_executable = subproces_args.pop(0)
|
||||
subprocess_args = get_openpype_execute_args("run", path_to_cli)
|
||||
openpype_executable = subprocess_args.pop(0)
|
||||
workfile_settings = self.get_workfile_settings()
|
||||
|
||||
winreg.SetValueEx(
|
||||
hKey,
|
||||
|
|
@ -49,20 +50,34 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
openpype_executable
|
||||
)
|
||||
|
||||
parameters = subproces_args + [
|
||||
"--currentFile", "*SCENE*",
|
||||
"--chunk", "*CHUNK*",
|
||||
"--frameStart", "*START*",
|
||||
"--frameEnd", "*END*",
|
||||
"--resolutionWidth", "*X*",
|
||||
"--resolutionHeight", "*Y*"
|
||||
# add required arguments for workfile path
|
||||
parameters = subprocess_args + [
|
||||
"--currentFile", "*SCENE*"
|
||||
]
|
||||
|
||||
# Add custom parameters from workfile settings
|
||||
if "render_chunk" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--chunk", "*CHUNK*"
|
||||
]
|
||||
if "resolution" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--resolutionWidth", "*X*",
|
||||
"--resolutionHeight", "*Y*"
|
||||
]
|
||||
if "frame_range" in workfile_settings["submission_overrides"]:
|
||||
parameters += [
|
||||
"--frameStart", "*START*",
|
||||
"--frameEnd", "*END*"
|
||||
]
|
||||
|
||||
winreg.SetValueEx(
|
||||
hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
|
||||
subprocess.list2cmdline(parameters)
|
||||
)
|
||||
|
||||
self.log.debug(f"__ parameters: \"{parameters}\"")
|
||||
|
||||
# setting resolution parameters
|
||||
path_submit = "\\".join([
|
||||
path_user_settings, "Dialogs", "SubmitOutput"
|
||||
|
|
@ -135,3 +150,6 @@ class CelactionPrelaunchHook(PreLaunchHook):
|
|||
self.log.info(f"Workfile to open: \"{workfile_path}\"")
|
||||
|
||||
return workfile_path
|
||||
|
||||
def get_workfile_settings(self):
|
||||
return self.data["project_settings"]["celaction"]["workfile"]
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ class CollectCelactionCliKwargs(pyblish.api.Collector):
|
|||
passing_kwargs[key] = value
|
||||
|
||||
if missing_kwargs:
|
||||
raise RuntimeError("Missing arguments {}".format(
|
||||
self.log.debug("Missing arguments {}".format(
|
||||
", ".join(
|
||||
[f'"{key}"' for key in missing_kwargs]
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,10 +1,14 @@
|
|||
from .addon import (
|
||||
get_fusion_version,
|
||||
FusionAddon,
|
||||
FUSION_HOST_DIR,
|
||||
FUSION_VERSIONS_DICT,
|
||||
)
|
||||
|
||||
|
||||
__all__ = (
|
||||
"get_fusion_version",
|
||||
"FusionAddon",
|
||||
"FUSION_HOST_DIR",
|
||||
"FUSION_VERSIONS_DICT",
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,8 +1,52 @@
|
|||
import os
|
||||
import re
|
||||
from openpype.modules import OpenPypeModule, IHostAddon
|
||||
from openpype.lib import Logger
|
||||
|
||||
FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
# FUSION_VERSIONS_DICT is used by the pre-launch hooks
|
||||
# The keys correspond to all currently supported Fusion versions
|
||||
# Each value is a list of corresponding Python home variables and a profile
|
||||
# number, which is used by the profile hook to set Fusion profile variables.
|
||||
FUSION_VERSIONS_DICT = {
|
||||
9: ("FUSION_PYTHON36_HOME", 9),
|
||||
16: ("FUSION16_PYTHON36_HOME", 16),
|
||||
17: ("FUSION16_PYTHON36_HOME", 16),
|
||||
18: ("FUSION_PYTHON3_HOME", 16),
|
||||
}
|
||||
|
||||
|
||||
def get_fusion_version(app_name):
|
||||
"""
|
||||
This function is triggered by the prelaunch hooks to get the Fusion version.
|
||||
|
||||
`app_name` is obtained by prelaunch hooks from the
|
||||
`launch_context.env.get("AVALON_APP_NAME")`.
|
||||
|
||||
To get a correct Fusion version, a version number should be present
|
||||
in the `applications/fusion/variants` key
|
||||
of the Blackmagic Fusion Application Settings.
|
||||
"""
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
if not app_name:
|
||||
return
|
||||
|
||||
app_version_candidates = re.findall(r"\d+", app_name)
|
||||
if not app_version_candidates:
|
||||
return
|
||||
for app_version in app_version_candidates:
|
||||
if int(app_version) in FUSION_VERSIONS_DICT:
|
||||
return int(app_version)
|
||||
else:
|
||||
log.info(
|
||||
"Unsupported Fusion version: {app_version}".format(
|
||||
app_version=app_version
|
||||
)
|
||||
)
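A short sketch of how the lookup behaves for typical variant names; the app names below are hypothetical `AVALON_APP_NAME` values:

get_fusion_version("fusion/18")  # -> 18
get_fusion_version("fusion/17")  # -> 17
get_fusion_version("fusion")     # -> None (no digits in the name)
get_fusion_version("fusion/12")  # -> None, logs "Unsupported Fusion version: 12"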
|
||||
|
||||
|
||||
class FusionAddon(OpenPypeModule, IHostAddon):
|
||||
name = "fusion"
|
||||
|
|
@ -14,15 +58,11 @@ class FusionAddon(OpenPypeModule, IHostAddon):
|
|||
def get_launch_hook_paths(self, app):
|
||||
if app.host_name != self.host_name:
|
||||
return []
|
||||
return [
|
||||
os.path.join(FUSION_HOST_DIR, "hooks")
|
||||
]
|
||||
return [os.path.join(FUSION_HOST_DIR, "hooks")]
|
||||
|
||||
def add_implementation_envs(self, env, _app):
|
||||
# Set default values if are not already set via settings
|
||||
defaults = {
|
||||
"OPENPYPE_LOG_NO_COLORS": "Yes"
|
||||
}
|
||||
defaults = {"OPENPYPE_LOG_NO_COLORS": "Yes"}
|
||||
for key, value in defaults.items():
|
||||
if not env.get(key):
|
||||
env[key] = value
|
||||
|
|
|
|||
|
|
@ -6,12 +6,13 @@ from openpype.pipeline.publish import get_errored_instances_from_context
|
|||
|
||||
|
||||
class SelectInvalidAction(pyblish.api.Action):
|
||||
"""Select invalid nodes in Maya when plug-in failed.
|
||||
"""Select invalid nodes in Fusion when plug-in failed.
|
||||
|
||||
To retrieve the invalid nodes this assumes a static `get_invalid()`
|
||||
method is available on the plugin.
|
||||
|
||||
"""
|
||||
|
||||
label = "Select invalid"
|
||||
on = "failed" # This action is only available on a failed plug-in
|
||||
icon = "search" # Icon from Awesome Icon
|
||||
|
|
@ -31,8 +32,10 @@ class SelectInvalidAction(pyblish.api.Action):
|
|||
if isinstance(invalid_nodes, (list, tuple)):
|
||||
invalid.extend(invalid_nodes)
|
||||
else:
|
||||
self.log.warning("Plug-in returned to be invalid, "
|
||||
"but has no selectable nodes.")
|
||||
self.log.warning(
|
||||
"Plug-in returned to be invalid, "
|
||||
"but has no selectable nodes."
|
||||
)
|
||||
|
||||
if not invalid:
|
||||
# Assume relevant comp is current comp and clear selection
|
||||
|
|
@ -51,4 +54,6 @@ class SelectInvalidAction(pyblish.api.Action):
|
|||
for tool in invalid:
|
||||
flow.Select(tool, True)
|
||||
names.add(tool.Name)
|
||||
self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names)))
|
||||
self.log.info(
|
||||
"Selecting invalid tools: %s" % ", ".join(sorted(names))
|
||||
)
|
||||
|
|
|
|||
|
|
@ -303,10 +303,18 @@ def get_frame_path(path):
|
|||
return filename, padding, ext
|
||||
|
||||
|
||||
def get_current_comp():
|
||||
"""Hack to get current comp in this session"""
|
||||
def get_fusion_module():
|
||||
"""Get current Fusion instance"""
|
||||
fusion = getattr(sys.modules["__main__"], "fusion", None)
|
||||
return fusion.CurrentComp if fusion else None
|
||||
return fusion
|
||||
|
||||
|
||||
def get_current_comp():
|
||||
"""Get current comp in this session"""
|
||||
fusion = get_fusion_module()
|
||||
if fusion is not None:
|
||||
comp = fusion.CurrentComp
|
||||
return comp
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ from openpype.tools.utils import host_tools
|
|||
from openpype.style import load_stylesheet
|
||||
from openpype.lib import register_event_callback
|
||||
from openpype.hosts.fusion.scripts import (
|
||||
set_rendermode,
|
||||
duplicate_with_inputs,
|
||||
)
|
||||
from openpype.hosts.fusion.api.lib import (
|
||||
|
|
@ -60,7 +59,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
publish_btn = QtWidgets.QPushButton("Publish...", self)
|
||||
manager_btn = QtWidgets.QPushButton("Manage...", self)
|
||||
libload_btn = QtWidgets.QPushButton("Library...", self)
|
||||
rendermode_btn = QtWidgets.QPushButton("Set render mode...", self)
|
||||
set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self)
|
||||
set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self)
|
||||
duplicate_with_inputs_btn = QtWidgets.QPushButton(
|
||||
|
|
@ -91,7 +89,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
|
||||
layout.addWidget(set_framerange_btn)
|
||||
layout.addWidget(set_resolution_btn)
|
||||
layout.addWidget(rendermode_btn)
|
||||
|
||||
layout.addSpacing(20)
|
||||
|
||||
|
|
@ -108,7 +105,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
load_btn.clicked.connect(self.on_load_clicked)
|
||||
manager_btn.clicked.connect(self.on_manager_clicked)
|
||||
libload_btn.clicked.connect(self.on_libload_clicked)
|
||||
rendermode_btn.clicked.connect(self.on_rendermode_clicked)
|
||||
duplicate_with_inputs_btn.clicked.connect(
|
||||
self.on_duplicate_with_inputs_clicked
|
||||
)
|
||||
|
|
@ -162,15 +158,6 @@ class OpenPypeMenu(QtWidgets.QWidget):
|
|||
def on_libload_clicked(self):
|
||||
host_tools.show_library_loader()
|
||||
|
||||
def on_rendermode_clicked(self):
|
||||
if self.render_mode_widget is None:
|
||||
window = set_rendermode.SetRenderMode()
|
||||
window.setStyleSheet(load_stylesheet())
|
||||
window.show()
|
||||
self.render_mode_widget = window
|
||||
else:
|
||||
self.render_mode_widget.show()
|
||||
|
||||
def on_duplicate_with_inputs_clicked(self):
|
||||
duplicate_with_inputs.duplicate_with_input_connections()
|
||||
|
||||
|
|
|
|||
|
|
@ -1,19 +1,19 @@
|
|||
{
|
||||
Locked = true,
|
||||
Global = {
|
||||
Paths = {
|
||||
Map = {
|
||||
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
|
||||
["Reactor:"] = "$(REACTOR)",
|
||||
|
||||
["Config:"] = "UserPaths:Config;OpenPype:Config",
|
||||
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
|
||||
["UserPaths:"] = "UserData:;AllData:;Fusion:;Reactor:Deploy"
|
||||
},
|
||||
},
|
||||
Script = {
|
||||
PythonVersion = 3,
|
||||
Python3Forced = true
|
||||
},
|
||||
Paths = {
|
||||
Map = {
|
||||
["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy",
|
||||
["Config:"] = "UserPaths:Config;OpenPype:Config",
|
||||
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts",
|
||||
},
|
||||
}
|
||||
},
|
||||
Script = {
|
||||
PythonVersion = 3,
|
||||
Python3Forced = true
|
||||
},
|
||||
UserInterface = {
|
||||
Language = "en_US"
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
|||
161
openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py
Normal file
|
|
@ -0,0 +1,161 @@
|
|||
import os
|
||||
import shutil
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
|
||||
from openpype.hosts.fusion import (
|
||||
FUSION_HOST_DIR,
|
||||
FUSION_VERSIONS_DICT,
|
||||
get_fusion_version,
|
||||
)
|
||||
|
||||
|
||||
class FusionCopyPrefsPrelaunch(PreLaunchHook):
|
||||
"""
|
||||
Prepares local Fusion profile directory, copies existing Fusion profile.
|
||||
This also sets FUSION MasterPrefs variable, which is used
|
||||
to apply Master.prefs file to override some Fusion profile settings to:
|
||||
- enable the OpenPype menu
|
||||
- force Python 3 over Python 2
|
||||
- force English interface
|
||||
Master.prefs is defined in openpype/hosts/fusion/deploy/fusion_shared.prefs
|
||||
"""
|
||||
|
||||
app_groups = ["fusion"]
|
||||
order = 2
|
||||
|
||||
def get_fusion_profile_name(self, profile_version) -> str:
|
||||
# Returns 'Default', unless FUSION16_PROFILE is set
|
||||
return os.getenv(f"FUSION{profile_version}_PROFILE", "Default")
|
||||
|
||||
def get_fusion_profile_dir(self, profile_version) -> Path:
|
||||
# Get FUSION_PROFILE_DIR variable
|
||||
fusion_profile = self.get_fusion_profile_name(profile_version)
|
||||
fusion_var_prefs_dir = os.getenv(
|
||||
f"FUSION{profile_version}_PROFILE_DIR"
|
||||
)
|
||||
|
||||
# Check if FUSION_PROFILE_DIR exists
|
||||
if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir():
|
||||
fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile)
|
||||
self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}")
|
||||
return fu_prefs_dir
|
||||
|
||||
def get_profile_source(self, profile_version) -> Path:
|
||||
"""Get Fusion preferences profile location.
|
||||
See Per-User_Preferences_and_Paths on VFXpedia for reference.
|
||||
"""
|
||||
fusion_profile = self.get_fusion_profile_name(profile_version)
|
||||
profile_source = self.get_fusion_profile_dir(profile_version)
|
||||
if profile_source:
|
||||
return profile_source
|
||||
# otherwise get default location of the profile folder
|
||||
fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}"
|
||||
if platform.system() == "Windows":
|
||||
profile_source = Path(os.getenv("AppData"), fu_prefs_dir)
|
||||
elif platform.system() == "Darwin":
|
||||
profile_source = Path(
|
||||
"~/Library/Application Support/", fu_prefs_dir
|
||||
).expanduser()
|
||||
elif platform.system() == "Linux":
|
||||
profile_source = Path("~/.fusion", fu_prefs_dir).expanduser()
|
||||
self.log.info(
|
||||
f"Locating source Fusion prefs directory: {profile_source}"
|
||||
)
|
||||
return profile_source
|
||||
|
||||
def get_copy_fusion_prefs_settings(self):
|
||||
# Get copy preferences options from the global application settings
|
||||
|
||||
copy_fusion_settings = self.data["project_settings"]["fusion"].get(
|
||||
"copy_fusion_settings", {}
|
||||
)
|
||||
if not copy_fusion_settings:
|
||||
self.log.error("Copy prefs settings not found")
|
||||
copy_status = copy_fusion_settings.get("copy_status", False)
|
||||
force_sync = copy_fusion_settings.get("force_sync", False)
|
||||
copy_path = copy_fusion_settings.get("copy_path") or None
|
||||
if copy_path:
|
||||
copy_path = Path(copy_path).expanduser()
|
||||
return copy_status, copy_path, force_sync
|
||||
|
||||
def copy_fusion_profile(
|
||||
self, copy_from: Path, copy_to: Path, force_sync: bool
|
||||
) -> None:
|
||||
"""On the first Fusion launch copy the contents of Fusion profile
|
||||
directory to the predefined working location. If the OpenPype profile
|
||||
folder exists, skip copying, unless re-sync is checked.
|
||||
If the prefs were not copied on the first launch,
|
||||
a clean Fusion profile will be created in fu_profile_dir.
|
||||
"""
|
||||
if copy_to.exists() and not force_sync:
|
||||
self.log.info(
|
||||
"Destination Fusion preferences folder already exists: "
|
||||
f"{copy_to} "
|
||||
)
|
||||
return
|
||||
self.log.info("Starting copying Fusion preferences")
|
||||
self.log.debug(f"force_sync option is set to {force_sync}")
|
||||
try:
|
||||
copy_to.mkdir(exist_ok=True, parents=True)
|
||||
except PermissionError:
|
||||
self.log.warning(f"Creating the folder not permitted at {copy_to}")
|
||||
return
|
||||
if not copy_from.exists():
|
||||
self.log.warning(f"Fusion preferences not found in {copy_from}")
|
||||
return
|
||||
for file in copy_from.iterdir():
|
||||
if file.suffix in (
|
||||
".prefs",
|
||||
".def",
|
||||
".blocklist",
|
||||
".fu",
|
||||
".toolbars",
|
||||
):
|
||||
# convert Path to str to be compatible with Python 3.6+
|
||||
shutil.copy(str(file), str(copy_to))
|
||||
self.log.info(
|
||||
f"Successfully copied preferences: {copy_from} to {copy_to}"
|
||||
)
|
||||
|
||||
def execute(self):
|
||||
(
|
||||
copy_status,
|
||||
fu_profile_dir,
|
||||
force_sync,
|
||||
) = self.get_copy_fusion_prefs_settings()
|
||||
|
||||
# Get launched application context and return correct app version
|
||||
app_name = self.launch_context.env.get("AVALON_APP_NAME")
|
||||
app_version = get_fusion_version(app_name)
|
||||
if app_version is None:
|
||||
version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT)
|
||||
raise ApplicationLaunchFailed(
|
||||
"Unable to detect valid Fusion version number from app "
|
||||
f"name: {app_name}.\nMake sure to include at least a digit "
|
||||
"to indicate the Fusion version like '18'.\n"
|
||||
f"Detectable Fusion versions are: {version_names}"
|
||||
)
|
||||
|
||||
_, profile_version = FUSION_VERSIONS_DICT[app_version]
|
||||
fu_profile = self.get_fusion_profile_name(profile_version)
|
||||
|
||||
# do a copy of Fusion profile if copy_status toggle is enabled
|
||||
if copy_status and fu_profile_dir is not None:
|
||||
profile_source = self.get_profile_source(profile_version)
|
||||
dest_folder = Path(fu_profile_dir, fu_profile)
|
||||
self.copy_fusion_profile(profile_source, dest_folder, force_sync)
|
||||
|
||||
# Add temporary profile directory variables to customize Fusion
|
||||
# to define where it can read custom scripts and tools from
|
||||
fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
|
||||
self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
|
||||
self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)
|
||||
|
||||
# Add custom Fusion Master Prefs and the temporary
|
||||
# profile directory variables to customize Fusion
|
||||
# to define where it can read custom scripts and tools from
|
||||
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
|
||||
master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
|
||||
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
|
||||
self.launch_context.env[master_prefs_variable] = str(master_prefs)
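For illustration, a variant detected as Fusion 18 maps to profile number 16 in FUSION_VERSIONS_DICT, so after this hook the launch environment ends up roughly as below (the profile directory is a hypothetical value coming from the copy_path project setting):

launch_context.env["FUSION16_PROFILE_DIR"] = "C:/Users/artist/fusion_profile"
launch_context.env["FUSION16_MasterPrefs"] = f"{FUSION_HOST_DIR}/deploy/fusion_shared.prefs"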
|
||||
|
|
@ -1,32 +1,43 @@
|
|||
import os
|
||||
from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
|
||||
from openpype.hosts.fusion import FUSION_HOST_DIR
|
||||
from openpype.hosts.fusion import (
|
||||
FUSION_HOST_DIR,
|
||||
FUSION_VERSIONS_DICT,
|
||||
get_fusion_version,
|
||||
)
|
||||
|
||||
|
||||
class FusionPrelaunch(PreLaunchHook):
|
||||
"""Prepares OpenPype Fusion environment
|
||||
|
||||
Requires FUSION_PYTHON3_HOME to be defined in the environment for Fusion
|
||||
to point at a valid Python 3 build for Fusion. That is Python 3.3-3.10
|
||||
for Fusion 18 and Fusion 3.6 for Fusion 16 and 17.
|
||||
|
||||
This also sets FUSION16_MasterPrefs to apply the fusion master prefs
|
||||
as set in openpype/hosts/fusion/deploy/fusion_shared.prefs to enable
|
||||
the OpenPype menu and force Python 3 over Python 2.
|
||||
|
||||
"""
|
||||
Prepares OpenPype Fusion environment.
|
||||
Requires correct Python home variable to be defined in the environment
|
||||
settings for Fusion to point at a valid Python 3 build for Fusion.
|
||||
Python3 versions that are supported by Fusion:
|
||||
Fusion 9, 16, 17 : Python 3.6
|
||||
Fusion 18 : Python 3.6 - 3.10
|
||||
"""
|
||||
|
||||
app_groups = ["fusion"]
|
||||
order = 1
|
||||
|
||||
def execute(self):
|
||||
# making sure python 3 is installed at provided path
|
||||
# Py 3.3-3.10 for Fusion 18+ or Py 3.6 for Fu 16-17
|
||||
py3_var = "FUSION_PYTHON3_HOME"
|
||||
app_data = self.launch_context.env.get("AVALON_APP_NAME")
|
||||
app_version = get_fusion_version(app_data)
|
||||
if not app_version:
|
||||
raise ApplicationLaunchFailed(
|
||||
"Fusion version information not found in System settings.\n"
|
||||
"The key field in the 'applications/fusion/variants' should "
|
||||
"consist a number, corresponding to major Fusion version."
|
||||
)
|
||||
py3_var, _ = FUSION_VERSIONS_DICT[app_version]
|
||||
fusion_python3_home = self.launch_context.env.get(py3_var, "")
|
||||
|
||||
self.log.info(f"Looking for Python 3 in: {fusion_python3_home}")
|
||||
for path in fusion_python3_home.split(os.pathsep):
|
||||
# Allow defining multiple paths to allow "fallback" to other
|
||||
# path. But make to set only a single path as final variable.
|
||||
# Allow defining multiple paths, separated by os.pathsep,
|
||||
# to allow "fallback" to other path.
|
||||
# But make sure to set only a single path as the final variable.
|
||||
py3_dir = os.path.normpath(path)
|
||||
if os.path.isdir(py3_dir):
|
||||
break
|
||||
|
|
@ -43,19 +54,10 @@ class FusionPrelaunch(PreLaunchHook):
|
|||
self.launch_context.env[py3_var] = py3_dir
|
||||
|
||||
# Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH
|
||||
self.launch_context.env["PATH"] += ";" + py3_dir
|
||||
if app_version >= 18:
|
||||
self.launch_context.env["PATH"] += os.pathsep + py3_dir
|
||||
|
||||
# Fusion 16 and 17 use FUSION16_PYTHON36_HOME instead of
|
||||
# FUSION_PYTHON3_HOME and will only work with a Python 3.6 version
|
||||
# TODO: Detect Fusion version to only set for specific Fusion build
|
||||
self.launch_context.env["FUSION16_PYTHON36_HOME"] = py3_dir
|
||||
self.launch_context.env[py3_var] = py3_dir
|
||||
|
||||
# Add our Fusion Master Prefs which is the only way to customize
|
||||
# Fusion to define where it can read custom scripts and tools from
|
||||
self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}")
|
||||
self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR
|
||||
|
||||
pref_var = "FUSION16_MasterPrefs" # used by Fusion 16, 17 and 18
|
||||
prefs = os.path.join(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
|
||||
self.log.info(f"Setting {pref_var}: {prefs}")
|
||||
self.launch_context.env[pref_var] = prefs
|
||||
|
|
|
|||
|
|
@ -4,29 +4,34 @@ import qtawesome
|
|||
|
||||
from openpype.hosts.fusion.api import (
|
||||
get_current_comp,
|
||||
comp_lock_and_undo_chunk
|
||||
comp_lock_and_undo_chunk,
|
||||
)
|
||||
|
||||
from openpype.lib import BoolDef
|
||||
from openpype.lib import (
|
||||
BoolDef,
|
||||
EnumDef,
|
||||
)
|
||||
from openpype.pipeline import (
|
||||
legacy_io,
|
||||
Creator,
|
||||
CreatedInstance
|
||||
CreatedInstance,
|
||||
)
|
||||
from openpype.client import (
|
||||
get_asset_by_name,
|
||||
)
|
||||
from openpype.client import get_asset_by_name
|
||||
|
||||
|
||||
class CreateSaver(Creator):
|
||||
identifier = "io.openpype.creators.fusion.saver"
|
||||
name = "saver"
|
||||
label = "Saver"
|
||||
label = "Render (saver)"
|
||||
name = "render"
|
||||
family = "render"
|
||||
default_variants = ["Main"]
|
||||
|
||||
default_variants = ["Main", "Mask"]
|
||||
description = "Fusion Saver to generate image sequence"
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
instance_attributes = ["reviewable"]
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
# TODO: Add pre_create attributes to choose file format?
|
||||
file_format = "OpenEXRFormat"
|
||||
|
||||
|
|
@ -58,7 +63,8 @@ class CreateSaver(Creator):
|
|||
family=self.family,
|
||||
subset_name=subset_name,
|
||||
data=instance_data,
|
||||
creator=self)
|
||||
creator=self,
|
||||
)
|
||||
|
||||
# Insert the transient data
|
||||
instance.transient_data["tool"] = saver
|
||||
|
|
@ -68,11 +74,9 @@ class CreateSaver(Creator):
|
|||
return instance
|
||||
|
||||
def collect_instances(self):
|
||||
|
||||
comp = get_current_comp()
|
||||
tools = comp.GetToolList(False, "Saver").values()
|
||||
for tool in tools:
|
||||
|
||||
data = self.get_managed_tool_data(tool)
|
||||
if not data:
|
||||
data = self._collect_unmanaged_saver(tool)
|
||||
|
|
@ -90,7 +94,6 @@ class CreateSaver(Creator):
|
|||
|
||||
def update_instances(self, update_list):
|
||||
for created_inst, _changes in update_list:
|
||||
|
||||
new_data = created_inst.data_to_store()
|
||||
tool = created_inst.transient_data["tool"]
|
||||
self._update_tool_with_data(tool, new_data)
|
||||
|
|
@ -139,7 +142,6 @@ class CreateSaver(Creator):
|
|||
tool.SetAttrs({"TOOLS_Name": subset})
|
||||
|
||||
def _collect_unmanaged_saver(self, tool):
|
||||
|
||||
# TODO: this should not be done this way - this should actually
|
||||
# get the data as stored on the tool explicitly (however)
|
||||
# that would disallow any 'regular saver' to be collected
|
||||
|
|
@ -153,8 +155,7 @@ class CreateSaver(Creator):
|
|||
asset = legacy_io.Session["AVALON_ASSET"]
|
||||
task = legacy_io.Session["AVALON_TASK"]
|
||||
|
||||
asset_doc = get_asset_by_name(project_name=project,
|
||||
asset_name=asset)
|
||||
asset_doc = get_asset_by_name(project_name=project, asset_name=asset)
|
||||
|
||||
path = tool["Clip"][comp.TIME_UNDEFINED]
|
||||
fname = os.path.basename(path)
|
||||
|
|
@ -178,21 +179,20 @@ class CreateSaver(Creator):
|
|||
"variant": variant,
|
||||
"active": not passthrough,
|
||||
"family": self.family,
|
||||
|
||||
# Unique identifier for instance and this creator
|
||||
"id": "pyblish.avalon.instance",
|
||||
"creator_identifier": self.identifier
|
||||
"creator_identifier": self.identifier,
|
||||
}
|
||||
|
||||
def get_managed_tool_data(self, tool):
|
||||
"""Return data of the tool if it matches creator identifier"""
|
||||
data = tool.GetData('openpype')
|
||||
data = tool.GetData("openpype")
|
||||
if not isinstance(data, dict):
|
||||
return
|
||||
|
||||
required = {
|
||||
"id": "pyblish.avalon.instance",
|
||||
"creator_identifier": self.identifier
|
||||
"creator_identifier": self.identifier,
|
||||
}
|
||||
for key, value in required.items():
|
||||
if key not in data or data[key] != value:
|
||||
|
|
@ -205,11 +205,40 @@ class CreateSaver(Creator):
|
|||
|
||||
return data
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
return [
|
||||
BoolDef(
|
||||
"review",
|
||||
default=True,
|
||||
label="Review"
|
||||
)
|
||||
def get_pre_create_attr_defs(self):
|
||||
"""Settings for create page"""
|
||||
attr_defs = [
|
||||
self._get_render_target_enum(),
|
||||
self._get_reviewable_bool(),
|
||||
]
|
||||
return attr_defs
|
||||
|
||||
def get_instance_attr_defs(self):
|
||||
"""Settings for publish page"""
|
||||
attr_defs = [
|
||||
self._get_render_target_enum(),
|
||||
self._get_reviewable_bool(),
|
||||
]
|
||||
return attr_defs
|
||||
|
||||
# These functions below should be moved to another file
|
||||
# so they can be used by other plugins (plugin.py?).
|
||||
|
||||
def _get_render_target_enum(self):
|
||||
rendering_targets = {
|
||||
"local": "Local machine rendering",
|
||||
"frames": "Use existing frames",
|
||||
}
|
||||
if "farm_rendering" in self.instance_attributes:
|
||||
rendering_targets["farm"] = "Farm rendering"
|
||||
|
||||
return EnumDef(
|
||||
"render_target", items=rendering_targets, label="Render target"
|
||||
)
|
||||
|
||||
def _get_reviewable_bool(self):
|
||||
return BoolDef(
|
||||
"review",
|
||||
default=("reviewable" in self.instance_attributes),
|
||||
label="Review",
|
||||
)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,50 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
import os
|
||||
|
||||
|
||||
class CollectFusionExpectedFrames(
|
||||
pyblish.api.InstancePlugin, publish.ColormanagedPyblishPluginMixin
|
||||
):
|
||||
"""Collect all frames needed to publish expected frames"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.5
|
||||
label = "Collect Expected Frames"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
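# Example with hypothetical values: path "render/beauty.exr" and frames
# 1001-1003 yield ["beauty1001.exr", "beauty1002.exr", "beauty1003.exr"].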
|
||||
repre = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"frameStart": f"%0{len(str(frame_end))}d" % frame_start,
|
||||
"files": files,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
|
||||
self.set_representation_colorspace(
|
||||
representation=repre,
|
||||
context=context,
|
||||
)
|
||||
|
||||
# review representation
|
||||
if instance.data.get("review", False):
|
||||
repre["tags"] = ["review"]
|
||||
|
||||
# add the repre to the instance
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
instance.data["representations"].append(repre)
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFusionRenderMode(pyblish.api.InstancePlugin):
|
||||
"""Collect current comp's render Mode
|
||||
|
||||
Options:
|
||||
local
|
||||
farm
|
||||
|
||||
Note that this value is set for each comp separately. When you save the
|
||||
comp this information will be stored in that file. If for some reason the
|
||||
available tool does not visualize which render mode is set for the
|
||||
current comp, please run the following line in the console (Py2)
|
||||
|
||||
comp.GetData("openpype.rendermode")
|
||||
|
||||
This will return the name of the current render mode as seen above under
|
||||
Options.
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.4
|
||||
label = "Collect Render Mode"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
"""Collect all image sequence tools"""
|
||||
options = ["local", "farm"]
|
||||
|
||||
comp = instance.context.data.get("currentComp")
|
||||
if not comp:
|
||||
raise RuntimeError("No comp previously collected, unable to "
|
||||
"retrieve Fusion version.")
|
||||
|
||||
rendermode = comp.GetData("openpype.rendermode") or "local"
|
||||
assert rendermode in options, "Must be supported render mode"
|
||||
|
||||
self.log.info("Render mode: {0}".format(rendermode))
|
||||
|
||||
# Append family
|
||||
family = "render.{0}".format(rendermode)
|
||||
instance.data["families"].append(family)
|
||||
25
openpype/hosts/fusion/plugins/publish/collect_renders.py
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFusionRenders(pyblish.api.InstancePlugin):
|
||||
"""Collect current saver node's render Mode
|
||||
|
||||
Options:
|
||||
local (Render locally)
|
||||
frames (Use existing frames)
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.4
|
||||
label = "Collect Renders"
|
||||
hosts = ["fusion"]
|
||||
families = ["render"]
|
||||
|
||||
def process(self, instance):
|
||||
render_target = instance.data["render_target"]
|
||||
family = instance.data["family"]
|
||||
|
||||
# add targeted family to families
|
||||
instance.data["families"].append(
|
||||
"{}.{}".format(family, render_target)
|
||||
)
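# e.g. family "render" with render_target "local" adds "render.local",
# which is what the FusionRenderLocal extractor targets.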
|
||||
109
openpype/hosts/fusion/plugins/publish/extract_render_local.py
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
import logging
|
||||
import contextlib
|
||||
import pyblish.api
|
||||
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def enabled_savers(comp, savers):
|
||||
"""Enable only the `savers` in Comp during the context.
|
||||
|
||||
Any Saver tool in the passed composition that is not in the savers list
|
||||
will be set to passthrough during the context.
|
||||
|
||||
Args:
|
||||
comp (object): Fusion composition object.
|
||||
savers (list): List of Saver tool objects.
|
||||
|
||||
"""
|
||||
passthrough_key = "TOOLB_PassThrough"
|
||||
original_states = {}
|
||||
enabled_save_names = {saver.Name for saver in savers}
|
||||
try:
|
||||
all_savers = comp.GetToolList(False, "Saver").values()
|
||||
for saver in all_savers:
|
||||
original_state = saver.GetAttrs()[passthrough_key]
|
||||
original_states[saver] = original_state
|
||||
|
||||
# The passthrough state we want to set (passthrough != enabled)
|
||||
state = saver.Name not in enabled_save_names
|
||||
if state != original_state:
|
||||
saver.SetAttrs({passthrough_key: state})
|
||||
yield
|
||||
finally:
|
||||
for saver, original_state in original_states.items():
|
||||
saver.SetAttrs({"TOOLB_PassThrough": original_state})
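A minimal usage sketch of `enabled_savers()`; `comp` is a Fusion composition object, the chosen saver is a hypothetical pick, and the render call mirrors the one used later in this plug-in:

savers = list(comp.GetToolList(False, "Saver").values())[:1]
with enabled_savers(comp, savers):
    comp.Render({"Start": 1001, "End": 1050, "Wait": True})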
|
||||
|
||||
|
||||
class FusionRenderLocal(pyblish.api.InstancePlugin):
|
||||
"""Render the current Fusion composition locally."""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.2
|
||||
label = "Render Local"
|
||||
hosts = ["fusion"]
|
||||
families = ["render.local"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
# Start render
|
||||
self.render_once(context)
|
||||
|
||||
# Log render status
|
||||
self.log.info(
|
||||
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
|
||||
nm=instance.data["name"],
|
||||
ast=instance.data["asset"],
|
||||
tsk=instance.data["task"],
|
||||
)
|
||||
)
|
||||
|
||||
def render_once(self, context):
|
||||
"""Render context comp only once, even with more render instances"""
|
||||
|
||||
# This plug-in assumes all render nodes get rendered at the same time
|
||||
# to speed up the rendering. The check below makes sure that we only
|
||||
# execute the rendering once and not for each instance.
|
||||
key = f"__hasRun{self.__class__.__name__}"
|
||||
|
||||
savers_to_render = [
|
||||
# Get the saver tool from the instance
|
||||
instance[0] for instance in context if
|
||||
# Only active instances
|
||||
instance.data.get("publish", True) and
|
||||
# Only render.local instances
|
||||
"render.local" in instance.data["families"]
|
||||
]
|
||||
|
||||
if key not in context.data:
|
||||
# We initialize as false to indicate it wasn't successful yet
|
||||
# so we can keep track of whether Fusion succeeded
|
||||
context.data[key] = False
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
|
||||
self.log.info("Starting Fusion render")
|
||||
self.log.info(f"Start frame: {frame_start}")
|
||||
self.log.info(f"End frame: {frame_end}")
|
||||
saver_names = ", ".join(saver.Name for saver in savers_to_render)
|
||||
self.log.info(f"Rendering tools: {saver_names}")
|
||||
|
||||
with comp_lock_and_undo_chunk(current_comp):
|
||||
with enabled_savers(current_comp, savers_to_render):
|
||||
result = current_comp.Render(
|
||||
{
|
||||
"Start": frame_start,
|
||||
"End": frame_end,
|
||||
"Wait": True,
|
||||
}
|
||||
)
|
||||
|
||||
context.data[key] = bool(result)
|
||||
|
||||
if context.data[key] is False:
|
||||
raise RuntimeError("Comp render failed")
|
||||
|
|
@ -1,100 +0,0 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from openpype.hosts.fusion.api import comp_lock_and_undo_chunk
|
||||
|
||||
|
||||
class Fusionlocal(pyblish.api.InstancePlugin,
|
||||
publish.ColormanagedPyblishPluginMixin):
|
||||
"""Render the current Fusion composition locally.
|
||||
|
||||
Extract the result of savers by starting a comp render
|
||||
This will run the local render of Fusion.
|
||||
|
||||
"""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.1
|
||||
label = "Render Local"
|
||||
hosts = ["fusion"]
|
||||
families = ["render.local"]
|
||||
|
||||
def process(self, instance):
|
||||
context = instance.context
|
||||
|
||||
# Start render
|
||||
self.render_once(context)
|
||||
|
||||
# Log render status
|
||||
self.log.info(
|
||||
"Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format(
|
||||
nm=instance.data["name"],
|
||||
ast=instance.data["asset"],
|
||||
tsk=instance.data["task"],
|
||||
)
|
||||
)
|
||||
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
repre = {
|
||||
"name": ext[1:],
|
||||
"ext": ext[1:],
|
||||
"frameStart": f"%0{len(str(frame_end))}d" % frame_start,
|
||||
"files": files,
|
||||
"stagingDir": output_dir,
|
||||
}
|
||||
|
||||
self.set_representation_colorspace(
|
||||
representation=repre,
|
||||
context=context,
|
||||
)
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
instance.data["representations"].append(repre)
|
||||
|
||||
# review representation
|
||||
if instance.data.get("review", False):
|
||||
repre["tags"] = ["review", "ftrackreview"]
|
||||
|
||||
def render_once(self, context):
|
||||
"""Render context comp only once, even with more render instances"""
|
||||
|
||||
# This plug-in assumes all render nodes get rendered at the same time
|
||||
# to speed up the rendering. The check below makes sure that we only
|
||||
# execute the rendering once and not for each instance.
|
||||
key = f"__hasRun{self.__class__.__name__}"
|
||||
if key not in context.data:
|
||||
# We initialize as false to indicate it wasn't successful yet
|
||||
# so we can keep track of whether Fusion succeeded
|
||||
context.data[key] = False
|
||||
|
||||
current_comp = context.data["currentComp"]
|
||||
frame_start = context.data["frameStartHandle"]
|
||||
frame_end = context.data["frameEndHandle"]
|
||||
|
||||
self.log.info("Starting Fusion render")
|
||||
self.log.info(f"Start frame: {frame_start}")
|
||||
self.log.info(f"End frame: {frame_end}")
|
||||
|
||||
with comp_lock_and_undo_chunk(current_comp):
|
||||
result = current_comp.Render(
|
||||
{
|
||||
"Start": frame_start,
|
||||
"End": frame_end,
|
||||
"Wait": True,
|
||||
}
|
||||
)
|
||||
|
||||
context.data[key] = bool(result)
|
||||
|
||||
if context.data[key] is False:
|
||||
raise RuntimeError("Comp render failed")
|
||||
|
|
@ -14,22 +14,19 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
|
|||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
actions = [RepairAction]
|
||||
label = "Validate Create Folder Checked"
|
||||
families = ["render"]
|
||||
hosts = ["fusion"]
|
||||
actions = [SelectInvalidAction]
|
||||
actions = [RepairAction, SelectInvalidAction]
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance):
|
||||
active = instance.data.get("active", instance.data.get("publish"))
|
||||
if not active:
|
||||
return []
|
||||
|
||||
tool = instance[0]
|
||||
create_dir = tool.GetInput("CreateDir")
|
||||
if create_dir == 0.0:
|
||||
cls.log.error("%s has Create Folder turned off" % instance[0].Name)
|
||||
cls.log.error(
|
||||
"%s has Create Folder turned off" % instance[0].Name
|
||||
)
|
||||
return [tool]
|
||||
|
||||
def process(self, instance):
|
||||
|
|
@ -37,7 +34,8 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
|
|||
if invalid:
|
||||
raise PublishValidationError(
|
||||
"Found Saver with Create Folder During Render checked off",
|
||||
title=self.label)
|
||||
title=self.label,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def repair(cls, instance):
|
||||
|
|
|
|||
|
|
@ -0,0 +1,78 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
|
||||
from openpype.pipeline.publish import RepairAction
|
||||
from openpype.pipeline import PublishValidationError
|
||||
|
||||
from openpype.hosts.fusion.api.action import SelectInvalidAction
|
||||
|
||||
|
||||
class ValidateLocalFramesExistence(pyblish.api.InstancePlugin):
|
||||
"""Checks if files for savers that's set
|
||||
to publish expected frames.
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
label = "Validate Expected Frames Exists"
|
||||
families = ["render"]
|
||||
hosts = ["fusion"]
|
||||
actions = [RepairAction, SelectInvalidAction]
|
||||
|
||||
@classmethod
|
||||
def get_invalid(cls, instance, non_existing_frames=None):
|
||||
if non_existing_frames is None:
|
||||
non_existing_frames = []
|
||||
|
||||
if instance.data.get("render_target") == "frames":
|
||||
tool = instance[0]
|
||||
|
||||
frame_start = instance.data["frameStart"]
|
||||
frame_end = instance.data["frameEnd"]
|
||||
path = instance.data["path"]
|
||||
output_dir = instance.data["outputDir"]
|
||||
|
||||
basename = os.path.basename(path)
|
||||
head, ext = os.path.splitext(basename)
|
||||
files = [
|
||||
f"{head}{str(frame).zfill(4)}{ext}"
|
||||
for frame in range(frame_start, frame_end + 1)
|
||||
]
|
||||
|
||||
for file in files:
|
||||
if not os.path.exists(os.path.join(output_dir, file)):
|
||||
cls.log.error(
|
||||
f"Missing file: {os.path.join(output_dir, file)}"
|
||||
)
|
||||
non_existing_frames.append(file)
|
||||
|
||||
if len(non_existing_frames) > 0:
cls.log.error(f"Some of {tool.Name}'s files do not exist")
return [tool]
|
||||
|
||||
def process(self, instance):
|
||||
non_existing_frames = []
|
||||
invalid = self.get_invalid(instance, non_existing_frames)
|
||||
if invalid:
|
||||
raise PublishValidationError(
|
||||
"{} is set to publish existing frames but "
|
||||
"some frames are missing. "
|
||||
"The missing file(s) are:\n\n{}".format(
|
||||
invalid[0].Name,
|
||||
"\n\n".join(non_existing_frames),
|
||||
),
|
||||
title=self.label,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def repair(cls, instance):
|
||||
invalid = cls.get_invalid(instance)
|
||||
if invalid:
|
||||
tool = invalid[0]
|
||||
|
||||
# Change render target to local to render locally
|
||||
tool.SetData("openpype.creator_attributes.render_target", "local")
|
||||
|
||||
cls.log.info(
|
||||
f"Reload the publisher and {tool.Name} "
|
||||
"will be set to render locally"
|
||||
)
|
||||
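For reference, the frame-existence check in get_invalid above amounts to building the expected file name for every frame and testing it on disk. A minimal standalone sketch of that idea follows; the path and frame range are invented for illustration and are not part of this commit:

import os


def missing_frames(path, output_dir, frame_start, frame_end):
    """Return expected frame file names that do not exist in output_dir."""
    head, ext = os.path.splitext(os.path.basename(path))
    missing = []
    for frame in range(frame_start, frame_end + 1):
        filename = f"{head}{str(frame).zfill(4)}{ext}"
        if not os.path.exists(os.path.join(output_dir, filename)):
            missing.append(filename)
    return missing


# Hypothetical saver output, frames 1001-1005
print(missing_frames("render_beauty.exr", "/tmp/renders", 1001, 1005))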
|
|
@ -1,112 +0,0 @@
|
|||
from qtpy import QtWidgets
|
||||
import qtawesome
|
||||
from openpype.hosts.fusion.api import get_current_comp
|
||||
|
||||
|
||||
_help = {"local": "Render the comp on your own machine and publish "
|
||||
"it from that the destination folder",
|
||||
"farm": "Submit a Fusion render job to a Render farm to use all other"
|
||||
" computers and add a publish job"}
|
||||
|
||||
|
||||
class SetRenderMode(QtWidgets.QWidget):
|
||||
|
||||
def __init__(self, parent=None):
|
||||
QtWidgets.QWidget.__init__(self, parent)
|
||||
|
||||
self._comp = get_current_comp()
|
||||
self._comp_name = self._get_comp_name()
|
||||
|
||||
self.setWindowTitle("Set Render Mode")
|
||||
self.setFixedSize(300, 175)
|
||||
|
||||
layout = QtWidgets.QVBoxLayout()
|
||||
|
||||
# region comp info
|
||||
comp_info_layout = QtWidgets.QHBoxLayout()
|
||||
|
||||
update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh",
|
||||
color="white"), "")
|
||||
update_btn.setFixedWidth(25)
|
||||
update_btn.setFixedHeight(25)
|
||||
|
||||
comp_information = QtWidgets.QLineEdit()
|
||||
comp_information.setEnabled(False)
|
||||
|
||||
comp_info_layout.addWidget(comp_information)
|
||||
comp_info_layout.addWidget(update_btn)
|
||||
# endregion comp info
|
||||
|
||||
# region modes
|
||||
mode_options = QtWidgets.QComboBox()
|
||||
mode_options.addItems(_help.keys())
|
||||
|
||||
mode_information = QtWidgets.QTextEdit()
|
||||
mode_information.setReadOnly(True)
|
||||
# endregion modes
|
||||
|
||||
accept_btn = QtWidgets.QPushButton("Accept")
|
||||
|
||||
layout.addLayout(comp_info_layout)
|
||||
layout.addWidget(mode_options)
|
||||
layout.addWidget(mode_information)
|
||||
layout.addWidget(accept_btn)
|
||||
|
||||
self.setLayout(layout)
|
||||
|
||||
self.comp_information = comp_information
|
||||
self.update_btn = update_btn
|
||||
|
||||
self.mode_options = mode_options
|
||||
self.mode_information = mode_information
|
||||
|
||||
self.accept_btn = accept_btn
|
||||
|
||||
self.connections()
|
||||
self.update()
|
||||
|
||||
# Force updated render mode help text
|
||||
self._update_rendermode_info()
|
||||
|
||||
def connections(self):
|
||||
"""Build connections between code and buttons"""
|
||||
|
||||
self.update_btn.clicked.connect(self.update)
|
||||
self.accept_btn.clicked.connect(self._set_comp_rendermode)
|
||||
self.mode_options.currentIndexChanged.connect(
|
||||
self._update_rendermode_info)
|
||||
|
||||
def update(self):
|
||||
"""Update all information in the UI"""
|
||||
|
||||
self._comp = get_current_comp()
|
||||
self._comp_name = self._get_comp_name()
|
||||
self.comp_information.setText(self._comp_name)
|
||||
|
||||
# Update current comp settings
|
||||
mode = self._get_comp_rendermode()
|
||||
index = self.mode_options.findText(mode)
|
||||
self.mode_options.setCurrentIndex(index)
|
||||
|
||||
def _update_rendermode_info(self):
|
||||
rendermode = self.mode_options.currentText()
|
||||
self.mode_information.setText(_help[rendermode])
|
||||
|
||||
def _get_comp_name(self):
|
||||
return self._comp.GetAttrs("COMPS_Name")
|
||||
|
||||
def _get_comp_rendermode(self):
|
||||
return self._comp.GetData("openpype.rendermode") or "local"
|
||||
|
||||
def _set_comp_rendermode(self):
|
||||
rendermode = self.mode_options.currentText()
|
||||
self._comp.SetData("openpype.rendermode", rendermode)
|
||||
|
||||
self._comp.Print("Updated render mode to '%s'\n" % rendermode)
|
||||
self.hide()
|
||||
|
||||
def _validation(self):
|
||||
ui_mode = self.mode_options.currentText()
|
||||
comp_mode = self._get_comp_rendermode()
|
||||
|
||||
return comp_mode == ui_mode
|
||||
|
|
@ -1221,7 +1221,7 @@ def set_track_color(track_item, color):
|
|||
|
||||
def check_inventory_versions(track_items=None):
|
||||
"""
|
||||
Actual version color idetifier of Loaded containers
|
||||
Actual version color identifier of Loaded containers
|
||||
|
||||
Check all track items and filter only
|
||||
Loader nodes for its version. It will get all versions from database
|
||||
|
|
@ -1249,10 +1249,10 @@ def check_inventory_versions(track_items=None):
|
|||
project_name = legacy_io.active_project()
|
||||
filter_result = filter_containers(containers, project_name)
|
||||
for container in filter_result.latest:
|
||||
set_track_color(container["_item"], clip_color)
|
||||
set_track_color(container["_item"], clip_color_last)
|
||||
|
||||
for container in filter_result.outdated:
|
||||
set_track_color(container["_item"], clip_color_last)
|
||||
set_track_color(container["_item"], clip_color)
|
||||
|
||||
|
||||
def selection_changed_timeline(event):
|
||||
|
|
|
|||
|
|
@ -146,6 +146,8 @@ class CreatorWidget(QtWidgets.QDialog):
|
|||
return " ".join([str(m.group(0)).capitalize() for m in matches])
|
||||
|
||||
def create_row(self, layout, type, text, **kwargs):
|
||||
value_keys = ["setText", "setCheckState", "setValue", "setChecked"]
|
||||
|
||||
# get type attribute from qwidgets
|
||||
attr = getattr(QtWidgets, type)
|
||||
|
||||
|
|
@ -167,14 +169,27 @@ class CreatorWidget(QtWidgets.QDialog):
|
|||
|
||||
# assign the created attribute to variable
|
||||
item = getattr(self, attr_name)
|
||||
|
||||
# set attributes to item which are not values
|
||||
for func, val in kwargs.items():
|
||||
if func in value_keys:
|
||||
continue
|
||||
|
||||
if getattr(item, func):
|
||||
log.debug("Setting {} to {}".format(func, val))
|
||||
func_attr = getattr(item, func)
|
||||
if isinstance(val, tuple):
|
||||
func_attr(*val)
|
||||
else:
|
||||
func_attr(val)
|
||||
|
||||
# set values to item
|
||||
for value_item in value_keys:
|
||||
if value_item not in kwargs:
|
||||
continue
|
||||
if getattr(item, value_item):
|
||||
getattr(item, value_item)(kwargs[value_item])
|
||||
|
||||
# add to layout
|
||||
layout.addRow(label, item)
|
||||
|
||||
|
|
@ -276,8 +291,11 @@ class CreatorWidget(QtWidgets.QDialog):
|
|||
elif v["type"] == "QSpinBox":
|
||||
data[k]["value"] = self.create_row(
|
||||
content_layout, "QSpinBox", v["label"],
|
||||
setValue=v["value"], setMinimum=0,
|
||||
setValue=v["value"],
|
||||
setDisplayIntegerBase=10000,
|
||||
setRange=(0, 99999), setMinimum=0,
|
||||
setMaximum=100000, setToolTip=tool_tip)
|
||||
|
||||
return data
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,11 @@ from pymxs import runtime as rt
|
|||
from typing import Union
|
||||
import contextlib
|
||||
|
||||
from openpype.pipeline.context_tools import (
|
||||
get_current_project_asset,
|
||||
get_current_project
|
||||
)
|
||||
|
||||
|
||||
JSON_PREFIX = "JSON::"
|
||||
|
||||
|
|
@ -157,6 +162,112 @@ def get_multipass_setting(project_setting=None):
|
|||
["multipass"])
|
||||
|
||||
|
||||
def set_scene_resolution(width: int, height: int):
|
||||
"""Set the render resolution
|
||||
|
||||
Args:
|
||||
width(int): value of the width
|
||||
height(int): value of the height
|
||||
|
||||
Returns:
|
||||
None
|
||||
|
||||
"""
|
||||
rt.renderWidth = width
|
||||
rt.renderHeight = height
|
||||
|
||||
|
||||
def reset_scene_resolution():
|
||||
"""Apply the scene resolution from the project definition
|
||||
|
||||
Scene resolution can be overwritten by an asset if the asset.data
contains any information regarding scene resolution.
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
data = ["data.resolutionWidth", "data.resolutionHeight"]
|
||||
project_resolution = get_current_project(fields=data)
|
||||
project_resolution_data = project_resolution["data"]
|
||||
asset_resolution = get_current_project_asset(fields=data)
|
||||
asset_resolution_data = asset_resolution["data"]
|
||||
# Set project resolution
|
||||
project_width = int(project_resolution_data.get("resolutionWidth", 1920))
|
||||
project_height = int(project_resolution_data.get("resolutionHeight", 1080))
|
||||
width = int(asset_resolution_data.get("resolutionWidth", project_width))
|
||||
height = int(asset_resolution_data.get("resolutionHeight", project_height))
|
||||
|
||||
set_scene_resolution(width, height)
|
||||
|
||||
|
||||
def get_frame_range() -> dict:
|
||||
"""Get the current assets frame range and handles.
|
||||
|
||||
Returns:
|
||||
dict: with frame start, frame end, handle start, handle end.
|
||||
"""
|
||||
# Set frame start/end
|
||||
asset = get_current_project_asset()
|
||||
frame_start = asset["data"].get("frameStart")
|
||||
frame_end = asset["data"].get("frameEnd")
|
||||
# Backwards compatibility
|
||||
if frame_start is None or frame_end is None:
|
||||
frame_start = asset["data"].get("edit_in")
|
||||
frame_end = asset["data"].get("edit_out")
|
||||
if frame_start is None or frame_end is None:
|
||||
return
|
||||
handles = asset["data"].get("handles") or 0
|
||||
handle_start = asset["data"].get("handleStart")
|
||||
if handle_start is None:
|
||||
handle_start = handles
|
||||
handle_end = asset["data"].get("handleEnd")
|
||||
if handle_end is None:
|
||||
handle_end = handles
|
||||
return {
|
||||
"frameStart": frame_start,
|
||||
"frameEnd": frame_end,
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end
|
||||
}
|
||||
|
||||
|
||||
def reset_frame_range(fps: bool = True):
|
||||
"""Set frame range to current asset.
|
||||
This is part of 3dsmax documentation:
|
||||
|
||||
animationRange: A System Global variable which lets you get and
|
||||
set an Interval value that defines the start and end frames
|
||||
of the Active Time Segment.
|
||||
frameRate: A System Global variable which lets you get
|
||||
and set an Integer value that defines the current
|
||||
scene frame rate in frames-per-second.
|
||||
"""
|
||||
if fps:
|
||||
data_fps = get_current_project(fields=["data.fps"])
|
||||
fps_number = float(data_fps["data"]["fps"])
|
||||
rt.frameRate = fps_number
|
||||
frame_range = get_frame_range()
|
||||
frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
|
||||
frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
|
||||
frange_cmd = f"animationRange = interval {frame_start} {frame_end}"
|
||||
rt.execute(frange_cmd)
|
||||
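For reference, the same time-segment update can also be written without going through rt.execute on a MAXScript string, assuming pymxs exposes the interval constructor as usual; the values below are illustrative only:

from pymxs import runtime as rt

# Hypothetical frame range; in reset_frame_range() these come from get_frame_range()
frame_start, frame_end = 1001, 1100

rt.frameRate = 25.0
rt.animationRange = rt.interval(frame_start, frame_end)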
|
||||
|
||||
def set_context_setting():
|
||||
"""Apply the project settings from the project definition
|
||||
|
||||
Settings can be overwritten by an asset if the asset.data contains
|
||||
any information regarding those settings.
|
||||
|
||||
Examples of settings:
|
||||
frame range
|
||||
resolution
|
||||
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
reset_scene_resolution()
|
||||
|
||||
|
||||
def get_max_version():
|
||||
"""
|
||||
Args:
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ from openpype.hosts.max.api.lib import (
|
|||
get_current_renderer,
|
||||
get_default_render_folder
|
||||
)
|
||||
from openpype.pipeline.context_tools import get_current_project_asset
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
|
@ -34,14 +35,20 @@ class RenderProducts(object):
|
|||
filename,
|
||||
container)
|
||||
|
||||
context = get_current_project_asset()
|
||||
startFrame = context["data"].get("frameStart")
|
||||
endFrame = context["data"].get("frameEnd") + 1
|
||||
|
||||
img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa
|
||||
full_render_list = []
|
||||
beauty = self.beauty_render_product(output_file, img_fmt)
|
||||
full_render_list.append(beauty)
|
||||
full_render_list = self.beauty_render_product(output_file,
|
||||
startFrame,
|
||||
endFrame,
|
||||
img_fmt)
|
||||
|
||||
renderer_class = get_current_renderer()
|
||||
renderer = str(renderer_class).split(":")[0]
|
||||
|
||||
|
||||
if renderer == "VUE_File_Renderer":
|
||||
return full_render_list
|
||||
|
||||
|
|
@ -54,6 +61,8 @@ class RenderProducts(object):
|
|||
"Quicksilver_Hardware_Renderer",
|
||||
]:
|
||||
render_elem_list = self.render_elements_product(output_file,
|
||||
startFrame,
|
||||
endFrame,
|
||||
img_fmt)
|
||||
if render_elem_list:
|
||||
full_render_list.extend(iter(render_elem_list))
|
||||
|
|
@ -61,18 +70,24 @@ class RenderProducts(object):
|
|||
|
||||
if renderer == "Arnold":
|
||||
aov_list = self.arnold_render_product(output_file,
|
||||
startFrame,
|
||||
endFrame,
|
||||
img_fmt)
|
||||
if aov_list:
|
||||
full_render_list.extend(iter(aov_list))
|
||||
return full_render_list
|
||||
|
||||
def beauty_render_product(self, folder, fmt):
|
||||
beauty_output = f"{folder}.####.{fmt}"
|
||||
beauty_output = beauty_output.replace("\\", "/")
|
||||
return beauty_output
|
||||
def beauty_render_product(self, folder, startFrame, endFrame, fmt):
|
||||
beauty_frame_range = []
|
||||
for f in range(startFrame, endFrame):
|
||||
beauty_output = f"{folder}.{f}.{fmt}"
|
||||
beauty_output = beauty_output.replace("\\", "/")
|
||||
beauty_frame_range.append(beauty_output)
|
||||
|
||||
return beauty_frame_range
|
||||
|
||||
# TODO: Get the arnold render product
|
||||
def arnold_render_product(self, folder, fmt):
|
||||
def arnold_render_product(self, folder, startFrame, endFrame, fmt):
|
||||
"""Get all the Arnold AOVs"""
|
||||
aovs = []
|
||||
|
||||
|
|
@ -85,15 +100,17 @@ class RenderProducts(object):
|
|||
for i in range(aov_group_num):
|
||||
# get the specific AOV group
|
||||
for aov in aov_mgr.drivers[i].aov_list:
|
||||
render_element = f"{folder}_{aov.name}.####.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
aovs.append(render_element)
|
||||
for f in range(startFrame, endFrame):
|
||||
render_element = f"{folder}_{aov.name}.{f}.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
aovs.append(render_element)
|
||||
|
||||
# close the AOVs manager window
|
||||
amw.close()
|
||||
|
||||
return aovs
|
||||
|
||||
def render_elements_product(self, folder, fmt):
|
||||
def render_elements_product(self, folder, startFrame, endFrame, fmt):
|
||||
"""Get all the render element output files. """
|
||||
render_dirname = []
|
||||
|
||||
|
|
@ -104,9 +121,10 @@ class RenderProducts(object):
|
|||
renderlayer_name = render_elem.GetRenderElement(i)
|
||||
target, renderpass = str(renderlayer_name).split(":")
|
||||
if renderlayer_name.enabled:
|
||||
render_element = f"{folder}_{renderpass}.####.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
render_dirname.append(render_element)
|
||||
for f in range(startFrame, endFrame):
|
||||
render_element = f"{folder}_{renderpass}.{f}.{fmt}"
|
||||
render_element = render_element.replace("\\", "/")
|
||||
render_dirname.append(render_element)
|
||||
|
||||
return render_dirname
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from qtpy import QtWidgets, QtCore
|
|||
from pymxs import runtime as rt
|
||||
|
||||
from openpype.tools.utils import host_tools
|
||||
from openpype.hosts.max.api import lib
|
||||
|
||||
|
||||
class OpenPypeMenu(object):
|
||||
|
|
@ -107,6 +108,17 @@ class OpenPypeMenu(object):
|
|||
workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu)
|
||||
workfiles_action.triggered.connect(self.workfiles_callback)
|
||||
openpype_menu.addAction(workfiles_action)
|
||||
|
||||
openpype_menu.addSeparator()
|
||||
|
||||
res_action = QtWidgets.QAction("Set Resolution", openpype_menu)
|
||||
res_action.triggered.connect(self.resolution_callback)
|
||||
openpype_menu.addAction(res_action)
|
||||
|
||||
frame_action = QtWidgets.QAction("Set Frame Range", openpype_menu)
|
||||
frame_action.triggered.connect(self.frame_range_callback)
|
||||
openpype_menu.addAction(frame_action)
|
||||
|
||||
return openpype_menu
|
||||
|
||||
def load_callback(self):
|
||||
|
|
@ -128,3 +140,11 @@ class OpenPypeMenu(object):
|
|||
def workfiles_callback(self):
|
||||
"""Callback to show Workfiles tool."""
|
||||
host_tools.show_workfiles(parent=self.main_widget)
|
||||
|
||||
def resolution_callback(self):
|
||||
"""Callback to reset scene resolution"""
|
||||
return lib.reset_scene_resolution()
|
||||
|
||||
def frame_range_callback(self):
|
||||
"""Callback to reset frame range"""
|
||||
return lib.reset_frame_range()
|
||||
|
|
|
|||
|
|
@ -50,6 +50,11 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher):
|
|||
|
||||
self._has_been_setup = True
|
||||
|
||||
def context_setting():
|
||||
return lib.set_context_setting()
|
||||
rt.callbacks.addScript(rt.Name('systemPostNew'),
|
||||
context_setting)
|
||||
|
||||
def has_unsaved_changes(self):
|
||||
# TODO: how to get it from 3dsmax?
|
||||
return True
|
||||
|
|
|
|||
26
openpype/hosts/max/plugins/create/create_maxScene.py
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Creator plugin for creating raw max scene."""
|
||||
from openpype.hosts.max.api import plugin
|
||||
from openpype.pipeline import CreatedInstance
|
||||
|
||||
|
||||
class CreateMaxScene(plugin.MaxCreator):
|
||||
identifier = "io.openpype.creators.max.maxScene"
|
||||
label = "Max Scene"
|
||||
family = "maxScene"
|
||||
icon = "gear"
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
from pymxs import runtime as rt
|
||||
sel_obj = list(rt.selection)
|
||||
instance = super(CreateMaxScene, self).create(
|
||||
subset_name,
|
||||
instance_data,
|
||||
pre_create_data) # type: CreatedInstance
|
||||
container = rt.getNodeByName(instance.data.get("instance_node"))
|
||||
# TODO: Disable "Add to Containers?" Panel
|
||||
# parent the selected cameras into the container
|
||||
for obj in sel_obj:
|
||||
obj.parent = container
|
||||
# for additional work on the node:
|
||||
# instance_node = rt.getNodeByName(instance.get("instance_node"))
|
||||
26
openpype/hosts/max/plugins/create/create_pointcloud.py
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Creator plugin for creating point cloud."""
|
||||
from openpype.hosts.max.api import plugin
|
||||
from openpype.pipeline import CreatedInstance
|
||||
|
||||
|
||||
class CreatePointCloud(plugin.MaxCreator):
|
||||
identifier = "io.openpype.creators.max.pointcloud"
|
||||
label = "Point Cloud"
|
||||
family = "pointcloud"
|
||||
icon = "gear"
|
||||
|
||||
def create(self, subset_name, instance_data, pre_create_data):
|
||||
from pymxs import runtime as rt
|
||||
sel_obj = list(rt.selection)
|
||||
instance = super(CreatePointCloud, self).create(
|
||||
subset_name,
|
||||
instance_data,
|
||||
pre_create_data) # type: CreatedInstance
|
||||
container = rt.getNodeByName(instance.data.get("instance_node"))
|
||||
# TODO: Disable "Add to Containers?" Panel
|
||||
# parent the selected cameras into the container
|
||||
for obj in sel_obj:
|
||||
obj.parent = container
|
||||
# for additional work on the node:
|
||||
# instance_node = rt.getNodeByName(instance.get("instance_node"))
|
||||
|
|
@ -9,7 +9,8 @@ from openpype.hosts.max.api import lib
|
|||
class MaxSceneLoader(load.LoaderPlugin):
|
||||
"""Max Scene Loader"""
|
||||
|
||||
families = ["camera"]
|
||||
families = ["camera",
|
||||
"maxScene"]
|
||||
representations = ["max"]
|
||||
order = -8
|
||||
icon = "code-fork"
|
||||
|
|
@ -46,8 +47,7 @@ class MaxSceneLoader(load.LoaderPlugin):
|
|||
|
||||
path = get_representation_path(representation)
|
||||
node = rt.getNodeByName(container["instance_node"])
|
||||
|
||||
max_objects = self.get_container_children(node)
|
||||
max_objects = node.Children
|
||||
for max_object in max_objects:
|
||||
max_object.source = path
|
||||
|
||||
|
|
|
|||
51
openpype/hosts/max/plugins/load/load_pointcloud.py
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
import os
|
||||
from openpype.pipeline import (
|
||||
load, get_representation_path
|
||||
)
|
||||
from openpype.hosts.max.api.pipeline import containerise
|
||||
from openpype.hosts.max.api import lib
|
||||
|
||||
|
||||
class PointCloudLoader(load.LoaderPlugin):
|
||||
"""Point Cloud Loader"""
|
||||
|
||||
families = ["pointcloud"]
|
||||
representations = ["prt"]
|
||||
order = -8
|
||||
icon = "code-fork"
|
||||
color = "green"
|
||||
|
||||
def load(self, context, name=None, namespace=None, data=None):
|
||||
"""load point cloud by tyCache"""
|
||||
from pymxs import runtime as rt
|
||||
|
||||
filepath = os.path.normpath(self.fname)
|
||||
obj = rt.tyCache()
|
||||
obj.filename = filepath
|
||||
|
||||
prt_container = rt.getNodeByName(f"{obj.name}")
|
||||
|
||||
return containerise(
|
||||
name, [prt_container], context, loader=self.__class__.__name__)
|
||||
|
||||
def update(self, container, representation):
|
||||
"""update the container"""
|
||||
from pymxs import runtime as rt
|
||||
|
||||
path = get_representation_path(representation)
|
||||
node = rt.getNodeByName(container["instance_node"])
|
||||
|
||||
prt_objects = self.get_container_children(node)
|
||||
for prt_object in prt_objects:
|
||||
prt_object.source = path
|
||||
|
||||
lib.imprint(container["instance_node"], {
|
||||
"representation": str(representation["_id"])
|
||||
})
|
||||
|
||||
def remove(self, container):
|
||||
"""remove the container"""
|
||||
from pymxs import runtime as rt
|
||||
|
||||
node = rt.getNodeByName(container["instance_node"])
|
||||
rt.delete(node)
|
||||
|
|
@ -61,7 +61,7 @@ class CollectRender(pyblish.api.InstancePlugin):
|
|||
"plugin": "3dsmax",
|
||||
"frameStart": context.data['frameStart'],
|
||||
"frameEnd": context.data['frameEnd'],
|
||||
"version": version_int
|
||||
"version": version_int,
|
||||
}
|
||||
self.log.info("data: {0}".format(data))
|
||||
instance.data.update(data)
|
||||
|
|
|
|||
|
|
@ -20,7 +20,8 @@ class ExtractMaxSceneRaw(publish.Extractor,
|
|||
order = pyblish.api.ExtractorOrder - 0.2
|
||||
label = "Extract Max Scene (Raw)"
|
||||
hosts = ["max"]
|
||||
families = ["camera"]
|
||||
families = ["camera",
|
||||
"maxScene"]
|
||||
optional = True
|
||||
|
||||
def process(self, instance):
|
||||
|
|
|
|||
207
openpype/hosts/max/plugins/publish/extract_pointcloud.py
Normal file
|
|
@ -0,0 +1,207 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
from openpype.pipeline import publish
|
||||
from pymxs import runtime as rt
|
||||
from openpype.hosts.max.api import (
|
||||
maintained_selection
|
||||
)
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
||||
def get_setting(project_setting=None):
|
||||
project_setting = get_project_settings(
|
||||
legacy_io.Session["AVALON_PROJECT"]
|
||||
)
|
||||
return (project_setting["max"]["PointCloud"])
|
||||
|
||||
|
||||
class ExtractPointCloud(publish.Extractor):
"""
Extract PRT format with tyFlow operators.

Notes:
Currently only works for the default partition setting.

Methods:
export_particle(): set up all job arguments for attributes
to be exported in MAXScript

get_operators(): get the Export Particles operator(s)

get_custom_attr(): get all custom channel attributes from the
OpenPype settings and set them as job arguments before exporting

get_files(): get the files with the tyFlow naming convention
before publishing

partition_output_name(): get the output name with partition settings.
get_partition(): get the partition values

"""
|
||||
|
||||
order = pyblish.api.ExtractorOrder - 0.2
|
||||
label = "Extract Point Cloud"
|
||||
hosts = ["max"]
|
||||
families = ["pointcloud"]
|
||||
|
||||
def process(self, instance):
|
||||
start = int(instance.context.data.get("frameStart"))
|
||||
end = int(instance.context.data.get("frameEnd"))
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Extracting PRT...")
|
||||
|
||||
stagingdir = self.staging_dir(instance)
|
||||
filename = "{name}.prt".format(**instance.data)
|
||||
path = os.path.join(stagingdir, filename)
|
||||
|
||||
with maintained_selection():
|
||||
job_args = self.export_particle(container,
|
||||
start,
|
||||
end,
|
||||
path)
|
||||
for job in job_args:
|
||||
rt.execute(job)
|
||||
|
||||
self.log.info("Performing Extraction ...")
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
self.log.info("Writing PRT with TyFlow Plugin...")
|
||||
filenames = self.get_files(container, path, start, end)
|
||||
self.log.debug("filenames: {0}".format(filenames))
|
||||
|
||||
partition = self.partition_output_name(container)
|
||||
|
||||
representation = {
|
||||
'name': 'prt',
|
||||
'ext': 'prt',
|
||||
'files': filenames if len(filenames) > 1 else filenames[0],
|
||||
"stagingDir": stagingdir,
|
||||
"outputName": partition # partition value
|
||||
}
|
||||
instance.data["representations"].append(representation)
|
||||
self.log.info("Extracted instance '%s' to: %s" % (instance.name,
|
||||
path))
|
||||
|
||||
def export_particle(self,
|
||||
container,
|
||||
start,
|
||||
end,
|
||||
filepath):
|
||||
job_args = []
|
||||
opt_list = self.get_operators(container)
|
||||
for operator in opt_list:
|
||||
start_frame = "{0}.frameStart={1}".format(operator,
|
||||
start)
|
||||
job_args.append(start_frame)
|
||||
end_frame = "{0}.frameEnd={1}".format(operator,
|
||||
end)
|
||||
job_args.append(end_frame)
|
||||
filepath = filepath.replace("\\", "/")
|
||||
prt_filename = '{0}.PRTFilename="{1}"'.format(operator,
|
||||
filepath)
|
||||
|
||||
job_args.append(prt_filename)
|
||||
# Partition
|
||||
mode = "{0}.PRTPartitionsMode=2".format(operator)
|
||||
job_args.append(mode)
|
||||
|
||||
additional_args = self.get_custom_attr(operator)
|
||||
for args in additional_args:
|
||||
job_args.append(args)
|
||||
|
||||
prt_export = "{0}.exportPRT()".format(operator)
|
||||
job_args.append(prt_export)
|
||||
|
||||
return job_args
|
||||
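To make the generated MAXScript concrete, the list below shows roughly what export_particle() produces for one operator; the operator path and output path are hypothetical:

# Hypothetical tyFlow node and event names
operator = "$tyFlowCloth.Event001.export_particles"

job_args = [
    "{0}.frameStart={1}".format(operator, 1001),
    "{0}.frameEnd={1}".format(operator, 1010),
    '{0}.PRTFilename="{1}"'.format(operator, "C:/staging/pointcloud.prt"),
    "{0}.PRTPartitionsMode=2".format(operator),
    "{0}.exportPRT()".format(operator),
]

# Each string would then be passed to rt.execute(), as in process() above.
for arg in job_args:
    print(arg)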
|
||||
def get_operators(self, container):
|
||||
"""Get Export Particles Operator"""
|
||||
|
||||
opt_list = []
|
||||
node = rt.getNodebyName(container)
|
||||
selection_list = list(node.Children)
|
||||
for sel in selection_list:
|
||||
obj = sel.baseobject
|
||||
# TODO: to see if it can be used maxscript instead
|
||||
anim_names = rt.getsubanimnames(obj)
|
||||
for anim_name in anim_names:
|
||||
sub_anim = rt.getsubanim(obj, anim_name)
|
||||
boolean = rt.isProperty(sub_anim, "Export_Particles")
|
||||
event_name = sub_anim.name
|
||||
if boolean:
|
||||
opt = "${0}.{1}.export_particles".format(sel.name,
|
||||
event_name)
|
||||
opt_list.append(opt)
|
||||
|
||||
return opt_list
|
||||
|
||||
def get_custom_attr(self, operator):
|
||||
"""Get Custom Attributes"""
|
||||
|
||||
custom_attr_list = []
|
||||
attr_settings = get_setting()["attribute"]
|
||||
for key, value in attr_settings.items():
|
||||
custom_attr = "{0}.PRTChannels_{1}=True".format(operator,
|
||||
value)
|
||||
self.log.debug(
|
||||
"{0} will be added as custom attribute".format(key)
|
||||
)
|
||||
custom_attr_list.append(custom_attr)
|
||||
|
||||
return custom_attr_list
|
||||
|
||||
def get_files(self,
|
||||
container,
|
||||
path,
|
||||
start_frame,
|
||||
end_frame):
|
||||
"""
|
||||
Note:
|
||||
Set the filenames accordingly to the tyFlow file
|
||||
naming extension for the publishing purpose
|
||||
|
||||
Actual File Output from tyFlow:
|
||||
<SceneFile>__part<PartitionStart>of<PartitionCount>.<frame>.prt
|
||||
e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt
|
||||
"""
|
||||
filenames = []
|
||||
filename = os.path.basename(path)
|
||||
orig_name, ext = os.path.splitext(filename)
|
||||
partition_count, partition_start = self.get_partition(container)
|
||||
for frame in range(int(start_frame), int(end_frame) + 1):
|
||||
actual_name = "{}__part{:03}of{}_{:05}".format(orig_name,
|
||||
partition_start,
|
||||
partition_count,
|
||||
frame)
|
||||
actual_filename = path.replace(orig_name, actual_name)
|
||||
filenames.append(os.path.basename(actual_filename))
|
||||
|
||||
return filenames
|
||||
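A quick sanity check of the naming used in get_files(), with made-up values; note that the format string pads the partition index to three digits:

# Hypothetical values for illustration only
orig_name = "tyFlow_cloth_CCCS_blobbyFill_001"
partition_start, partition_count, frame = 1, 1, 4

actual_name = "{}__part{:03}of{}_{:05}".format(
    orig_name, partition_start, partition_count, frame)
print(actual_name + ".prt")
# tyFlow_cloth_CCCS_blobbyFill_001__part001of1_00004.prt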
|
||||
def partition_output_name(self, container):
|
||||
"""
|
||||
Notes:
|
||||
Partition output name set for mapping
|
||||
the published file output
|
||||
|
||||
todo:
|
||||
Customizes the setting for the output
|
||||
"""
|
||||
partition_count, partition_start = self.get_partition(container)
|
||||
partition = "_part{:03}of{}".format(partition_start,
|
||||
partition_count)
|
||||
|
||||
return partition
|
||||
|
||||
def get_partition(self, container):
|
||||
"""
|
||||
Get Partition Value
|
||||
"""
|
||||
opt_list = self.get_operators(container)
|
||||
for operator in opt_list:
|
||||
count = rt.execute(f'{operator}.PRTPartitionsCount')
|
||||
start = rt.execute(f'{operator}.PRTPartitionsFrom')
|
||||
|
||||
return count, start
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
import pyblish.api
|
||||
from openpype.lib import version_up
|
||||
from pymxs import runtime as rt
|
||||
|
||||
|
||||
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
|
||||
"""Increment current workfile version."""
|
||||
|
||||
order = pyblish.api.IntegratorOrder + 0.9
|
||||
label = "Increment Workfile Version"
|
||||
hosts = ["max"]
|
||||
families = ["workfile"]
|
||||
|
||||
def process(self, context):
|
||||
path = context.data["currentFile"]
|
||||
filepath = version_up(path)
|
||||
|
||||
rt.saveMaxFile(filepath)
|
||||
self.log.info("Incrementing file version")
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishValidationError
|
||||
from pymxs import runtime as rt
|
||||
|
||||
|
||||
class ValidateMaxContents(pyblish.api.InstancePlugin):
|
||||
"""Validates Max contents.
|
||||
|
||||
Check if MaxScene container includes any contents underneath.
|
||||
"""
|
||||
|
||||
order = pyblish.api.ValidatorOrder
|
||||
families = ["camera",
|
||||
"maxScene",
|
||||
"maxrender"]
|
||||
hosts = ["max"]
|
||||
label = "Max Scene Contents"
|
||||
|
||||
def process(self, instance):
|
||||
container = rt.getNodeByName(instance.data["instance_node"])
|
||||
if not list(container.Children):
|
||||
raise PublishValidationError("No content found in the container")
|
||||
191
openpype/hosts/max/plugins/publish/validate_pointcloud.py
Normal file
|
|
@ -0,0 +1,191 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishValidationError
|
||||
from pymxs import runtime as rt
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.pipeline import legacy_io
|
||||
|
||||
|
||||
def get_setting(project_setting=None):
|
||||
project_setting = get_project_settings(
|
||||
legacy_io.Session["AVALON_PROJECT"]
|
||||
)
|
||||
return (project_setting["max"]["PointCloud"])
|
||||
|
||||
|
||||
class ValidatePointCloud(pyblish.api.InstancePlugin):
"""Validate the tyFlow Point Cloud setup."""

order = pyblish.api.ValidatorOrder
families = ["pointcloud"]
hosts = ["max"]
label = "Validate Point Cloud"
|
||||
|
||||
def process(self, instance):
|
||||
"""
|
||||
Notes:
|
||||
|
||||
1. Validate the container only include tyFlow objects
|
||||
2. Validate if tyFlow operator Export Particle exists
|
||||
3. Validate if the export mode of Export Particle is at PRT format
|
||||
4. Validate the partition count and range set as default value
|
||||
Partition Count : 100
|
||||
Partition Range : 1 to 1
|
||||
5. Validate if the custom attribute(s) exist as parameter(s)
|
||||
of export_particle operator
|
||||
|
||||
"""
|
||||
invalid = self.get_tyFlow_object(instance)
|
||||
if invalid:
|
||||
raise PublishValidationError("Non tyFlow object "
|
||||
"found: {}".format(invalid))
|
||||
invalid = self.get_tyFlow_operator(instance)
|
||||
if invalid:
|
||||
raise PublishValidationError("tyFlow ExportParticle operator "
|
||||
"not found: {}".format(invalid))
|
||||
|
||||
invalid = self.validate_export_mode(instance)
|
||||
if invalid:
|
||||
raise PublishValidationError("The export mode is not at PRT")
|
||||
|
||||
invalid = self.validate_partition_value(instance)
|
||||
if invalid:
|
||||
raise PublishValidationError("tyFlow Partition setting is "
|
||||
"not at the default value")
|
||||
invalid = self.validate_custom_attribute(instance)
|
||||
if invalid:
|
||||
raise PublishValidationError("Custom Attribute not found "
|
||||
":{}".format(invalid))
|
||||
|
||||
def get_tyFlow_object(self, instance):
|
||||
invalid = []
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Validating tyFlow container "
|
||||
"for {}".format(container))
|
||||
|
||||
con = rt.getNodeByName(container)
|
||||
selection_list = list(con.Children)
|
||||
for sel in selection_list:
|
||||
sel_tmp = str(sel)
|
||||
if rt.classOf(sel) in [rt.tyFlow,
|
||||
rt.Editable_Mesh]:
|
||||
if "tyFlow" not in sel_tmp:
|
||||
invalid.append(sel)
|
||||
else:
|
||||
invalid.append(sel)
|
||||
|
||||
return invalid
|
||||
|
||||
def get_tyFlow_operator(self, instance):
|
||||
invalid = []
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Validating tyFlow object "
|
||||
"for {}".format(container))
|
||||
|
||||
con = rt.getNodeByName(container)
|
||||
selection_list = list(con.Children)
|
||||
bool_list = []
|
||||
for sel in selection_list:
|
||||
obj = sel.baseobject
|
||||
anim_names = rt.getsubanimnames(obj)
|
||||
for anim_name in anim_names:
|
||||
# get all the names of the related tyFlow nodes
|
||||
sub_anim = rt.getsubanim(obj, anim_name)
|
||||
# check if there is export particle operator
|
||||
boolean = rt.isProperty(sub_anim, "Export_Particles")
|
||||
bool_list.append(str(boolean))
|
||||
# if the export_particles property is not there
|
||||
# it means there is not a "Export Particle" operator
|
||||
if "True" not in bool_list:
|
||||
self.log.error("Operator 'Export Particles' not found!")
|
||||
invalid.append(sel)
|
||||
|
||||
return invalid
|
||||
|
||||
def validate_custom_attribute(self, instance):
|
||||
invalid = []
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Validating tyFlow custom "
|
||||
"attributes for {}".format(container))
|
||||
|
||||
con = rt.getNodeByName(container)
|
||||
selection_list = list(con.Children)
|
||||
for sel in selection_list:
|
||||
obj = sel.baseobject
|
||||
anim_names = rt.getsubanimnames(obj)
|
||||
for anim_name in anim_names:
|
||||
# get all the names of the related tyFlow nodes
|
||||
sub_anim = rt.getsubanim(obj, anim_name)
|
||||
# check if there is export particle operator
|
||||
boolean = rt.isProperty(sub_anim, "Export_Particles")
|
||||
event_name = sub_anim.name
|
||||
if boolean:
|
||||
opt = "${0}.{1}.export_particles".format(sel.name,
|
||||
event_name)
|
||||
attributes = get_setting()["attribute"]
|
||||
for key, value in attributes.items():
|
||||
custom_attr = "{0}.PRTChannels_{1}".format(opt,
|
||||
value)
|
||||
try:
|
||||
rt.execute(custom_attr)
|
||||
except RuntimeError:
|
||||
invalid.append(key)
|
||||
|
||||
return invalid
|
||||
|
||||
def validate_partition_value(self, instance):
|
||||
invalid = []
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Validating tyFlow partition "
|
||||
"value for {}".format(container))
|
||||
|
||||
con = rt.getNodeByName(container)
|
||||
selection_list = list(con.Children)
|
||||
for sel in selection_list:
|
||||
obj = sel.baseobject
|
||||
anim_names = rt.getsubanimnames(obj)
|
||||
for anim_name in anim_names:
|
||||
# get all the names of the related tyFlow nodes
|
||||
sub_anim = rt.getsubanim(obj, anim_name)
|
||||
# check if there is export particle operator
|
||||
boolean = rt.isProperty(sub_anim, "Export_Particles")
|
||||
event_name = sub_anim.name
|
||||
if boolean:
|
||||
opt = "${0}.{1}.export_particles".format(sel.name,
|
||||
event_name)
|
||||
count = rt.execute(f'{opt}.PRTPartitionsCount')
|
||||
if count != 100:
|
||||
invalid.append(count)
|
||||
start = rt.execute(f'{opt}.PRTPartitionsFrom')
|
||||
if start != 1:
|
||||
invalid.append(start)
|
||||
end = rt.execute(f'{opt}.PRTPartitionsTo')
|
||||
if end != 1:
|
||||
invalid.append(end)
|
||||
|
||||
return invalid
|
||||
|
||||
def validate_export_mode(self, instance):
|
||||
invalid = []
|
||||
container = instance.data["instance_node"]
|
||||
self.log.info("Validating tyFlow export "
|
||||
"mode for {}".format(container))
|
||||
|
||||
con = rt.getNodeByName(container)
|
||||
selection_list = list(con.Children)
|
||||
for sel in selection_list:
|
||||
obj = sel.baseobject
|
||||
anim_names = rt.getsubanimnames(obj)
|
||||
for anim_name in anim_names:
|
||||
# get all the names of the related tyFlow nodes
|
||||
sub_anim = rt.getsubanim(obj, anim_name)
|
||||
# check if there is export particle operator
|
||||
boolean = rt.isProperty(sub_anim, "Export_Particles")
|
||||
event_name = sub_anim.name
|
||||
if boolean:
|
||||
opt = "${0}.{1}.export_particles".format(sel.name,
|
||||
event_name)
|
||||
export_mode = rt.execute(f'{opt}.exportMode')
|
||||
if export_mode != 1:
|
||||
invalid.append(export_mode)
|
||||
|
||||
return invalid
|
||||
|
|
@ -2099,29 +2099,40 @@ def get_frame_range():
|
|||
}
|
||||
|
||||
|
||||
def reset_frame_range():
|
||||
"""Set frame range to current asset"""
|
||||
def reset_frame_range(playback=True, render=True, fps=True):
|
||||
"""Set frame range to current asset
|
||||
|
||||
fps = convert_to_maya_fps(
|
||||
float(legacy_io.Session.get("AVALON_FPS", 25))
|
||||
)
|
||||
set_scene_fps(fps)
|
||||
Args:
|
||||
playback (bool, Optional): Whether to set the maya timeline playback
|
||||
frame range. Defaults to True.
|
||||
render (bool, Optional): Whether to set the maya render frame range.
|
||||
Defaults to True.
|
||||
fps (bool, Optional): Whether to set scene FPS. Defaults to True.
|
||||
"""
|
||||
|
||||
if fps:
|
||||
fps = convert_to_maya_fps(
|
||||
float(legacy_io.Session.get("AVALON_FPS", 25))
|
||||
)
|
||||
set_scene_fps(fps)
|
||||
|
||||
frame_range = get_frame_range()
|
||||
|
||||
frame_start = frame_range["frameStart"] - int(frame_range["handleStart"])
|
||||
frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"])
|
||||
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.playbackOptions(animationStartTime=frame_start)
|
||||
cmds.playbackOptions(animationEndTime=frame_end)
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.currentTime(frame_start)
|
||||
if playback:
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.playbackOptions(animationStartTime=frame_start)
|
||||
cmds.playbackOptions(animationEndTime=frame_end)
|
||||
cmds.playbackOptions(minTime=frame_start)
|
||||
cmds.playbackOptions(maxTime=frame_end)
|
||||
cmds.currentTime(frame_start)
|
||||
|
||||
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
|
||||
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
|
||||
if render:
|
||||
cmds.setAttr("defaultRenderGlobals.startFrame", frame_start)
|
||||
cmds.setAttr("defaultRenderGlobals.endFrame", frame_end)
|
||||
|
||||
|
||||
def reset_scene_resolution():
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ class RenderSettings(object):
|
|||
cmds.setAttr(
|
||||
"defaultArnoldDriver.mergeAOVs", multi_exr)
|
||||
self._additional_attribs_setter(additional_options)
|
||||
reset_frame_range()
|
||||
reset_frame_range(playback=False, fps=False, render=True)
|
||||
|
||||
def _set_redshift_settings(self, width, height):
|
||||
"""Sets settings for Redshift."""
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ class CreateAnimation(plugin.Creator):
|
|||
icon = "male"
|
||||
write_color_sets = False
|
||||
write_face_sets = False
|
||||
include_parent_hierarchy = False
|
||||
include_user_defined_attributes = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
|
@ -37,7 +38,7 @@ class CreateAnimation(plugin.Creator):
|
|||
self.data["visibleOnly"] = False
|
||||
|
||||
# Include the groups above the out_SET content
|
||||
self.data["includeParentHierarchy"] = False # Include parent groups
|
||||
self.data["includeParentHierarchy"] = self.include_parent_hierarchy
|
||||
|
||||
# Default to exporting world-space
|
||||
self.data["worldSpace"] = True
|
||||
|
|
|
|||
|
|
@ -26,6 +26,7 @@ class CreateReview(plugin.Creator):
|
|||
"alpha cut"
|
||||
]
|
||||
useMayaTimeline = True
|
||||
panZoom = False
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(CreateReview, self).__init__(*args, **kwargs)
|
||||
|
|
@ -45,5 +46,6 @@ class CreateReview(plugin.Creator):
|
|||
data["keepImages"] = self.keepImages
|
||||
data["imagePlane"] = self.imagePlane
|
||||
data["transparency"] = self.transparency
|
||||
data["panZoom"] = self.panZoom
|
||||
|
||||
self.data = data
|
||||
|
|
|
|||
178
openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
import os
|
||||
import json
|
||||
from collections import defaultdict
|
||||
|
||||
from maya import cmds
|
||||
|
||||
from openpype.pipeline import (
|
||||
InventoryAction, get_representation_context, get_representation_path
|
||||
)
|
||||
from openpype.hosts.maya.api.lib import get_container_members, get_id
|
||||
|
||||
|
||||
class ConnectYetiRig(InventoryAction):
|
||||
"""Connect Yeti Rig with an animation or pointcache."""
|
||||
|
||||
label = "Connect Yeti Rig"
|
||||
icon = "link"
|
||||
color = "white"
|
||||
|
||||
def process(self, containers):
|
||||
# Validate selection is more than 1.
|
||||
message = (
|
||||
"Only 1 container selected. 2+ containers needed for this action."
|
||||
)
|
||||
if len(containers) == 1:
|
||||
self.display_warning(message)
|
||||
return
|
||||
|
||||
# Categorize containers by family.
|
||||
containers_by_family = defaultdict(list)
|
||||
for container in containers:
|
||||
family = get_representation_context(
|
||||
container["representation"]
|
||||
)["subset"]["data"]["family"]
|
||||
containers_by_family[family].append(container)
|
||||
|
||||
# Validate to only 1 source container.
|
||||
source_containers = containers_by_family.get("animation", [])
|
||||
source_containers += containers_by_family.get("pointcache", [])
|
||||
source_container_namespaces = [
|
||||
x["namespace"] for x in source_containers
|
||||
]
|
||||
message = (
|
||||
"{} animation containers selected:\n\n{}\n\nOnly select 1 of type "
|
||||
"\"animation\" or \"pointcache\".".format(
|
||||
len(source_containers), source_container_namespaces
|
||||
)
|
||||
)
|
||||
if len(source_containers) != 1:
|
||||
self.display_warning(message)
|
||||
return
|
||||
|
||||
source_container = source_containers[0]
|
||||
source_ids = self.nodes_by_id(source_container)
|
||||
|
||||
# Target containers.
|
||||
target_ids = {}
|
||||
inputs = []
|
||||
|
||||
yeti_rig_containers = containers_by_family.get("yetiRig")
|
||||
if not yeti_rig_containers:
|
||||
self.display_warning(
|
||||
"Select at least one yetiRig container"
|
||||
)
|
||||
return
|
||||
|
||||
for container in yeti_rig_containers:
|
||||
target_ids.update(self.nodes_by_id(container))
|
||||
|
||||
maya_file = get_representation_path(
|
||||
get_representation_context(
|
||||
container["representation"]
|
||||
)["representation"]
|
||||
)
|
||||
_, ext = os.path.splitext(maya_file)
|
||||
settings_file = maya_file.replace(ext, ".rigsettings")
|
||||
if not os.path.exists(settings_file):
|
||||
continue
|
||||
|
||||
with open(settings_file) as f:
|
||||
inputs.extend(json.load(f)["inputs"])
|
||||
|
||||
# Compare loaded connections to scene.
|
||||
for input in inputs:
|
||||
source_node = source_ids.get(input["sourceID"])
|
||||
target_node = target_ids.get(input["destinationID"])
|
||||
|
||||
if not source_node or not target_node:
|
||||
self.log.debug(
|
||||
"Could not find nodes for input:\n" +
|
||||
json.dumps(input, indent=4, sort_keys=True)
|
||||
)
|
||||
continue
|
||||
source_attr, target_attr = input["connections"]
|
||||
|
||||
if not cmds.attributeQuery(
|
||||
source_attr, node=source_node, exists=True
|
||||
):
|
||||
self.log.debug(
|
||||
"Could not find attribute {} on node {} for "
|
||||
"input:\n{}".format(
|
||||
source_attr,
|
||||
source_node,
|
||||
json.dumps(input, indent=4, sort_keys=True)
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
if not cmds.attributeQuery(
|
||||
target_attr, node=target_node, exists=True
|
||||
):
|
||||
self.log.debug(
|
||||
"Could not find attribute {} on node {} for "
|
||||
"input:\n{}".format(
|
||||
target_attr,
|
||||
target_node,
|
||||
json.dumps(input, indent=4, sort_keys=True)
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
source_plug = "{}.{}".format(
|
||||
source_node, source_attr
|
||||
)
|
||||
target_plug = "{}.{}".format(
|
||||
target_node, target_attr
|
||||
)
|
||||
if cmds.isConnected(
|
||||
source_plug, target_plug, ignoreUnitConversion=True
|
||||
):
|
||||
self.log.debug(
|
||||
"Connection already exists: {} -> {}".format(
|
||||
source_plug, target_plug
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
cmds.connectAttr(source_plug, target_plug, force=True)
|
||||
self.log.debug(
|
||||
"Connected attributes: {} -> {}".format(
|
||||
source_plug, target_plug
|
||||
)
|
||||
)
|
||||
|
||||
def nodes_by_id(self, container):
|
||||
ids = {}
|
||||
for member in get_container_members(container):
|
||||
id = get_id(member)
|
||||
if not id:
|
||||
continue
|
||||
ids[id] = member
|
||||
|
||||
return ids
|
||||
|
||||
def display_warning(self, message, show_cancel=False):
|
||||
"""Show feedback to user.
|
||||
|
||||
Returns:
|
||||
bool
|
||||
"""
|
||||
|
||||
from qtpy import QtWidgets
|
||||
|
||||
accept = QtWidgets.QMessageBox.Ok
|
||||
if show_cancel:
|
||||
buttons = accept | QtWidgets.QMessageBox.Cancel
|
||||
else:
|
||||
buttons = accept
|
||||
|
||||
state = QtWidgets.QMessageBox.warning(
|
||||
None,
|
||||
"",
|
||||
message,
|
||||
buttons=buttons,
|
||||
defaultButton=accept
|
||||
)
|
||||
|
||||
return state == accept
|
||||
332
openpype/hosts/maya/plugins/load/load_image.py
Normal file
|
|
@ -0,0 +1,332 @@
|
|||
import os
|
||||
import copy
|
||||
|
||||
from openpype.lib import EnumDef
|
||||
from openpype.pipeline import (
|
||||
load,
|
||||
get_representation_context
|
||||
)
|
||||
from openpype.pipeline.load.utils import get_representation_path_from_context
|
||||
from openpype.pipeline.colorspace import (
|
||||
get_imageio_colorspace_from_filepath,
|
||||
get_imageio_config,
|
||||
get_imageio_file_rules
|
||||
)
|
||||
from openpype.settings import get_project_settings
|
||||
|
||||
from openpype.hosts.maya.api.pipeline import containerise
|
||||
from openpype.hosts.maya.api.lib import (
|
||||
unique_namespace,
|
||||
namespaced
|
||||
)
|
||||
|
||||
from maya import cmds
|
||||
|
||||
|
||||
def create_texture():
|
||||
"""Create place2dTexture with file node with uv connections
|
||||
|
||||
Mimics Maya "file [Texture]" creation.
|
||||
"""
|
||||
|
||||
place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d")
|
||||
file = cmds.shadingNode("file", asTexture=True, name="file")
|
||||
|
||||
connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV",
|
||||
"mirrorU", "mirrorV", "stagger", "wrapV", "wrapU",
|
||||
"repeatUV", "offset", "noiseUV", "vertexUvThree",
|
||||
"vertexUvTwo", "vertexUvOne", "vertexCameraOne"]
|
||||
for attr in connections:
|
||||
src = "{}.{}".format(place, attr)
|
||||
dest = "{}.{}".format(file, attr)
|
||||
cmds.connectAttr(src, dest)
|
||||
|
||||
cmds.connectAttr(place + '.outUV', file + '.uvCoord')
|
||||
cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize')
|
||||
|
||||
return file, place
|
||||
|
||||
|
||||
def create_projection():
|
||||
"""Create texture with place3dTexture and projection
|
||||
|
||||
Mimics Maya "file [Projection]" creation.
|
||||
"""
|
||||
|
||||
file, place = create_texture()
|
||||
projection = cmds.shadingNode("projection", asTexture=True,
|
||||
name="projection")
|
||||
place3d = cmds.shadingNode("place3dTexture", asUtility=True,
|
||||
name="place3d")
|
||||
|
||||
cmds.connectAttr(place3d + '.worldInverseMatrix[0]',
|
||||
projection + ".placementMatrix")
|
||||
cmds.connectAttr(file + '.outColor', projection + ".image")
|
||||
|
||||
return file, place, projection, place3d
|
||||
|
||||
|
||||
def create_stencil():
|
||||
"""Create texture with extra place2dTexture offset and stencil
|
||||
|
||||
Mimics Maya "file [Stencil]" creation.
|
||||
"""
|
||||
|
||||
file, place = create_texture()
|
||||
|
||||
place_stencil = cmds.shadingNode("place2dTexture", asUtility=True,
|
||||
name="place2d_stencil")
|
||||
stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil")
|
||||
|
||||
for src_attr, dest_attr in [
|
||||
("outUV", "uvCoord"),
|
||||
("outUvFilterSize", "uvFilterSize")
|
||||
]:
|
||||
src_plug = "{}.{}".format(place_stencil, src_attr)
|
||||
cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr))
|
||||
cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr))
|
||||
|
||||
return file, place, stencil, place_stencil
|
||||
|
||||
|
||||
class FileNodeLoader(load.LoaderPlugin):
|
||||
"""File node loader."""
|
||||
|
||||
families = ["image", "plate", "render"]
|
||||
label = "Load file node"
|
||||
representations = ["exr", "tif", "png", "jpg"]
|
||||
icon = "image"
|
||||
color = "orange"
|
||||
order = 2
|
||||
|
||||
options = [
|
||||
EnumDef(
|
||||
"mode",
|
||||
items={
|
||||
"texture": "Texture",
|
||||
"projection": "Projection",
|
||||
"stencil": "Stencil"
|
||||
},
|
||||
default="texture",
|
||||
label="Texture Mode"
|
||||
)
|
||||
]
|
||||
|
||||
def load(self, context, name, namespace, data):
|
||||
|
||||
asset = context['asset']['name']
|
||||
namespace = namespace or unique_namespace(
|
||||
asset + "_",
|
||||
prefix="_" if asset[0].isdigit() else "",
|
||||
suffix="_",
|
||||
)
|
||||
|
||||
with namespaced(namespace, new=True) as namespace:
|
||||
# Create the nodes within the namespace
|
||||
nodes = {
|
||||
"texture": create_texture,
|
||||
"projection": create_projection,
|
||||
"stencil": create_stencil
|
||||
}[data.get("mode", "texture")]()
|
||||
|
||||
file_node = cmds.ls(nodes, type="file")[0]
|
||||
|
||||
self._apply_representation_context(context, file_node)
|
||||
|
||||
# For ease of access for the user select all the nodes and select
|
||||
# the file node last so that UI shows its attributes by default
|
||||
cmds.select(list(nodes) + [file_node], replace=True)
|
||||
|
||||
return containerise(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
nodes=nodes,
|
||||
context=context,
|
||||
loader=self.__class__.__name__
|
||||
)
|
||||
|
||||
def update(self, container, representation):
|
||||
|
||||
members = cmds.sets(container['objectName'], query=True)
|
||||
file_node = cmds.ls(members, type="file")[0]
|
||||
|
||||
context = get_representation_context(representation)
|
||||
self._apply_representation_context(context, file_node)
|
||||
|
||||
# Update representation
|
||||
cmds.setAttr(
|
||||
container["objectName"] + ".representation",
|
||||
str(representation["_id"]),
|
||||
type="string"
|
||||
)
|
||||
|
||||
def switch(self, container, representation):
|
||||
self.update(container, representation)
|
||||
|
||||
def remove(self, container):
|
||||
members = cmds.sets(container['objectName'], query=True)
|
||||
cmds.lockNode(members, lock=False)
|
||||
cmds.delete([container['objectName']] + members)
|
||||
|
||||
# Clean up the namespace
|
||||
try:
|
||||
cmds.namespace(removeNamespace=container['namespace'],
|
||||
deleteNamespaceContent=True)
|
||||
except RuntimeError:
|
||||
pass
|
||||
|
||||
def _apply_representation_context(self, context, file_node):
|
||||
"""Update the file node to match the context.
|
||||
|
||||
This sets the file node's attributes for:
|
||||
- file path
|
||||
- udim tiling mode (if it is an udim tile)
|
||||
- use frame extension (if it is a sequence)
|
||||
- colorspace
|
||||
|
||||
"""
|
||||
|
||||
repre_context = context["representation"]["context"]
|
||||
has_frames = repre_context.get("frame") is not None
|
||||
has_udim = repre_context.get("udim") is not None
|
||||
|
||||
# Set UV tiling mode if UDIM tiles
|
||||
if has_udim:
|
||||
cmds.setAttr(file_node + ".uvTilingMode", 3) # UDIM-tiles
|
||||
else:
|
||||
cmds.setAttr(file_node + ".uvTilingMode", 0) # off
|
||||
|
||||
# Enable sequence if publish has `startFrame` and `endFrame` and
|
||||
# `startFrame != endFrame`
|
||||
if has_frames and self._is_sequence(context):
|
||||
# When enabling useFrameExtension maya automatically
|
||||
# connects an expression to <file>.frameExtension to set
|
||||
# the current frame. However, this expression is generated
|
||||
# with some delay and thus it'll show a warning if frame 0
|
||||
# doesn't exist because we're explicitly setting the <f>
|
||||
# token.
|
||||
cmds.setAttr(file_node + ".useFrameExtension", True)
|
||||
else:
|
||||
cmds.setAttr(file_node + ".useFrameExtension", False)
|
||||
|
||||
# Set the file node path attribute
|
||||
path = self._format_path(context)
|
||||
cmds.setAttr(file_node + ".fileTextureName", path, type="string")
|
||||
|
||||
# Set colorspace
|
||||
colorspace = self._get_colorspace(context)
|
||||
if colorspace:
|
||||
cmds.setAttr(file_node + ".colorSpace", colorspace, type="string")
|
||||
else:
|
||||
self.log.debug("Unknown colorspace - setting colorspace skipped.")
|
||||
|
||||
def _is_sequence(self, context):
|
||||
"""Check whether frameStart and frameEnd are not the same."""
|
||||
version = context.get("version", {})
|
||||
representation = context.get("representation", {})
|
||||
|
||||
for doc in [representation, version]:
|
||||
# Frame range can be set on version or representation.
|
||||
# When set on representation it overrides version data.
|
||||
data = doc.get("data", {})
|
||||
start = data.get("frameStartHandle", data.get("frameStart", None))
|
||||
end = data.get("frameEndHandle", data.get("frameEnd", None))
|
||||
|
||||
if start is None or end is None:
|
||||
continue
|
||||
|
||||
if start != end:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
def _get_colorspace(self, context):
|
||||
"""Return colorspace of the file to load.
|
||||
|
||||
Retrieves the explicit colorspace from the publish. If no colorspace
|
||||
data is stored with published content then project imageio settings
|
||||
are used to make an assumption of the colorspace based on the file
|
||||
rules. If no file rules match then None is returned.
|
||||
|
||||
Returns:
|
||||
str or None: The colorspace of the file or None if not detected.
|
||||
|
||||
"""
|
||||
|
||||
# We can't apply color spaces if management is not enabled
|
||||
if not cmds.colorManagementPrefs(query=True, cmEnabled=True):
|
||||
return
|
||||
|
||||
representation = context["representation"]
|
||||
colorspace_data = representation.get("data", {}).get("colorspaceData")
|
||||
if colorspace_data:
|
||||
return colorspace_data["colorspace"]
|
||||
|
||||
# Assume colorspace from filepath based on project settings
|
||||
project_name = context["project"]["name"]
|
||||
host_name = os.environ.get("AVALON_APP")
|
||||
project_settings = get_project_settings(project_name)
|
||||
|
||||
config_data = get_imageio_config(
|
||||
project_name, host_name,
|
||||
project_settings=project_settings
|
||||
)
|
||||
file_rules = get_imageio_file_rules(
|
||||
project_name, host_name,
|
||||
project_settings=project_settings
|
||||
)
|
||||
|
||||
path = get_representation_path_from_context(context)
|
||||
colorspace = get_imageio_colorspace_from_filepath(
|
||||
path=path,
|
||||
host_name=host_name,
|
||||
project_name=project_name,
|
||||
config_data=config_data,
|
||||
file_rules=file_rules,
|
||||
project_settings=project_settings
|
||||
)
|
||||
|
||||
return colorspace
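# Resolution order sketch for the colorspace lookup above (a simplified
# outline, not the exact implementation): explicit colorspaceData stored with
# the representation wins, then the project imageio file rules are consulted
# via the filepath, and None is returned when neither yields a match.
#
#   def resolve_colorspace(representation, path):
#       data = representation.get("data", {}).get("colorspaceData")
#       if data:
#           return data["colorspace"]
#       return get_imageio_colorspace_from_filepath(path=path, ...) or None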
|
||||
|
||||
def _format_path(self, context):
|
||||
"""Format the path with correct tokens for frames and udim tiles."""
|
||||
|
||||
context = copy.deepcopy(context)
|
||||
representation = context["representation"]
|
||||
template = representation.get("data", {}).get("template")
|
||||
if not template:
|
||||
# No template to find token locations for
|
||||
return get_representation_path_from_context(context)
|
||||
|
||||
def _placeholder(key):
|
||||
# Substitute with a long placeholder value so that potential
|
||||
# custom formatting with padding doesn't find its way into
|
||||
# our formatting, so that <f> wouldn't be padded as 0<f>
|
||||
return "___{}___".format(key)
|
||||
|
||||
# We format UDIM and Frame numbers with their specific tokens. To do so
|
||||
# we in-place change the representation context data to format the path
|
||||
# with our own data
|
||||
tokens = {
|
||||
"frame": "<f>",
|
||||
"udim": "<UDIM>"
|
||||
}
|
||||
has_tokens = False
|
||||
repre_context = representation["context"]
|
||||
for key, _token in tokens.items():
|
||||
if key in repre_context:
|
||||
repre_context[key] = _placeholder(key)
|
||||
has_tokens = True
|
||||
|
||||
# Replace with our custom template that has the tokens set
|
||||
representation["data"]["template"] = template
|
||||
path = get_representation_path_from_context(context)
|
||||
|
||||
if has_tokens:
|
||||
for key, token in tokens.items():
|
||||
if key in repre_context:
|
||||
path = path.replace(_placeholder(key), token)
|
||||
|
||||
return path
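A minimal standalone sketch of the token-substitution trick used by
_format_path above (the template string and key names here are illustrative,
not actual OpenPype publish templates): frame/udim keys are first filled with
long placeholder strings so any padding in the template formatting cannot
mangle them, and the placeholders are swapped for Maya's <f> and <UDIM> tokens
afterwards.

    def format_with_tokens(template, data):
        tokens = {"frame": "<f>", "udim": "<UDIM>"}
        placeholders = {key: "___{}___".format(key) for key in tokens}

        # Fill token keys with placeholders before formatting.
        fill_data = dict(data)
        for key in tokens:
            if key in fill_data:
                fill_data[key] = placeholders[key]

        path = template.format(**fill_data)

        # Swap placeholders back for the Maya tokens.
        for key, token in tokens.items():
            path = path.replace(placeholders[key], token)
        return path

    # Example (illustrative template):
    #   format_with_tokens("tex.{udim}.{frame:0>4}.exr",
    #                      {"udim": 1001, "frame": 25})
    #   -> "tex.<UDIM>.<f>.exr"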
@ -1,17 +1,12 @@
|
|||
import os
|
||||
from collections import defaultdict
|
||||
import maya.cmds as cmds
|
||||
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.settings import get_current_project_settings
|
||||
import openpype.hosts.maya.api.plugin
|
||||
from openpype.hosts.maya.api import lib
|
||||
|
||||
|
||||
class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
||||
"""
|
||||
This loader will load Yeti rig. You can select something in scene and if it
|
||||
has same ID as mesh published with rig, their shapes will be linked
|
||||
together.
|
||||
"""
|
||||
"""This loader will load Yeti rig."""
|
||||
|
||||
families = ["yetiRig"]
|
||||
representations = ["ma"]
|
||||
|
|
@ -22,72 +17,31 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
|
|||
color = "orange"
|
||||
|
||||
def process_reference(
|
||||
self, context, name=None, namespace=None, options=None):
|
||||
self, context, name=None, namespace=None, options=None
|
||||
):
|
||||
|
||||
import maya.cmds as cmds
|
||||
|
||||
# get roots of selected hierarchies
|
||||
selected_roots = []
|
||||
for sel in cmds.ls(sl=True, long=True):
|
||||
selected_roots.append(sel.split("|")[1])
|
||||
|
||||
# get all objects under those roots
|
||||
selected_hierarchy = []
|
||||
for root in selected_roots:
|
||||
selected_hierarchy.append(cmds.listRelatives(
|
||||
root,
|
||||
allDescendents=True) or [])
|
||||
|
||||
# flatten the list and filter only shapes
|
||||
shapes_flat = []
|
||||
for root in selected_hierarchy:
|
||||
shapes = cmds.ls(root, long=True, type="mesh") or []
|
||||
for shape in shapes:
|
||||
shapes_flat.append(shape)
|
||||
|
||||
# create dictionary of cbId and shape nodes
|
||||
scene_lookup = defaultdict(list)
|
||||
for node in shapes_flat:
|
||||
cb_id = lib.get_id(node)
|
||||
scene_lookup[cb_id] = node
|
||||
|
||||
# load rig
|
||||
group_name = "{}:{}".format(namespace, name)
|
||||
with lib.maintained_selection():
|
||||
file_url = self.prepare_root_value(self.fname,
|
||||
context["project"]["name"])
|
||||
nodes = cmds.file(file_url,
|
||||
namespace=namespace,
|
||||
reference=True,
|
||||
returnNewNodes=True,
|
||||
groupReference=True,
|
||||
groupName="{}:{}".format(namespace, name))
|
||||
file_url = self.prepare_root_value(
|
||||
self.fname, context["project"]["name"]
|
||||
)
|
||||
nodes = cmds.file(
|
||||
file_url,
|
||||
namespace=namespace,
|
||||
reference=True,
|
||||
returnNewNodes=True,
|
||||
groupReference=True,
|
||||
groupName=group_name
|
||||
)
|
||||
|
||||
# for every shape node we've just loaded find matching shape by its
|
||||
# cbId in selection. If found outMesh of scene shape will connect to
|
||||
# inMesh of loaded shape.
|
||||
for destination_node in nodes:
|
||||
source_node = scene_lookup[lib.get_id(destination_node)]
|
||||
if source_node:
|
||||
self.log.info("found: {}".format(source_node))
|
||||
self.log.info(
|
||||
"creating connection to {}".format(destination_node))
|
||||
|
||||
cmds.connectAttr("{}.outMesh".format(source_node),
|
||||
"{}.inMesh".format(destination_node),
|
||||
force=True)
|
||||
|
||||
groupName = "{}:{}".format(namespace, name)
|
||||
|
||||
settings = get_project_settings(os.environ['AVALON_PROJECT'])
|
||||
colors = settings['maya']['load']['colors']
|
||||
|
||||
c = colors.get('yetiRig')
|
||||
settings = get_current_project_settings()
|
||||
colors = settings["maya"]["load"]["colors"]
|
||||
c = colors.get("yetiRig")
|
||||
if c is not None:
|
||||
cmds.setAttr(groupName + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(groupName + ".outlinerColor",
|
||||
(float(c[0])/255),
|
||||
(float(c[1])/255),
|
||||
(float(c[2])/255)
|
||||
cmds.setAttr(group_name + ".useOutlinerColor", 1)
|
||||
cmds.setAttr(
|
||||
group_name + ".outlinerColor",
|
||||
(float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255)
|
||||
)
|
||||
self[:] = nodes
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import pyblish.api
|
|||
|
||||
from openpype.client import get_subset_by_name
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.hosts.maya.api.lib import get_attribute_input
|
||||
|
||||
|
||||
class CollectReview(pyblish.api.InstancePlugin):
|
||||
|
|
@ -24,7 +25,9 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
task = legacy_io.Session["AVALON_TASK"]
|
||||
|
||||
# Get panel.
|
||||
instance.data["panel"] = cmds.playblast(activeEditor=True)
|
||||
instance.data["panel"] = cmds.playblast(
|
||||
activeEditor=True
|
||||
).split("|")[-1]
|
||||
|
||||
# get cameras
|
||||
members = instance.data['setMembers']
|
||||
|
|
@ -77,6 +80,8 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
data['review_width'] = instance.data['review_width']
|
||||
data['review_height'] = instance.data['review_height']
|
||||
data["isolate"] = instance.data["isolate"]
|
||||
data["panZoom"] = instance.data.get("panZoom", False)
|
||||
data["panel"] = instance.data["panel"]
|
||||
cmds.setAttr(str(instance) + '.active', 1)
|
||||
self.log.debug('data {}'.format(instance.context[i].data))
|
||||
instance.context[i].data.update(data)
|
||||
|
|
@ -142,3 +147,21 @@ class CollectReview(pyblish.api.InstancePlugin):
|
|||
"filename": node.filename.get()
|
||||
}
|
||||
)
|
||||
|
||||
# Collect focal length.
|
||||
attr = camera + ".focalLength"
|
||||
focal_length = None
|
||||
if get_attribute_input(attr):
|
||||
start = instance.data["frameStart"]
|
||||
end = instance.data["frameEnd"] + 1
|
||||
focal_length = [
|
||||
cmds.getAttr(attr, time=t) for t in range(int(start), int(end))
|
||||
]
|
||||
else:
|
||||
focal_length = cmds.getAttr(attr)
|
||||
|
||||
key = "focalLength"
|
||||
try:
|
||||
instance.data["burninDataMembers"][key] = focal_length
|
||||
except KeyError:
|
||||
instance.data["burninDataMembers"] = {key: focal_length}
|
||||
|
|
|
|||
|
|
@ -1,12 +1,10 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Maya look extractor."""
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import tempfile
|
||||
import platform
|
||||
import contextlib
|
||||
import subprocess
|
||||
from collections import OrderedDict
|
||||
|
||||
from maya import cmds # noqa
|
||||
|
|
@ -23,34 +21,13 @@ COPY = 1
|
|||
HARDLINK = 2
|
||||
|
||||
|
||||
def escape_space(path):
|
||||
"""Ensure path is enclosed by quotes to allow paths with spaces"""
|
||||
return '"{}"'.format(path) if " " in path else path
|
||||
|
||||
|
||||
def get_ocio_config_path(profile_folder):
|
||||
"""Path to OpenPype vendorized OCIO.
|
||||
|
||||
Vendorized OCIO config file path is grabbed from the specific path
|
||||
hierarchy specified below.
|
||||
|
||||
"{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio"
|
||||
Args:
|
||||
profile_folder (str): Name of folder to grab config file from.
|
||||
|
||||
Returns:
|
||||
str: Path to vendorized config file.
|
||||
"""
|
||||
|
||||
return os.path.join(
|
||||
os.environ["OPENPYPE_ROOT"],
|
||||
"vendor",
|
||||
"bin",
|
||||
"ocioconfig",
|
||||
"OpenColorIOConfigs",
|
||||
profile_folder,
|
||||
"config.ocio"
|
||||
)
|
||||
def _has_arnold():
|
||||
"""Return whether the arnold package is available and can be imported."""
|
||||
try:
|
||||
import arnold # noqa: F401
|
||||
return True
|
||||
except (ImportError, ModuleNotFoundError):
|
||||
return False
|
||||
|
||||
|
||||
def find_paths_by_hash(texture_hash):
|
||||
|
|
@ -548,7 +525,7 @@ class ExtractLook(publish.Extractor):
|
|||
color_space = cmds.getAttr(color_space_attr)
|
||||
except ValueError:
|
||||
# node doesn't have color space attribute
|
||||
if cmds.loadPlugin("mtoa", quiet=True):
|
||||
if _has_arnold():
|
||||
img_info = image_info(filepath)
|
||||
color_space = guess_colorspace(img_info)
|
||||
else:
|
||||
|
|
@ -560,7 +537,7 @@ class ExtractLook(publish.Extractor):
|
|||
render_colorspace])
|
||||
else:
|
||||
|
||||
if cmds.loadPlugin("mtoa", quiet=True):
|
||||
if _has_arnold():
|
||||
img_info = image_info(filepath)
|
||||
color_space = guess_colorspace(img_info)
|
||||
if color_space == "sRGB":
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
import os
|
||||
import json
|
||||
import contextlib
|
||||
|
||||
import clique
|
||||
import capture
|
||||
|
|
@ -11,6 +12,16 @@ from maya import cmds
|
|||
import pymel.core as pm
|
||||
|
||||
|
||||
@contextlib.contextmanager
def panel_camera(panel, camera):
    original_camera = cmds.modelPanel(panel, query=True, camera=True)
    try:
        cmds.modelPanel(panel, edit=True, camera=camera)
        yield
    finally:
        cmds.modelPanel(panel, edit=True, camera=original_camera)
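

# Usage sketch (panel and camera names are illustrative): temporarily look
# through the review camera in a specific model panel and restore the
# previous camera afterwards, even if the body raises.
#
#   with panel_camera("modelPanel4", "|review_cam"):
#       cmds.playblast(...)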
|
||||
|
||||
|
||||
class ExtractPlayblast(publish.Extractor):
|
||||
"""Extract viewport playblast.
|
||||
|
||||
|
|
@ -25,6 +36,16 @@ class ExtractPlayblast(publish.Extractor):
|
|||
optional = True
|
||||
capture_preset = {}
|
||||
|
||||
def _capture(self, preset):
|
||||
self.log.info(
|
||||
"Using preset:\n{}".format(
|
||||
json.dumps(preset, sort_keys=True, indent=4)
|
||||
)
|
||||
)
|
||||
|
||||
path = capture.capture(log=self.log, **preset)
|
||||
self.log.debug("playblast path {}".format(path))
|
||||
|
||||
def process(self, instance):
|
||||
self.log.info("Extracting capture..")
|
||||
|
||||
|
|
@ -43,7 +64,7 @@ class ExtractPlayblast(publish.Extractor):
|
|||
self.log.info("start: {}, end: {}".format(start, end))
|
||||
|
||||
# get cameras
|
||||
camera = instance.data['review_camera']
|
||||
camera = instance.data["review_camera"]
|
||||
|
||||
preset = lib.load_capture_preset(data=self.capture_preset)
|
||||
# Grab capture presets from the project settings
|
||||
|
|
@ -57,23 +78,23 @@ class ExtractPlayblast(publish.Extractor):
|
|||
asset_height = asset_data.get("resolutionHeight")
|
||||
review_instance_width = instance.data.get("review_width")
|
||||
review_instance_height = instance.data.get("review_height")
|
||||
preset['camera'] = camera
|
||||
preset["camera"] = camera
|
||||
|
||||
# Tests if project resolution is set,
|
||||
# if it is a value other than zero, that value is
|
||||
# used, if not then the asset resolution is
|
||||
# used
|
||||
if review_instance_width and review_instance_height:
|
||||
preset['width'] = review_instance_width
|
||||
preset['height'] = review_instance_height
|
||||
preset["width"] = review_instance_width
|
||||
preset["height"] = review_instance_height
|
||||
elif width_preset and height_preset:
|
||||
preset['width'] = width_preset
|
||||
preset['height'] = height_preset
|
||||
preset["width"] = width_preset
|
||||
preset["height"] = height_preset
|
||||
elif asset_width and asset_height:
|
||||
preset['width'] = asset_width
|
||||
preset['height'] = asset_height
|
||||
preset['start_frame'] = start
|
||||
preset['end_frame'] = end
|
||||
preset["width"] = asset_width
|
||||
preset["height"] = asset_height
|
||||
preset["start_frame"] = start
|
||||
preset["end_frame"] = end
|
||||
|
||||
# Enforce persisting camera depth of field
|
||||
camera_options = preset.setdefault("camera_options", {})
|
||||
|
|
@ -86,8 +107,8 @@ class ExtractPlayblast(publish.Extractor):
|
|||
|
||||
self.log.info("Outputting images to %s" % path)
|
||||
|
||||
preset['filename'] = path
|
||||
preset['overwrite'] = True
|
||||
preset["filename"] = path
|
||||
preset["overwrite"] = True
|
||||
|
||||
pm.refresh(f=True)
|
||||
|
||||
|
|
@ -114,7 +135,8 @@ class ExtractPlayblast(publish.Extractor):
|
|||
|
||||
# Disable Pan/Zoom.
|
||||
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
|
||||
preset.pop("pan_zoom", None)
|
||||
preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"]
|
||||
|
||||
# Need to explicitly enable some viewport changes so the viewport is
|
||||
# refreshed ahead of playblasting.
|
||||
|
|
@ -136,30 +158,39 @@ class ExtractPlayblast(publish.Extractor):
|
|||
)
|
||||
|
||||
override_viewport_options = (
|
||||
capture_presets['Viewport Options']['override_viewport_options']
|
||||
capture_presets["Viewport Options"]["override_viewport_options"]
|
||||
)
|
||||
with lib.maintained_time():
|
||||
filename = preset.get("filename", "%TEMP%")
|
||||
|
||||
# Force viewer to False in call to capture because we have our own
|
||||
# viewer opening call to allow a signal to trigger between
|
||||
# playblast and viewer
|
||||
preset['viewer'] = False
|
||||
# Force viewer to False in call to capture because we have our own
|
||||
# viewer opening call to allow a signal to trigger between
|
||||
# playblast and viewer
|
||||
preset["viewer"] = False
|
||||
|
||||
# Update preset with current panel setting
|
||||
# if override_viewport_options is turned off
|
||||
if not override_viewport_options:
|
||||
panel_preset = capture.parse_view(instance.data["panel"])
|
||||
panel_preset.pop("camera")
|
||||
preset.update(panel_preset)
|
||||
# Update preset with current panel setting
|
||||
# if override_viewport_options is turned off
|
||||
if not override_viewport_options:
|
||||
panel_preset = capture.parse_view(instance.data["panel"])
|
||||
panel_preset.pop("camera")
|
||||
preset.update(panel_preset)
|
||||
|
||||
self.log.info(
|
||||
"Using preset:\n{}".format(
|
||||
json.dumps(preset, sort_keys=True, indent=4)
|
||||
# Need to ensure Python 2 compatibility.
|
||||
# TODO: Remove once dropping Python 2.
|
||||
if getattr(contextlib, "nested", None):
|
||||
# Python 2 compatibility (contextlib.nested only exists on Python 2).
|
||||
with contextlib.nested(
|
||||
lib.maintained_time(),
|
||||
panel_camera(instance.data["panel"], preset["camera"])
|
||||
):
|
||||
self._capture(preset)
|
||||
else:
|
||||
# Python 3 compatibility (contextlib.ExitStack).
|
||||
with contextlib.ExitStack() as stack:
|
||||
stack.enter_context(lib.maintained_time())
|
||||
stack.enter_context(
|
||||
panel_camera(instance.data["panel"], preset["camera"])
|
||||
)
|
||||
)
|
||||
|
||||
path = capture.capture(log=self.log, **preset)
|
||||
self._capture(preset)
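# Sketch of the intent of the two branches above: contextlib.nested is the
# Python 2 way to combine several context managers in one `with`, while
# contextlib.ExitStack is the Python 3 equivalent, e.g.
#
#   if hasattr(contextlib, "nested"):          # Python 2
#       with contextlib.nested(cm_a, cm_b):
#           do_work()
#   else:                                      # Python 3
#       with contextlib.ExitStack() as stack:
#           stack.enter_context(cm_a)
#           stack.enter_context(cm_b)
#           do_work()
#
# (cm_a, cm_b and do_work are placeholders, not names from this changeset.)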
|
||||
|
||||
# Restoring viewport options.
|
||||
if viewport_defaults:
|
||||
|
|
@ -169,18 +200,17 @@ class ExtractPlayblast(publish.Extractor):
|
|||
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
|
||||
|
||||
self.log.debug("playblast path {}".format(path))
|
||||
|
||||
collected_files = os.listdir(stagingdir)
|
||||
patterns = [clique.PATTERNS["frames"]]
|
||||
collections, remainder = clique.assemble(collected_files,
|
||||
minimum_items=1,
|
||||
patterns=patterns)
|
||||
|
||||
filename = preset.get("filename", "%TEMP%")
|
||||
self.log.debug("filename {}".format(filename))
|
||||
frame_collection = None
|
||||
for collection in collections:
|
||||
filebase = collection.format('{head}').rstrip(".")
|
||||
filebase = collection.format("{head}").rstrip(".")
|
||||
self.log.debug("collection head {}".format(filebase))
|
||||
if filebase in filename:
|
||||
frame_collection = collection
|
||||
|
|
@ -204,15 +234,15 @@ class ExtractPlayblast(publish.Extractor):
|
|||
collected_files = collected_files[0]
|
||||
|
||||
representation = {
|
||||
'name': 'png',
|
||||
'ext': 'png',
|
||||
'files': collected_files,
|
||||
"name": self.capture_preset["Codec"]["compression"],
|
||||
"ext": self.capture_preset["Codec"]["compression"],
|
||||
"files": collected_files,
|
||||
"stagingDir": stagingdir,
|
||||
"frameStart": start,
|
||||
"frameEnd": end,
|
||||
'fps': fps,
|
||||
'preview': True,
|
||||
'tags': tags,
|
||||
'camera_name': camera_node_name
|
||||
"fps": fps,
|
||||
"preview": True,
|
||||
"tags": tags,
|
||||
"camera_name": camera_node_name
|
||||
}
|
||||
instance.data["representations"].append(representation)
|
||||
|
|
|
|||
|
|
@ -26,28 +26,28 @@ class ExtractThumbnail(publish.Extractor):
|
|||
def process(self, instance):
|
||||
self.log.info("Extracting capture..")
|
||||
|
||||
camera = instance.data['review_camera']
|
||||
camera = instance.data["review_camera"]
|
||||
|
||||
capture_preset = (
|
||||
instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset']
|
||||
)
|
||||
maya_setting = instance.context.data["project_settings"]["maya"]
|
||||
plugin_setting = maya_setting["publish"]["ExtractPlayblast"]
|
||||
capture_preset = plugin_setting["capture_preset"]
|
||||
override_viewport_options = (
|
||||
capture_preset['Viewport Options']['override_viewport_options']
|
||||
capture_preset["Viewport Options"]["override_viewport_options"]
|
||||
)
|
||||
|
||||
try:
|
||||
preset = lib.load_capture_preset(data=capture_preset)
|
||||
except KeyError as ke:
|
||||
self.log.error('Error loading capture presets: {}'.format(str(ke)))
|
||||
self.log.error("Error loading capture presets: {}".format(str(ke)))
|
||||
preset = {}
|
||||
self.log.info('Using viewport preset: {}'.format(preset))
|
||||
self.log.info("Using viewport preset: {}".format(preset))
|
||||
|
||||
# preset["off_screen"] = False
|
||||
|
||||
preset['camera'] = camera
|
||||
preset['start_frame'] = instance.data["frameStart"]
|
||||
preset['end_frame'] = instance.data["frameStart"]
|
||||
preset['camera_options'] = {
|
||||
preset["camera"] = camera
|
||||
preset["start_frame"] = instance.data["frameStart"]
|
||||
preset["end_frame"] = instance.data["frameStart"]
|
||||
preset["camera_options"] = {
|
||||
"displayGateMask": False,
|
||||
"displayResolution": False,
|
||||
"displayFilmGate": False,
|
||||
|
|
@ -74,14 +74,14 @@ class ExtractThumbnail(publish.Extractor):
|
|||
# used, if not then the asset resolution is
|
||||
# used
|
||||
if review_instance_width and review_instance_height:
|
||||
preset['width'] = review_instance_width
|
||||
preset['height'] = review_instance_height
|
||||
preset["width"] = review_instance_width
|
||||
preset["height"] = review_instance_height
|
||||
elif width_preset and height_preset:
|
||||
preset['width'] = width_preset
|
||||
preset['height'] = height_preset
|
||||
preset["width"] = width_preset
|
||||
preset["height"] = height_preset
|
||||
elif asset_width and asset_height:
|
||||
preset['width'] = asset_width
|
||||
preset['height'] = asset_height
|
||||
preset["width"] = asset_width
|
||||
preset["height"] = asset_height
|
||||
|
||||
# Create temp directory for thumbnail
|
||||
# - this is to avoid "override" of source file
|
||||
|
|
@ -96,8 +96,8 @@ class ExtractThumbnail(publish.Extractor):
|
|||
|
||||
self.log.info("Outputting images to %s" % path)
|
||||
|
||||
preset['filename'] = path
|
||||
preset['overwrite'] = True
|
||||
preset["filename"] = path
|
||||
preset["overwrite"] = True
|
||||
|
||||
pm.refresh(f=True)
|
||||
|
||||
|
|
@ -123,14 +123,14 @@ class ExtractThumbnail(publish.Extractor):
|
|||
preset["viewport_options"] = {"imagePlane": image_plane}
|
||||
|
||||
# Disable Pan/Zoom.
|
||||
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
|
||||
preset.pop("pan_zoom", None)
|
||||
preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"]
|
||||
|
||||
with lib.maintained_time():
|
||||
# Force viewer to False in call to capture because we have our own
|
||||
# viewer opening call to allow a signal to trigger between
|
||||
# playblast and viewer
|
||||
preset['viewer'] = False
|
||||
preset["viewer"] = False
|
||||
|
||||
# Update preset with current panel setting
|
||||
# if override_viewport_options is turned off
|
||||
|
|
@ -145,17 +145,15 @@ class ExtractThumbnail(publish.Extractor):
|
|||
|
||||
_, thumbnail = os.path.split(playblast)
|
||||
|
||||
cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
|
||||
|
||||
self.log.info("file list {}".format(thumbnail))
|
||||
|
||||
if "representations" not in instance.data:
|
||||
instance.data["representations"] = []
|
||||
|
||||
representation = {
|
||||
'name': 'thumbnail',
|
||||
'ext': 'jpg',
|
||||
'files': thumbnail,
|
||||
"name": "thumbnail",
|
||||
"ext": "jpg",
|
||||
"files": thumbnail,
|
||||
"stagingDir": dst_staging,
|
||||
"thumbnail": True
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,6 +13,22 @@ from openpype.pipeline.publish import (
|
|||
from openpype.hosts.maya.api import lib
|
||||
|
||||
|
||||
def convert_to_int_or_float(string_value):
    # Order of types is important here since float could also convert a
    # string representation of an integer.
    types = [int, float]
    for t in types:
        try:
            result = t(string_value)
        except ValueError:
            continue
        else:
            return result

    # Neither integer nor float.
    return string_value
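

# Example behaviour (values are illustrative): integers are preferred over
# floats, and anything non-numeric is returned unchanged.
#
#   convert_to_int_or_float("2")     -> 2
#   convert_to_int_or_float("2.5")   -> 2.5
#   convert_to_int_or_float("exr")   -> "exr"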
|
||||
|
||||
|
||||
def get_redshift_image_format_labels():
|
||||
"""Return nice labels for Redshift image formats."""
|
||||
var = "$g_redshiftImageFormatLabels"
|
||||
|
|
@ -242,10 +258,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))
|
||||
|
||||
# load validation definitions from settings
|
||||
validation_settings = (
|
||||
instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get( # noqa: E501
|
||||
"{}_render_attributes".format(renderer)) or []
|
||||
)
|
||||
settings_lights_flag = instance.context.data["project_settings"].get(
|
||||
"maya", {}).get(
|
||||
"RenderSettings", {}).get(
|
||||
|
|
@ -253,17 +265,67 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
|
||||
instance_lights_flag = instance.data.get("renderSetupIncludeLights")
|
||||
if settings_lights_flag != instance_lights_flag:
|
||||
cls.log.warning('Instance flag for "Render Setup Include Lights" is set to {0} and Settings flag is set to {1}'.format(instance_lights_flag, settings_lights_flag)) # noqa
|
||||
cls.log.warning(
|
||||
"Instance flag for \"Render Setup Include Lights\" is set to "
|
||||
"{} and Settings flag is set to {}".format(
|
||||
instance_lights_flag, settings_lights_flag
|
||||
)
|
||||
)
|
||||
|
||||
# go through definitions and test if such node.attribute exists.
|
||||
# if so, compare its value from the one required.
|
||||
for attr, value in OrderedDict(validation_settings).items():
|
||||
cls.log.debug("{}: {}".format(attr, value))
|
||||
if "." not in attr:
|
||||
cls.log.warning("Skipping invalid attribute defined in "
|
||||
"validation settings: '{}'".format(attr))
|
||||
for attribute, data in cls.get_nodes(instance, renderer).items():
|
||||
# Validate the settings has values.
|
||||
if not data["values"]:
|
||||
cls.log.error(
|
||||
"Settings for {}.{} is missing values.".format(
|
||||
node, attribute
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
for node in data["nodes"]:
|
||||
try:
|
||||
render_value = cmds.getAttr(
|
||||
"{}.{}".format(node, attribute)
|
||||
)
|
||||
except RuntimeError:
|
||||
invalid = True
|
||||
cls.log.error(
|
||||
"Cannot get value of {}.{}".format(node, attribute)
|
||||
)
|
||||
else:
|
||||
if render_value not in data["values"]:
|
||||
invalid = True
|
||||
cls.log.error(
|
||||
"Invalid value {} set on {}.{}. Expecting "
|
||||
"{}".format(
|
||||
render_value, node, attribute, data["values"]
|
||||
)
|
||||
)
|
||||
|
||||
return invalid
|
||||
|
||||
@classmethod
|
||||
def get_nodes(cls, instance, renderer):
|
||||
maya_settings = instance.context.data["project_settings"]["maya"]
|
||||
validation_settings = (
|
||||
maya_settings["publish"]["ValidateRenderSettings"].get(
|
||||
"{}_render_attributes".format(renderer)
|
||||
) or []
|
||||
)
|
||||
result = {}
|
||||
for attr, values in OrderedDict(validation_settings).items():
|
||||
cls.log.debug("{}: {}".format(attr, values))
|
||||
if "." not in attr:
|
||||
cls.log.warning(
|
||||
"Skipping invalid attribute defined in validation "
|
||||
"settings: \"{}\"".format(attr)
|
||||
)
|
||||
continue
|
||||
|
||||
values = [convert_to_int_or_float(v) for v in values]
|
||||
|
||||
node_type, attribute_name = attr.split(".", 1)
|
||||
|
||||
# first get node of that type
|
||||
|
|
@ -271,28 +333,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
|
||||
if not nodes:
|
||||
cls.log.warning(
|
||||
"No nodes of type '{}' found.".format(node_type))
|
||||
"No nodes of type \"{}\" found.".format(node_type)
|
||||
)
|
||||
continue
|
||||
|
||||
for node in nodes:
|
||||
try:
|
||||
render_value = cmds.getAttr(
|
||||
"{}.{}".format(node, attribute_name))
|
||||
except RuntimeError:
|
||||
invalid = True
|
||||
cls.log.error(
|
||||
"Cannot get value of {}.{}".format(
|
||||
node, attribute_name))
|
||||
else:
|
||||
if str(value) != str(render_value):
|
||||
invalid = True
|
||||
cls.log.error(
|
||||
("Invalid value {} set on {}.{}. "
|
||||
"Expecting {}").format(
|
||||
render_value, node, attribute_name, value)
|
||||
)
|
||||
result[attribute_name] = {"nodes": nodes, "values": values}
|
||||
|
||||
return invalid
|
||||
return result
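# The returned mapping has the shape (example values are illustrative):
# attribute name -> nodes of the matching type plus the accepted values
# loaded from the validation settings, e.g.
#
#   {"imageFormat": {"nodes": ["redshiftOptions"], "values": [1]}}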
|
||||
|
||||
@classmethod
|
||||
def repair(cls, instance):
|
||||
|
|
@ -305,6 +352,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
|
|||
"{aov_separator}", instance.data.get("aovSeparator", "_")
|
||||
)
|
||||
|
||||
for attribute, data in cls.get_nodes(instance, renderer).items():
|
||||
if not data["values"]:
|
||||
continue
|
||||
for node in data["nodes"]:
|
||||
lib.set_attribute(attribute, data["values"][0], node)
|
||||
|
||||
with lib.renderlayer(layer_node):
|
||||
default = lib.RENDER_ATTRS['default']
|
||||
render_attrs = lib.RENDER_ATTRS.get(renderer, default)
|
||||
|
|
|
|||
|
|
@ -48,6 +48,18 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):
|
|||
|
||||
yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True)
|
||||
|
||||
if not yeti_loaded and not cmds.ls(type="pgYetiMaya"):
|
||||
# The yeti plug-in is available and loaded so at
|
||||
# this point we don't really care whether the scene
|
||||
# has any yeti callback set or not since if the callback
|
||||
# is there it wouldn't error and if it weren't then
|
||||
# nothing happens because there are no yeti nodes.
|
||||
cls.log.info(
|
||||
"Yeti is loaded but no yeti nodes were found. "
|
||||
"Callback validation skipped.."
|
||||
)
|
||||
return False
|
||||
|
||||
renderer = instance.data["renderer"]
|
||||
if renderer == "redshift":
|
||||
cls.log.info("Redshift ignores any pre and post render callbacks")
|
||||
|
|
|
|||
|
|
@ -59,21 +59,7 @@ def get_all_asset_nodes():
|
|||
Returns:
|
||||
list: list of dictionaries
|
||||
"""
|
||||
|
||||
host = registered_host()
|
||||
|
||||
nodes = []
|
||||
for container in host.ls():
|
||||
# We are not interested in looks but assets!
|
||||
if container["loader"] == "LookLoader":
|
||||
continue
|
||||
|
||||
# Gather all information
|
||||
container_name = container["objectName"]
|
||||
nodes += lib.get_container_members(container_name)
|
||||
|
||||
nodes = list(set(nodes))
|
||||
return nodes
|
||||
return cmds.ls(dag=True, noIntermediate=True, long=True)
|
||||
|
||||
|
||||
def create_asset_id_hash(nodes):
|
||||
|
|
|
|||
|
|
@ -54,22 +54,19 @@ class LoadBackdropNodes(load.LoaderPlugin):
|
|||
version = context['version']
|
||||
version_data = version.get("data", {})
|
||||
vname = version.get("name", None)
|
||||
first = version_data.get("frameStart", None)
|
||||
last = version_data.get("frameEnd", None)
|
||||
namespace = namespace or context['asset']['name']
|
||||
colorspace = version_data.get("colorspace", None)
|
||||
object_name = "{}_{}".format(name, namespace)
|
||||
|
||||
# prepare data for imprinting
|
||||
# add additional metadata from the version to imprint to Avalon knob
|
||||
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
|
||||
"source", "author", "fps"]
|
||||
add_keys = ["source", "author", "fps"]
|
||||
|
||||
data_imprint = {"frameStart": first,
|
||||
"frameEnd": last,
|
||||
"version": vname,
|
||||
"colorspaceInput": colorspace,
|
||||
"objectName": object_name}
|
||||
data_imprint = {
|
||||
"version": vname,
|
||||
"colorspaceInput": colorspace,
|
||||
"objectName": object_name
|
||||
}
|
||||
|
||||
for k in add_keys:
|
||||
data_imprint.update({k: version_data[k]})
|
||||
|
|
@ -204,18 +201,13 @@ class LoadBackdropNodes(load.LoaderPlugin):
|
|||
name = container['name']
|
||||
version_data = version_doc.get("data", {})
|
||||
vname = version_doc.get("name", None)
|
||||
first = version_data.get("frameStart", None)
|
||||
last = version_data.get("frameEnd", None)
|
||||
namespace = container['namespace']
|
||||
colorspace = version_data.get("colorspace", None)
|
||||
object_name = "{}_{}".format(name, namespace)
|
||||
|
||||
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
|
||||
"source", "author", "fps"]
|
||||
add_keys = ["source", "author", "fps"]
|
||||
|
||||
data_imprint = {"representation": str(representation["_id"]),
|
||||
"frameStart": first,
|
||||
"frameEnd": last,
|
||||
"version": vname,
|
||||
"colorspaceInput": colorspace,
|
||||
"objectName": object_name}
|
||||
|
|
|
|||
|
|
@ -51,38 +51,10 @@ class CollectBackdrops(pyblish.api.InstancePlugin):
|
|||
instance.data["label"] = "{0} ({1} nodes)".format(
|
||||
bckn.name(), len(instance.data["transientData"]["childNodes"]))
|
||||
|
||||
instance.data["families"].append(instance.data["family"])
|
||||
|
||||
# Get frame range
|
||||
handle_start = instance.context.data["handleStart"]
|
||||
handle_end = instance.context.data["handleEnd"]
|
||||
first_frame = int(nuke.root()["first_frame"].getValue())
|
||||
last_frame = int(nuke.root()["last_frame"].getValue())
|
||||
|
||||
# get version
|
||||
version = instance.context.data.get('version')
|
||||
|
||||
if not version:
|
||||
raise RuntimeError("Script name has no version in the name.")
|
||||
if version:
|
||||
instance.data['version'] = version
|
||||
|
||||
instance.data['version'] = version
|
||||
|
||||
# Add version data to instance
|
||||
version_data = {
|
||||
"handles": handle_start,
|
||||
"handleStart": handle_start,
|
||||
"handleEnd": handle_end,
|
||||
"frameStart": first_frame + handle_start,
|
||||
"frameEnd": last_frame - handle_end,
|
||||
"version": int(version),
|
||||
"families": [instance.data["family"]] + instance.data["families"],
|
||||
"subset": instance.data["subset"],
|
||||
"fps": instance.context.data["fps"]
|
||||
}
|
||||
|
||||
instance.data.update({
|
||||
"versionData": version_data,
|
||||
"frameStart": first_frame,
|
||||
"frameEnd": last_frame
|
||||
})
|
||||
self.log.info("Backdrop instance collected: `{}`".format(instance))
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ class ExtractReviewData(publish.Extractor):
|
|||
# review can be removed since `ProcessSubmittedJobOnFarm` will create
|
||||
# reviewable representation if needed
|
||||
if (
|
||||
"render.farm" in instance.data["families"]
|
||||
instance.data.get("farm")
|
||||
and "review" in instance.data["families"]
|
||||
):
|
||||
instance.data["families"].remove("review")
|
||||
|
|
|
|||
|
|
@ -49,7 +49,12 @@ class ExtractReviewDataLut(publish.Extractor):
|
|||
exporter.stagingDir, exporter.file).replace("\\", "/")
|
||||
instance.data["representations"] += data["representations"]
|
||||
|
||||
if "render.farm" in families:
|
||||
# review can be removed since `ProcessSubmittedJobOnFarm` will create
|
||||
# reviewable representation if needed
|
||||
if (
|
||||
instance.data.get("farm")
|
||||
and "review" in instance.data["families"]
|
||||
):
|
||||
instance.data["families"].remove("review")
|
||||
|
||||
self.log.debug(
|
||||
|
|
|
|||
|
|
@ -105,10 +105,7 @@ class ExtractReviewDataMov(publish.Extractor):
|
|||
self, instance, o_name, o_data["extension"],
|
||||
multiple_presets)
|
||||
|
||||
if (
|
||||
"render.farm" in families or
|
||||
"prerender.farm" in families
|
||||
):
|
||||
if instance.data.get("farm"):
|
||||
if "review" in instance.data["families"]:
|
||||
instance.data["families"].remove("review")
|
||||
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ class ExtractThumbnail(publish.Extractor):
|
|||
|
||||
|
||||
def process(self, instance):
|
||||
if "render.farm" in instance.data["families"]:
|
||||
if instance.data.get("farm"):
|
||||
return
|
||||
|
||||
with napi.maintained_selection():
|
||||
|
|
|
|||
|
|
@ -66,11 +66,11 @@ class MainThreadItem:
|
|||
return self._result
|
||||
|
||||
def execute(self):
|
||||
"""Execute callback and store it's result.
|
||||
"""Execute callback and store its result.
|
||||
|
||||
Method must be called from main thread. Item is marked as `done`
|
||||
when callback execution finishes. Store output of callback or exception
|
||||
information when callback raise one.
|
||||
information when callback raises one.
|
||||
"""
|
||||
log.debug("Executing process in main thread")
|
||||
if self.done:
|
||||
|
|
|
|||
|
|
@ -389,11 +389,11 @@ class MainThreadItem:
|
|||
self.kwargs = kwargs
|
||||
|
||||
def execute(self):
|
||||
"""Execute callback and store it's result.
|
||||
"""Execute callback and store its result.
|
||||
|
||||
Method must be called from main thread. Item is marked as `done`
|
||||
when callback execution finishes. Store output of callback or exception
|
||||
information when callback raise one.
|
||||
information when callback raises one.
|
||||
"""
|
||||
log.debug("Executing process in main thread")
|
||||
if self.done:
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ class TVPaintLegacyConverted(SubsetConvertorPlugin):
|
|||
self._convert_render_layers(
|
||||
to_convert["renderLayer"], current_instances)
|
||||
self._convert_render_passes(
|
||||
to_convert["renderpass"], current_instances)
|
||||
to_convert["renderPass"], current_instances)
|
||||
self._convert_render_scenes(
|
||||
to_convert["renderScene"], current_instances)
|
||||
self._convert_workfiles(
|
||||
|
|
@ -116,7 +116,7 @@ class TVPaintLegacyConverted(SubsetConvertorPlugin):
|
|||
render_layers_by_group_id = {}
|
||||
for instance in current_instances:
|
||||
if instance.get("creator_identifier") == "render.layer":
|
||||
group_id = instance["creator_identifier"]["group_id"]
|
||||
group_id = instance["creator_attributes"]["group_id"]
|
||||
render_layers_by_group_id[group_id] = instance
|
||||
|
||||
for render_pass in render_passes:
|
||||
|
|
|
|||
|
|
@ -415,11 +415,11 @@ class CreateRenderPass(TVPaintCreator):
|
|||
.get("creator_attributes", {})
|
||||
.get("render_layer_instance_id")
|
||||
)
|
||||
render_layer_info = render_layers.get(render_layer_instance_id)
|
||||
render_layer_info = render_layers.get(render_layer_instance_id, {})
|
||||
self.update_instance_labels(
|
||||
instance_data,
|
||||
render_layer_info["variant"],
|
||||
render_layer_info["template_data"]
|
||||
render_layer_info.get("variant"),
|
||||
render_layer_info.get("template_data")
|
||||
)
|
||||
instance = CreatedInstance.from_existing(instance_data, self)
|
||||
self._add_instance_to_context(instance)
|
||||
|
|
@ -607,11 +607,11 @@ class CreateRenderPass(TVPaintCreator):
|
|||
current_instances = self.host.list_instances()
|
||||
render_layers = [
|
||||
{
|
||||
"value": instance["instance_id"],
|
||||
"label": instance["subset"]
|
||||
"value": inst["instance_id"],
|
||||
"label": inst["subset"]
|
||||
}
|
||||
for instance in current_instances
|
||||
if instance["creator_identifier"] == CreateRenderlayer.identifier
|
||||
for inst in current_instances
|
||||
if inst.get("creator_identifier") == CreateRenderlayer.identifier
|
||||
]
|
||||
if not render_layers:
|
||||
render_layers.append({"value": None, "label": "N/A"})
|
||||
|
|
@ -697,6 +697,7 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator):
|
|||
["create"]
|
||||
["auto_detect_render"]
|
||||
)
|
||||
self.enabled = plugin_settings.get("enabled", False)
|
||||
self.allow_group_rename = plugin_settings["allow_group_rename"]
|
||||
self.group_name_template = plugin_settings["group_name_template"]
|
||||
self.group_idx_offset = plugin_settings["group_idx_offset"]
|
||||
|
|
|
|||
|
|
@ -22,9 +22,11 @@ class CollectOutputFrameRange(pyblish.api.InstancePlugin):
|
|||
context = instance.context
|
||||
|
||||
frame_start = asset_doc["data"]["frameStart"]
|
||||
fps = asset_doc["data"]["fps"]
|
||||
frame_end = frame_start + (
|
||||
context.data["sceneMarkOut"] - context.data["sceneMarkIn"]
|
||||
)
|
||||
instance.data["fps"] = fps
|
||||
instance.data["frameStart"] = frame_start
|
||||
instance.data["frameEnd"] = frame_end
|
||||
self.log.info(
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.pipeline import (
|
||||
PublishXmlValidationError,
|
||||
OptionalPyblishPluginMixin,
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.pipeline import (
|
||||
list_instances,
|
||||
write_instances,
|
||||
|
|
@ -31,7 +34,10 @@ class FixAssetNames(pyblish.api.Action):
|
|||
write_instances(new_instance_items)
|
||||
|
||||
|
||||
class ValidateAssetNames(pyblish.api.ContextPlugin):
|
||||
class ValidateAssetName(
|
||||
OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin
|
||||
):
|
||||
"""Validate assset name present on instance.
|
||||
|
||||
Asset name on instance should be the same as context's.
|
||||
|
|
@ -43,6 +49,8 @@ class ValidateAssetNames(pyblish.api.ContextPlugin):
|
|||
actions = [FixAssetNames]
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
return
|
||||
context_asset_name = context.data["asset"]
|
||||
for instance in context:
|
||||
asset_name = instance.data.get("asset")
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
|
|||
families = ["review", "render"]
|
||||
|
||||
def process(self, instance):
|
||||
layers = instance.data["layers"]
|
||||
layers = instance.data.get("layers")
|
||||
# Instance have empty layers
|
||||
# - it is not job of this validator to check that
|
||||
if not layers:
|
||||
|
|
|
|||
|
|
@ -1,7 +1,10 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.pipeline import (
|
||||
PublishXmlValidationError,
|
||||
OptionalPyblishPluginMixin,
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.lib import execute_george
|
||||
|
||||
|
||||
|
|
@ -23,7 +26,10 @@ class ValidateMarksRepair(pyblish.api.Action):
|
|||
)
|
||||
|
||||
|
||||
class ValidateMarks(pyblish.api.ContextPlugin):
|
||||
class ValidateMarks(
|
||||
OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin
|
||||
):
|
||||
"""Validate mark in and out are enabled and it's duration.
|
||||
|
||||
Mark In/Out does not have to match frameStart and frameEnd but duration is
|
||||
|
|
@ -59,6 +65,9 @@ class ValidateMarks(pyblish.api.ContextPlugin):
|
|||
}
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
return
|
||||
|
||||
current_data = {
|
||||
"markIn": context.data["sceneMarkIn"],
|
||||
"markInState": context.data["sceneMarkInState"],
|
||||
|
|
|
|||
|
|
@ -1,11 +1,17 @@
|
|||
import json
|
||||
|
||||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.pipeline import (
|
||||
PublishXmlValidationError,
|
||||
OptionalPyblishPluginMixin,
|
||||
)
|
||||
|
||||
|
||||
# TODO @iLliCiTiT add fix action for fps
|
||||
class ValidateProjectSettings(pyblish.api.ContextPlugin):
|
||||
class ValidateProjectSettings(
|
||||
OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin
|
||||
):
|
||||
"""Validate scene settings against database."""
|
||||
|
||||
label = "Validate Scene Settings"
|
||||
|
|
@ -13,6 +19,9 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin):
|
|||
optional = True
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
return
|
||||
|
||||
expected_data = context.data["assetEntity"]["data"]
|
||||
scene_data = {
|
||||
"fps": context.data.get("sceneFps"),
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
import pyblish.api
|
||||
from openpype.pipeline import PublishXmlValidationError
|
||||
from openpype.pipeline import (
|
||||
PublishXmlValidationError,
|
||||
OptionalPyblishPluginMixin,
|
||||
)
|
||||
from openpype.hosts.tvpaint.api.lib import execute_george
|
||||
|
||||
|
||||
|
|
@ -14,7 +17,10 @@ class RepairStartFrame(pyblish.api.Action):
|
|||
execute_george("tv_startframe 0")
|
||||
|
||||
|
||||
class ValidateStartFrame(pyblish.api.ContextPlugin):
|
||||
class ValidateStartFrame(
|
||||
OptionalPyblishPluginMixin,
|
||||
pyblish.api.ContextPlugin
|
||||
):
|
||||
"""Validate start frame being at frame 0."""
|
||||
|
||||
label = "Validate Start Frame"
|
||||
|
|
@ -24,6 +30,9 @@ class ValidateStartFrame(pyblish.api.ContextPlugin):
|
|||
optional = True
|
||||
|
||||
def process(self, context):
|
||||
if not self.is_active(context.data):
|
||||
return
|
||||
|
||||
start_frame = execute_george("tv_startframe")
|
||||
if start_frame == 0:
|
||||
return
|
||||
|
|
|
|||
|
|
@ -5,29 +5,38 @@ import re
|
|||
import subprocess
|
||||
from distutils import dir_util
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from typing import List, Union
|
||||
|
||||
import openpype.hosts.unreal.lib as ue_lib
|
||||
|
||||
from qtpy import QtCore
|
||||
|
||||
|
||||
def parse_comp_progress(line: str, progress_signal: QtCore.Signal(int)) -> int:
|
||||
match = re.search('\[[1-9]+/[0-9]+\]', line)
|
||||
def parse_comp_progress(line: str, progress_signal: QtCore.Signal(int)):
|
||||
match = re.search(r"\[[1-9]+/[0-9]+]", line)
|
||||
if match is not None:
|
||||
split: list[str] = match.group().split('/')
|
||||
split: list[str] = match.group().split("/")
|
||||
curr: float = float(split[0][1:])
|
||||
total: float = float(split[1][:-1])
|
||||
progress_signal.emit(int((curr / total) * 100.0))
|
||||
|
||||
|
||||
def parse_prj_progress(line: str, progress_signal: QtCore.Signal(int)) -> int:
|
||||
match = re.search('@progress', line)
|
||||
def parse_prj_progress(line: str, progress_signal: QtCore.Signal(int)):
|
||||
match = re.search("@progress", line)
|
||||
if match is not None:
|
||||
percent_match = re.search('\d{1,3}', line)
|
||||
percent_match = re.search(r"\d{1,3}", line)
|
||||
progress_signal.emit(int(percent_match.group()))
|
||||
|
||||
|
||||
def retrieve_exit_code(line: str):
    match = re.search(r"ExitCode=\d+", line)
    if match is not None:
        split: list[str] = match.group().split("=")
        return int(split[1])

    return None
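

# Example (log lines are illustrative): the helper extracts the numeric exit
# code from UAT/UBT output lines and returns None when the marker is missing.
#
#   retrieve_exit_code("BUILD COMMAND COMPLETED. ExitCode=0")  -> 0
#   retrieve_exit_code("LogInit: Display: Engine ready")       -> None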
|
||||
|
||||
|
||||
class UEProjectGenerationWorker(QtCore.QObject):
|
||||
finished = QtCore.Signal(str)
|
||||
failed = QtCore.Signal(str)
|
||||
|
|
@ -77,16 +86,19 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
if self.dev_mode:
|
||||
stage_count = 4
|
||||
|
||||
self.stage_begin.emit(f'Generating a new UE project ... 1 out of '
|
||||
f'{stage_count}')
|
||||
self.stage_begin.emit(
|
||||
("Generating a new UE project ... 1 out of "
|
||||
f"{stage_count}"))
|
||||
|
||||
commandlet_cmd = [f'{ue_editor_exe.as_posix()}',
|
||||
f'{cmdlet_project.as_posix()}',
|
||||
f'-run=OPGenerateProject',
|
||||
f'{project_file.resolve().as_posix()}']
|
||||
commandlet_cmd = [
|
||||
f"{ue_editor_exe.as_posix()}",
|
||||
f"{cmdlet_project.as_posix()}",
|
||||
"-run=OPGenerateProject",
|
||||
f"{project_file.resolve().as_posix()}",
|
||||
]
|
||||
|
||||
if self.dev_mode:
|
||||
commandlet_cmd.append('-GenerateCode')
|
||||
commandlet_cmd.append("-GenerateCode")
|
||||
|
||||
gen_process = subprocess.Popen(commandlet_cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
|
|
@ -94,24 +106,27 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
|
||||
for line in gen_process.stdout:
|
||||
decoded_line = line.decode(errors="replace")
|
||||
print(decoded_line, end='')
|
||||
print(decoded_line, end="")
|
||||
self.log.emit(decoded_line)
|
||||
gen_process.stdout.close()
|
||||
return_code = gen_process.wait()
|
||||
|
||||
if return_code and return_code != 0:
|
||||
msg = 'Failed to generate ' + self.project_name \
|
||||
+ f' project! Exited with return code {return_code}'
|
||||
msg = (
|
||||
f"Failed to generate {self.project_name} "
|
||||
f"project! Exited with return code {return_code}"
|
||||
)
|
||||
self.failed.emit(msg, return_code)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
print("--- Project has been generated successfully.")
|
||||
self.stage_begin.emit(f'Writing the Engine ID of the build UE ... 1'
|
||||
f' out of {stage_count}')
|
||||
self.stage_begin.emit(
|
||||
(f"Writing the Engine ID of the build UE ... 1"
|
||||
f" out of {stage_count}"))
|
||||
|
||||
if not project_file.is_file():
|
||||
msg = "Failed to write the Engine ID into .uproject file! Can " \
|
||||
"not read!"
|
||||
msg = ("Failed to write the Engine ID into .uproject file! Can "
|
||||
"not read!")
|
||||
self.failed.emit(msg)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
|
|
@ -125,13 +140,14 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
pf.seek(0)
|
||||
json.dump(pf_json, pf, indent=4)
|
||||
pf.truncate()
|
||||
print(f'--- Engine ID has been written into the project file')
|
||||
print("--- Engine ID has been written into the project file")
|
||||
|
||||
self.progress.emit(90)
|
||||
if self.dev_mode:
|
||||
# 2nd stage
|
||||
self.stage_begin.emit(f'Generating project files ... 2 out of '
|
||||
f'{stage_count}')
|
||||
self.stage_begin.emit(
|
||||
(f"Generating project files ... 2 out of "
|
||||
f"{stage_count}"))
|
||||
|
||||
self.progress.emit(0)
|
||||
ubt_path = ue_lib.get_path_to_ubt(self.engine_path,
|
||||
|
|
@ -154,8 +170,8 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
for line in gen_proc.stdout:
|
||||
decoded_line: str = line.decode(errors='replace')
|
||||
print(decoded_line, end='')
|
||||
decoded_line: str = line.decode(errors="replace")
|
||||
print(decoded_line, end="")
|
||||
self.log.emit(decoded_line)
|
||||
parse_prj_progress(decoded_line, self.progress)
|
||||
|
||||
|
|
@ -163,13 +179,13 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
return_code = gen_proc.wait()
|
||||
|
||||
if return_code and return_code != 0:
|
||||
msg = 'Failed to generate project files! ' \
|
||||
f'Exited with return code {return_code}'
|
||||
msg = ("Failed to generate project files! "
|
||||
f"Exited with return code {return_code}")
|
||||
self.failed.emit(msg, return_code)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
self.stage_begin.emit(f'Building the project ... 3 out of '
|
||||
f'{stage_count}')
|
||||
self.stage_begin.emit(
|
||||
f"Building the project ... 3 out of {stage_count}")
|
||||
self.progress.emit(0)
|
||||
# 3rd stage
|
||||
build_prj_cmd = [ubt_path.as_posix(),
|
||||
|
|
@ -177,16 +193,16 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
arch,
|
||||
"Development",
|
||||
"-TargetType=Editor",
|
||||
f'-Project={project_file}',
|
||||
f'{project_file}',
|
||||
f"-Project={project_file}",
|
||||
f"{project_file}",
|
||||
"-IgnoreJunk"]
|
||||
|
||||
build_prj_proc = subprocess.Popen(build_prj_cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
for line in build_prj_proc.stdout:
|
||||
decoded_line: str = line.decode(errors='replace')
|
||||
print(decoded_line, end='')
|
||||
decoded_line: str = line.decode(errors="replace")
|
||||
print(decoded_line, end="")
|
||||
self.log.emit(decoded_line)
|
||||
parse_comp_progress(decoded_line, self.progress)
|
||||
|
||||
|
|
@ -194,16 +210,17 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
return_code = build_prj_proc.wait()
|
||||
|
||||
if return_code and return_code != 0:
|
||||
msg = 'Failed to build project! ' \
|
||||
f'Exited with return code {return_code}'
|
||||
msg = ("Failed to build project! "
|
||||
f"Exited with return code {return_code}")
|
||||
self.failed.emit(msg, return_code)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
# ensure we have PySide2 installed in engine
|
||||
|
||||
self.progress.emit(0)
|
||||
self.stage_begin.emit(f'Checking PySide2 installation... {stage_count}'
|
||||
f' out of {stage_count}')
|
||||
self.stage_begin.emit(
|
||||
(f"Checking PySide2 installation... {stage_count} "
|
||||
f" out of {stage_count}"))
|
||||
python_path = None
|
||||
if platform.system().lower() == "windows":
|
||||
python_path = self.engine_path / ("Engine/Binaries/ThirdParty/"
|
||||
|
|
@ -225,9 +242,30 @@ class UEProjectGenerationWorker(QtCore.QObject):
|
|||
msg = f"Unreal Python not found at {python_path}"
|
||||
self.failed.emit(msg, 1)
|
||||
raise RuntimeError(msg)
|
||||
subprocess.check_call(
|
||||
[python_path.as_posix(), "-m", "pip", "install", "pyside2"]
|
||||
)
|
||||
pyside_cmd = [python_path.as_posix(),
|
||||
"-m",
|
||||
"pip",
|
||||
"install",
|
||||
"pyside2"]
|
||||
|
||||
pyside_install = subprocess.Popen(pyside_cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
|
||||
for line in pyside_install.stdout:
|
||||
decoded_line: str = line.decode(errors="replace")
|
||||
print(decoded_line, end="")
|
||||
self.log.emit(decoded_line)
|
||||
|
||||
pyside_install.stdout.close()
|
||||
return_code = pyside_install.wait()
|
||||
|
||||
if return_code and return_code != 0:
|
||||
msg = ("Failed to create the project! "
|
||||
"The installation of PySide2 has failed!")
|
||||
self.failed.emit(msg, return_code)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
self.progress.emit(100)
|
||||
self.finished.emit("Project successfully built!")
|
||||
|
||||
|
|
@ -266,26 +304,30 @@ class UEPluginInstallWorker(QtCore.QObject):
|
|||
|
||||
# in order to successfully build the plugin,
|
||||
# It must be built outside the Engine directory and then moved
|
||||
build_plugin_cmd: List[str] = [f'{uat_path.as_posix()}',
|
||||
'BuildPlugin',
|
||||
f'-Plugin={uplugin_path.as_posix()}',
|
||||
f'-Package={temp_dir.as_posix()}']
|
||||
build_plugin_cmd: List[str] = [f"{uat_path.as_posix()}",
|
||||
"BuildPlugin",
|
||||
f"-Plugin={uplugin_path.as_posix()}",
|
||||
f"-Package={temp_dir.as_posix()}"]
|
||||
|
||||
build_proc = subprocess.Popen(build_plugin_cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
return_code: Union[None, int] = None
|
||||
for line in build_proc.stdout:
|
||||
decoded_line: str = line.decode(errors='replace')
|
||||
print(decoded_line, end='')
|
||||
decoded_line: str = line.decode(errors="replace")
|
||||
print(decoded_line, end="")
|
||||
self.log.emit(decoded_line)
|
||||
if return_code is None:
|
||||
return_code = retrieve_exit_code(decoded_line)
|
||||
parse_comp_progress(decoded_line, self.progress)
|
||||
|
||||
build_proc.stdout.close()
|
||||
return_code = build_proc.wait()
|
||||
build_proc.wait()
|
||||
|
||||
if return_code and return_code != 0:
|
||||
msg = 'Failed to build plugin' \
|
||||
f' project! Exited with return code {return_code}'
|
||||
msg = ("Failed to build plugin"
|
||||
f" project! Exited with return code {return_code}")
|
||||
dir_util.remove_tree(temp_dir.as_posix())
|
||||
self.failed.emit(msg, return_code)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
|
|
|
|||
|
|
@ -889,7 +889,8 @@ class ApplicationLaunchContext:
|
|||
self.modules_manager = ModulesManager()
|
||||
|
||||
# Logger
|
||||
logger_name = "{}-{}".format(self.__class__.__name__, self.app_name)
|
||||
logger_name = "{}-{}".format(self.__class__.__name__,
|
||||
self.application.full_name)
|
||||
self.log = Logger.get_logger(logger_name)
|
||||
|
||||
self.executable = executable
|
||||
|
|
@ -1246,7 +1247,7 @@ class ApplicationLaunchContext:
|
|||
args_len_str = " ({})".format(len(args))
|
||||
self.log.info(
|
||||
"Launching \"{}\" with args{}: {}".format(
|
||||
self.app_name, args_len_str, args
|
||||
self.application.full_name, args_len_str, args
|
||||
)
|
||||
)
|
||||
self.launch_args = args
|
||||
|
|
@ -1271,7 +1272,9 @@ class ApplicationLaunchContext:
|
|||
exc_info=True
|
||||
)
|
||||
|
||||
self.log.debug("Launch of {} finished.".format(self.app_name))
|
||||
self.log.debug("Launch of {} finished.".format(
|
||||
self.application.full_name
|
||||
))
|
||||
|
||||
return self.process
|
||||
|
||||
|
|
@ -1508,8 +1511,8 @@ def prepare_app_environments(
|
|||
if key in source_env:
|
||||
source_env[key] = value
|
||||
|
||||
# `added_env_keys` has debug purpose
|
||||
added_env_keys = {app.group.name, app.name}
|
||||
# `app_and_tool_labels` has debug purpose
|
||||
app_and_tool_labels = [app.full_name]
|
||||
# Environments for application
|
||||
environments = [
|
||||
app.group.environment,
|
||||
|
|
@ -1532,15 +1535,14 @@ def prepare_app_environments(
|
|||
for group_name in sorted(groups_by_name.keys()):
|
||||
group = groups_by_name[group_name]
|
||||
environments.append(group.environment)
|
||||
added_env_keys.add(group_name)
|
||||
for tool_name in sorted(tool_by_group_name[group_name].keys()):
|
||||
tool = tool_by_group_name[group_name][tool_name]
|
||||
environments.append(tool.environment)
|
||||
added_env_keys.add(tool.name)
|
||||
app_and_tool_labels.append(tool.full_name)
|
||||
|
||||
log.debug(
|
||||
"Will add environments for apps and tools: {}".format(
|
||||
", ".join(added_env_keys)
|
||||
", ".join(app_and_tool_labels)
|
||||
)
|
||||
)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,6 +8,8 @@ import tempfile
|
|||
from .log import Logger
|
||||
from .vendor_bin_utils import find_executable
|
||||
|
||||
from .openpype_version import is_running_from_build
|
||||
|
||||
# MSDN process creation flag (Windows only)
|
||||
CREATE_NO_WINDOW = 0x08000000
|
||||
|
||||
|
|
@ -102,6 +104,10 @@ def run_subprocess(*args, **kwargs):
|
|||
if (
|
||||
platform.system().lower() == "windows"
|
||||
and "creationflags" not in kwargs
|
||||
# shell=True already tries to hide the console window
|
||||
# and passing these creationflags then shows the window again
|
||||
# so we avoid it for shell=True cases
|
||||
and kwargs.get("shell") is not True
|
||||
):
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
|
|
@ -196,6 +202,11 @@ def run_openpype_process(*args, **kwargs):
|
|||
# Skip envs that can affect OpenPype process
|
||||
# - fill more if you find more
|
||||
env = clean_envs_for_openpype_process(os.environ)
|
||||
|
||||
# Only keep OpenPype version if we are running from build.
|
||||
if not is_running_from_build():
|
||||
env.pop("OPENPYPE_VERSION", None)
|
||||
|
||||
return run_subprocess(args, env=env, **kwargs)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -13,6 +13,16 @@ else:
|
|||
from shutil import copyfile
|
||||
|
||||
|
||||
class DuplicateDestinationError(ValueError):
    """Error raised when transfer destination already exists in queue.

    The error is only raised if `allow_queue_replacements` is False on the
    FileTransaction instance and the added file to transfer has a different
    src file than the one already detected in the queue.

    """
|
||||
|
||||
|
||||
class FileTransaction(object):
|
||||
"""File transaction with rollback options.
|
||||
|
||||
|
|
@ -44,7 +54,7 @@ class FileTransaction(object):
|
|||
MODE_COPY = 0
|
||||
MODE_HARDLINK = 1
|
||||
|
||||
def __init__(self, log=None):
|
||||
def __init__(self, log=None, allow_queue_replacements=False):
|
||||
if log is None:
|
||||
log = logging.getLogger("FileTransaction")
|
||||
|
||||
|
|
@ -60,6 +70,8 @@ class FileTransaction(object):
|
|||
# Backup file location mapping to original locations
|
||||
self._backup_to_original = {}
|
||||
|
||||
self._allow_queue_replacements = allow_queue_replacements
|
||||
|
||||
def add(self, src, dst, mode=MODE_COPY):
|
||||
"""Add a new file to transfer queue.
|
||||
|
||||
|
|
@ -82,6 +94,14 @@ class FileTransaction(object):
|
|||
src, dst))
|
||||
return
|
||||
else:
|
||||
if not self._allow_queue_replacements:
|
||||
raise DuplicateDestinationError(
|
||||
"Transfer to destination is already in queue: "
|
||||
"{} -> {}. It's not allowed to be replaced by "
|
||||
"a new transfer from {}".format(
|
||||
queued_src, dst, src
|
||||
))
|
||||
|
||||
self.log.warning("File transfer in queue replaced..")
|
||||
self.log.debug(
|
||||
"Removed from queue: {} -> {} replaced by {} -> {}".format(
|
||||
|
|
|
|||
|
|
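Note: a hedged usage sketch of the new allow_queue_replacements flag; the import path is assumed from where FileTransaction lives in openpype.lib, and the file paths are placeholders.

from openpype.lib.file_transaction import (
    FileTransaction,
    DuplicateDestinationError,
)

transaction = FileTransaction(allow_queue_replacements=False)
transaction.add("/tmp/render_v001.exr", "/publish/shot010_beauty.exr")

try:
    # Queuing a different source for the same destination is rejected
    # when replacements are not allowed.
    transaction.add("/tmp/render_v002.exr", "/publish/shot010_beauty.exr")
except DuplicateDestinationError as exc:
    print(exc)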
@ -224,18 +224,26 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
|
|||
|
||||
def _check_args_returncode(args):
|
||||
try:
|
||||
# Python 2 compatibility where DEVNULL is not available
|
||||
kwargs = {}
|
||||
if platform.system().lower() == "windows":
|
||||
kwargs["creationflags"] = (
|
||||
subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
| getattr(subprocess, "DETACHED_PROCESS", 0)
|
||||
| getattr(subprocess, "CREATE_NO_WINDOW", 0)
|
||||
)
|
||||
|
||||
if hasattr(subprocess, "DEVNULL"):
|
||||
proc = subprocess.Popen(
|
||||
args,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
**kwargs
|
||||
)
|
||||
proc.wait()
|
||||
else:
|
||||
with open(os.devnull, "w") as devnull:
|
||||
proc = subprocess.Popen(
|
||||
args, stdout=devnull, stderr=devnull,
|
||||
args, stdout=devnull, stderr=devnull, **kwargs
|
||||
)
|
||||
proc.wait()
|
||||
|
||||
|
|
@ -375,7 +383,7 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
|
|||
# Look to PATH for the tool
|
||||
if not tool_executable_path:
|
||||
from_path = find_executable(tool)
|
||||
if from_path and _oiio_executable_validation(from_path):
|
||||
if from_path and _ffmpeg_executable_validation(from_path):
|
||||
tool_executable_path = from_path
|
||||
|
||||
CachedToolPaths.cache_executable_path(tool, tool_executable_path)
@ -6,34 +6,22 @@ import datetime
|
|||
import requests
|
||||
from .constants import (
|
||||
CLOCKIFY_ENDPOINT,
|
||||
ADMIN_PERMISSION_NAMES
|
||||
ADMIN_PERMISSION_NAMES,
|
||||
)
|
||||
|
||||
from openpype.lib.local_settings import OpenPypeSecureRegistry
|
||||
|
||||
|
||||
def time_check(obj):
|
||||
if obj.request_counter < 10:
|
||||
obj.request_counter += 1
|
||||
return
|
||||
|
||||
wait_time = 1 - (time.time() - obj.request_time)
|
||||
if wait_time > 0:
|
||||
time.sleep(wait_time)
|
||||
|
||||
obj.request_time = time.time()
|
||||
obj.request_counter = 0
|
||||
from openpype.lib import Logger
|
||||
|
||||
|
||||
class ClockifyAPI:
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
def __init__(self, api_key=None, master_parent=None):
|
||||
self.workspace_name = None
|
||||
self.workspace_id = None
|
||||
self.master_parent = master_parent
|
||||
self.api_key = api_key
|
||||
self.request_counter = 0
|
||||
self.request_time = time.time()
|
||||
|
||||
self._workspace_id = None
|
||||
self._user_id = None
|
||||
self._secure_registry = None
|
||||
|
||||
@property
|
||||
|
|
@ -44,11 +32,19 @@ class ClockifyAPI:
|
|||
|
||||
@property
|
||||
def headers(self):
|
||||
return {"X-Api-Key": self.api_key}
|
||||
return {"x-api-key": self.api_key}
|
||||
|
||||
@property
|
||||
def workspace_id(self):
|
||||
return self._workspace_id
|
||||
|
||||
@property
|
||||
def user_id(self):
|
||||
return self._user_id
|
||||
|
||||
def verify_api(self):
|
||||
for key, value in self.headers.items():
|
||||
if value is None or value.strip() == '':
|
||||
if value is None or value.strip() == "":
|
||||
return False
|
||||
return True
|
||||
|
||||
|
|
@ -59,65 +55,55 @@ class ClockifyAPI:
|
|||
if api_key is not None and self.validate_api_key(api_key) is True:
|
||||
self.api_key = api_key
|
||||
self.set_workspace()
|
||||
self.set_user_id()
|
||||
if self.master_parent:
|
||||
self.master_parent.signed_in()
|
||||
return True
|
||||
return False
|
||||
|
||||
def validate_api_key(self, api_key):
|
||||
test_headers = {'X-Api-Key': api_key}
|
||||
action_url = 'workspaces/'
|
||||
time_check(self)
|
||||
test_headers = {"x-api-key": api_key}
|
||||
action_url = "user"
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=test_headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=test_headers
|
||||
)
|
||||
if response.status_code != 200:
|
||||
return False
|
||||
return True
|
||||
|
||||
def validate_workspace_perm(self, workspace_id=None):
|
||||
user_id = self.get_user_id()
|
||||
def validate_workspace_permissions(self, workspace_id=None, user_id=None):
|
||||
if user_id is None:
|
||||
self.log.info("No user_id found during validation")
|
||||
return False
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = "/workspaces/{}/users/{}/permissions".format(
|
||||
workspace_id, user_id
|
||||
)
|
||||
time_check(self)
|
||||
action_url = f"workspaces/{workspace_id}/users?includeRoles=1"
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
user_permissions = response.json()
|
||||
for perm in user_permissions:
|
||||
if perm['name'] in ADMIN_PERMISSION_NAMES:
|
||||
data = response.json()
|
||||
for user in data:
|
||||
if user.get("id") == user_id:
|
||||
roles_data = user.get("roles")
|
||||
for entities in roles_data:
|
||||
if entities.get("role") in ADMIN_PERMISSION_NAMES:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_user_id(self):
|
||||
action_url = 'v1/user/'
|
||||
time_check(self)
|
||||
action_url = "user"
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
# this regex is neccessary: UNICODE strings are crashing
|
||||
# during json serialization
|
||||
id_regex = '\"{1}id\"{1}\:{1}\"{1}\w+\"{1}'
|
||||
result = re.findall(id_regex, str(response.content))
|
||||
if len(result) != 1:
|
||||
# replace with log and better message?
|
||||
print('User ID was not found (this is a BUG!!!)')
|
||||
return None
|
||||
return json.loads('{'+result[0]+'}')['id']
|
||||
result = response.json()
|
||||
user_id = result.get("id", None)
|
||||
|
||||
return user_id
|
||||
|
||||
def set_workspace(self, name=None):
|
||||
if name is None:
|
||||
name = os.environ.get('CLOCKIFY_WORKSPACE', None)
|
||||
name = os.environ.get("CLOCKIFY_WORKSPACE", None)
|
||||
self.workspace_name = name
|
||||
self.workspace_id = None
|
||||
if self.workspace_name is None:
|
||||
return
|
||||
try:
|
||||
|
|
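Note: the key validation now targets Clockify API v1 (x-api-key header, user endpoint); a minimal sketch of the same request outside the class, using the updated CLOCKIFY_ENDPOINT constant shown further below:

import requests

CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"


def api_key_is_valid(api_key):
    # The v1 "user" endpoint returns the authenticated user's profile,
    # including the "id" that get_user_id() reads from the JSON body.
    response = requests.get(
        CLOCKIFY_ENDPOINT + "user",
        headers={"x-api-key": api_key},
    )
    return response.status_code == 200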
@ -125,7 +111,7 @@ class ClockifyAPI:
|
|||
except Exception:
|
||||
result = False
|
||||
if result is not False:
|
||||
self.workspace_id = result
|
||||
self._workspace_id = result
|
||||
if self.master_parent is not None:
|
||||
self.master_parent.start_timer_check()
|
||||
return True
|
||||
|
|
@ -139,6 +125,14 @@ class ClockifyAPI:
|
|||
return all_workspaces[name]
|
||||
return False
|
||||
|
||||
def set_user_id(self):
|
||||
try:
|
||||
user_id = self.get_user_id()
|
||||
except Exception:
|
||||
user_id = None
|
||||
if user_id is not None:
|
||||
self._user_id = user_id
|
||||
|
||||
def get_api_key(self):
|
||||
return self.secure_registry.get_item("api_key", None)
|
||||
|
||||
|
|
@ -146,11 +140,9 @@ class ClockifyAPI:
|
|||
self.secure_registry.set_item("api_key", api_key)
|
||||
|
||||
def get_workspaces(self):
|
||||
action_url = 'workspaces/'
|
||||
time_check(self)
|
||||
action_url = "workspaces/"
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
return {
|
||||
workspace["name"]: workspace["id"] for workspace in response.json()
|
||||
|
|
@ -159,27 +151,22 @@ class ClockifyAPI:
|
|||
def get_projects(self, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/projects/'.format(workspace_id)
|
||||
time_check(self)
|
||||
action_url = f"workspaces/{workspace_id}/projects"
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
|
||||
return {
|
||||
project["name"]: project["id"] for project in response.json()
|
||||
}
|
||||
if response.status_code != 403:
|
||||
result = response.json()
|
||||
return {project["name"]: project["id"] for project in result}
|
||||
|
||||
def get_project_by_id(self, project_id, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/projects/{}/'.format(
|
||||
action_url = "workspaces/{}/projects/{}".format(
|
||||
workspace_id, project_id
|
||||
)
|
||||
time_check(self)
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
|
||||
return response.json()
|
||||
|
|
@ -187,32 +174,24 @@ class ClockifyAPI:
|
|||
def get_tags(self, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/tags/'.format(workspace_id)
|
||||
time_check(self)
|
||||
action_url = "workspaces/{}/tags".format(workspace_id)
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
|
||||
return {
|
||||
tag["name"]: tag["id"] for tag in response.json()
|
||||
}
|
||||
return {tag["name"]: tag["id"] for tag in response.json()}
|
||||
|
||||
def get_tasks(self, project_id, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
|
||||
action_url = "workspaces/{}/projects/{}/tasks".format(
|
||||
workspace_id, project_id
|
||||
)
|
||||
time_check(self)
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
|
||||
return {
|
||||
task["name"]: task["id"] for task in response.json()
|
||||
}
|
||||
return {task["name"]: task["id"] for task in response.json()}
|
||||
|
||||
def get_workspace_id(self, workspace_name):
|
||||
all_workspaces = self.get_workspaces()
|
||||
|
|
@ -236,48 +215,64 @@ class ClockifyAPI:
|
|||
return None
|
||||
return all_tasks[tag_name]
|
||||
|
||||
def get_task_id(
|
||||
self, task_name, project_id, workspace_id=None
|
||||
):
|
||||
def get_task_id(self, task_name, project_id, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
all_tasks = self.get_tasks(
|
||||
project_id, workspace_id
|
||||
)
|
||||
all_tasks = self.get_tasks(project_id, workspace_id)
|
||||
if task_name not in all_tasks:
|
||||
return None
|
||||
return all_tasks[task_name]
|
||||
|
||||
def get_current_time(self):
|
||||
return str(datetime.datetime.utcnow().isoformat())+'Z'
|
||||
return str(datetime.datetime.utcnow().isoformat()) + "Z"
|
||||
|
||||
def start_time_entry(
|
||||
self, description, project_id, task_id=None, tag_ids=[],
|
||||
workspace_id=None, billable=True
|
||||
self,
|
||||
description,
|
||||
project_id,
|
||||
task_id=None,
|
||||
tag_ids=None,
|
||||
workspace_id=None,
|
||||
user_id=None,
|
||||
billable=True,
|
||||
):
|
||||
# Workspace
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
# User ID
|
||||
if user_id is None:
|
||||
user_id = self._user_id
|
||||
|
||||
# get running timer to check if we need to start it
|
||||
current_timer = self.get_in_progress()
|
||||
|
||||
# Check if is currently run another times and has same values
|
||||
current = self.get_in_progress(workspace_id)
|
||||
if current is not None:
|
||||
# Do not restart the timer if it is already running for the current task
|
||||
if current_timer:
|
||||
current_timer_hierarchy = current_timer.get("description")
|
||||
current_project_id = current_timer.get("projectId")
|
||||
current_task_id = current_timer.get("taskId")
|
||||
if (
|
||||
current.get("description", None) == description and
|
||||
current.get("projectId", None) == project_id and
|
||||
current.get("taskId", None) == task_id
|
||||
description == current_timer_hierarchy
|
||||
and project_id == current_project_id
|
||||
and task_id == current_task_id
|
||||
):
|
||||
self.log.info(
|
||||
"Timer for the current project is already running"
|
||||
)
|
||||
self.bool_timer_run = True
|
||||
return self.bool_timer_run
|
||||
self.finish_time_entry(workspace_id)
|
||||
self.finish_time_entry()
|
||||
|
||||
# Convert billable to strings
|
||||
if billable:
|
||||
billable = 'true'
|
||||
billable = "true"
|
||||
else:
|
||||
billable = 'false'
|
||||
billable = "false"
|
||||
# Rest API Action
|
||||
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
|
||||
action_url = "workspaces/{}/user/{}/time-entries".format(
|
||||
workspace_id, user_id
|
||||
)
|
||||
start = self.get_current_time()
|
||||
body = {
|
||||
"start": start,
|
||||
|
|
@ -285,169 +280,135 @@ class ClockifyAPI:
|
|||
"description": description,
|
||||
"projectId": project_id,
|
||||
"taskId": task_id,
|
||||
"tagIds": tag_ids
|
||||
"tagIds": tag_ids,
|
||||
}
|
||||
time_check(self)
|
||||
response = requests.post(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
|
||||
success = False
|
||||
if response.status_code < 300:
|
||||
success = True
|
||||
return success
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_in_progress(self, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/timeEntries/inProgress'.format(
|
||||
workspace_id
|
||||
)
|
||||
time_check(self)
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
)
|
||||
def _get_current_timer_values(self, response):
|
||||
if response is None:
|
||||
return
|
||||
try:
|
||||
output = response.json()
|
||||
except json.decoder.JSONDecodeError:
|
||||
output = None
|
||||
return output
|
||||
return None
|
||||
if output and isinstance(output, list):
|
||||
return output[0]
|
||||
return None
|
||||
|
||||
def finish_time_entry(self, workspace_id=None):
|
||||
def get_in_progress(self, user_id=None, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
current = self.get_in_progress(workspace_id)
|
||||
if current is None:
|
||||
return
|
||||
if user_id is None:
|
||||
user_id = self.user_id
|
||||
|
||||
current_id = current["id"]
|
||||
action_url = 'workspaces/{}/timeEntries/{}'.format(
|
||||
workspace_id, current_id
|
||||
action_url = (
|
||||
f"workspaces/{workspace_id}/user/"
|
||||
f"{user_id}/time-entries?in-progress=1"
|
||||
)
|
||||
body = {
|
||||
"start": current["timeInterval"]["start"],
|
||||
"billable": current["billable"],
|
||||
"description": current["description"],
|
||||
"projectId": current["projectId"],
|
||||
"taskId": current["taskId"],
|
||||
"tagIds": current["tagIds"],
|
||||
"end": self.get_current_time()
|
||||
}
|
||||
time_check(self)
|
||||
response = requests.put(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
return self._get_current_timer_values(response)
|
||||
|
||||
def finish_time_entry(self, workspace_id=None, user_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
if user_id is None:
|
||||
user_id = self.user_id
|
||||
current_timer = self.get_in_progress()
|
||||
if not current_timer:
|
||||
return
|
||||
action_url = "workspaces/{}/user/{}/time-entries".format(
|
||||
workspace_id, user_id
|
||||
)
|
||||
body = {"end": self.get_current_time()}
|
||||
response = requests.patch(
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def get_time_entries(
|
||||
self, workspace_id=None, quantity=10
|
||||
):
|
||||
def get_time_entries(self, workspace_id=None, user_id=None, quantity=10):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
|
||||
time_check(self)
|
||||
if user_id is None:
|
||||
user_id = self.user_id
|
||||
action_url = "workspaces/{}/user/{}/time-entries".format(
|
||||
workspace_id, user_id
|
||||
)
|
||||
response = requests.get(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
return response.json()[:quantity]
|
||||
|
||||
def remove_time_entry(self, tid, workspace_id=None):
|
||||
def remove_time_entry(self, tid, workspace_id=None, user_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/timeEntries/{}'.format(
|
||||
workspace_id, tid
|
||||
action_url = "workspaces/{}/user/{}/time-entries/{}".format(
|
||||
workspace_id, user_id, tid
|
||||
)
|
||||
time_check(self)
|
||||
response = requests.delete(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def add_project(self, name, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/projects/'.format(workspace_id)
|
||||
action_url = "workspaces/{}/projects".format(workspace_id)
|
||||
body = {
|
||||
"name": name,
|
||||
"clientId": "",
|
||||
"isPublic": "false",
|
||||
"estimate": {
|
||||
"estimate": 0,
|
||||
"type": "AUTO"
|
||||
},
|
||||
"estimate": {"estimate": 0, "type": "AUTO"},
|
||||
"color": "#f44336",
|
||||
"billable": "true"
|
||||
"billable": "true",
|
||||
}
|
||||
time_check(self)
|
||||
response = requests.post(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def add_workspace(self, name):
|
||||
action_url = 'workspaces/'
|
||||
action_url = "workspaces/"
|
||||
body = {"name": name}
|
||||
time_check(self)
|
||||
response = requests.post(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def add_task(
|
||||
self, name, project_id, workspace_id=None
|
||||
):
|
||||
def add_task(self, name, project_id, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
|
||||
action_url = "workspaces/{}/projects/{}/tasks".format(
|
||||
workspace_id, project_id
|
||||
)
|
||||
body = {
|
||||
"name": name,
|
||||
"projectId": project_id
|
||||
}
|
||||
time_check(self)
|
||||
body = {"name": name, "projectId": project_id}
|
||||
response = requests.post(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def add_tag(self, name, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = 'workspaces/{}/tags'.format(workspace_id)
|
||||
body = {
|
||||
"name": name
|
||||
}
|
||||
time_check(self)
|
||||
action_url = "workspaces/{}/tags".format(workspace_id)
|
||||
body = {"name": name}
|
||||
response = requests.post(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
json=body
|
||||
CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body
|
||||
)
|
||||
return response.json()
|
||||
|
||||
def delete_project(
|
||||
self, project_id, workspace_id=None
|
||||
):
|
||||
def delete_project(self, project_id, workspace_id=None):
|
||||
if workspace_id is None:
|
||||
workspace_id = self.workspace_id
|
||||
action_url = '/workspaces/{}/projects/{}'.format(
|
||||
action_url = "/workspaces/{}/projects/{}".format(
|
||||
workspace_id, project_id
|
||||
)
|
||||
time_check(self)
|
||||
response = requests.delete(
|
||||
CLOCKIFY_ENDPOINT + action_url,
|
||||
headers=self.headers,
|
||||
|
|
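Note: a hedged sketch of the user-scoped v1 time-entry calls the methods above now use (the API key and ids are placeholders):

import datetime
import requests

CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"
HEADERS = {"x-api-key": "<api key>"}  # placeholder


def start_entry(workspace_id, user_id, description, project_id):
    # v1 creates time entries on a per-user path.
    url = "{}workspaces/{}/user/{}/time-entries".format(
        CLOCKIFY_ENDPOINT, workspace_id, user_id
    )
    body = {
        "start": datetime.datetime.utcnow().isoformat() + "Z",
        "description": description,
        "projectId": project_id,
    }
    return requests.post(url, headers=HEADERS, json=body).json()


def stop_running_entry(workspace_id, user_id):
    # PATCH with only an "end" timestamp finishes the running entry.
    url = "{}workspaces/{}/user/{}/time-entries".format(
        CLOCKIFY_ENDPOINT, workspace_id, user_id
    )
    body = {"end": datetime.datetime.utcnow().isoformat() + "Z"}
    return requests.patch(url, headers=HEADERS, json=body).json()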
@ -455,12 +416,12 @@ class ClockifyAPI:
|
|||
return response.json()
|
||||
|
||||
def convert_input(
|
||||
self, entity_id, entity_name, mode='Workspace', project_id=None
|
||||
self, entity_id, entity_name, mode="Workspace", project_id=None
|
||||
):
|
||||
if entity_id is None:
|
||||
error = False
|
||||
error_msg = 'Missing information "{}"'
|
||||
if mode.lower() == 'workspace':
|
||||
if mode.lower() == "workspace":
|
||||
if entity_id is None and entity_name is None:
|
||||
if self.workspace_id is not None:
|
||||
entity_id = self.workspace_id
|
||||
|
|
@ -471,14 +432,14 @@ class ClockifyAPI:
|
|||
else:
|
||||
if entity_id is None and entity_name is None:
|
||||
error = True
|
||||
elif mode.lower() == 'project':
|
||||
elif mode.lower() == "project":
|
||||
entity_id = self.get_project_id(entity_name)
|
||||
elif mode.lower() == 'task':
|
||||
elif mode.lower() == "task":
|
||||
entity_id = self.get_task_id(
|
||||
task_name=entity_name, project_id=project_id
|
||||
)
|
||||
else:
|
||||
raise TypeError('Unknown type')
|
||||
raise TypeError("Unknown type")
|
||||
# Raise error
|
||||
if error:
|
||||
raise ValueError(error_msg.format(mode))
@ -2,24 +2,13 @@ import os
|
|||
import threading
|
||||
import time
|
||||
|
||||
from openpype.modules import (
|
||||
OpenPypeModule,
|
||||
ITrayModule,
|
||||
IPluginPaths
|
||||
)
|
||||
from openpype.modules import OpenPypeModule, ITrayModule, IPluginPaths
|
||||
from openpype.client import get_asset_by_name
|
||||
|
||||
from .clockify_api import ClockifyAPI
|
||||
from .constants import (
|
||||
CLOCKIFY_FTRACK_USER_PATH,
|
||||
CLOCKIFY_FTRACK_SERVER_PATH
|
||||
)
|
||||
from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH
|
||||
|
||||
|
||||
class ClockifyModule(
|
||||
OpenPypeModule,
|
||||
ITrayModule,
|
||||
IPluginPaths
|
||||
):
|
||||
class ClockifyModule(OpenPypeModule, ITrayModule, IPluginPaths):
|
||||
name = "clockify"
|
||||
|
||||
def initialize(self, modules_settings):
|
||||
|
|
@ -33,18 +22,23 @@ class ClockifyModule(
|
|||
self.timer_manager = None
|
||||
self.MessageWidgetClass = None
|
||||
self.message_widget = None
|
||||
|
||||
self.clockapi = ClockifyAPI(master_parent=self)
|
||||
self._clockify_api = None
|
||||
|
||||
# TimersManager attributes
|
||||
# - set `timers_manager_connector` only in `tray_init`
|
||||
self.timers_manager_connector = None
|
||||
self._timers_manager_module = None
|
||||
|
||||
@property
|
||||
def clockify_api(self):
|
||||
if self._clockify_api is None:
|
||||
from .clockify_api import ClockifyAPI
|
||||
|
||||
self._clockify_api = ClockifyAPI(master_parent=self)
|
||||
return self._clockify_api
|
||||
|
||||
def get_global_environments(self):
|
||||
return {
|
||||
"CLOCKIFY_WORKSPACE": self.workspace_name
|
||||
}
|
||||
return {"CLOCKIFY_WORKSPACE": self.workspace_name}
|
||||
|
||||
def tray_init(self):
|
||||
from .widgets import ClockifySettings, MessageWidget
|
||||
|
|
@ -52,7 +46,7 @@ class ClockifyModule(
|
|||
self.MessageWidgetClass = MessageWidget
|
||||
|
||||
self.message_widget = None
|
||||
self.widget_settings = ClockifySettings(self.clockapi)
|
||||
self.widget_settings = ClockifySettings(self.clockify_api)
|
||||
self.widget_settings_required = None
|
||||
|
||||
self.thread_timer_check = None
|
||||
|
|
@ -61,7 +55,7 @@ class ClockifyModule(
|
|||
self.bool_api_key_set = False
|
||||
self.bool_workspace_set = False
|
||||
self.bool_timer_run = False
|
||||
self.bool_api_key_set = self.clockapi.set_api()
|
||||
self.bool_api_key_set = self.clockify_api.set_api()
|
||||
|
||||
# Define itself as TimersManager connector
|
||||
self.timers_manager_connector = self
|
||||
|
|
@ -71,12 +65,11 @@ class ClockifyModule(
|
|||
self.show_settings()
|
||||
return
|
||||
|
||||
self.bool_workspace_set = self.clockapi.workspace_id is not None
|
||||
self.bool_workspace_set = self.clockify_api.workspace_id is not None
|
||||
if self.bool_workspace_set is False:
|
||||
return
|
||||
|
||||
self.start_timer_check()
|
||||
|
||||
self.set_menu_visibility()
|
||||
|
||||
def tray_exit(self, *_a, **_kw):
|
||||
|
|
@ -85,23 +78,19 @@ class ClockifyModule(
|
|||
def get_plugin_paths(self):
|
||||
"""Implementaton of IPluginPaths to get plugin paths."""
|
||||
actions_path = os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)),
|
||||
"launcher_actions"
|
||||
os.path.dirname(os.path.abspath(__file__)), "launcher_actions"
|
||||
)
|
||||
return {
|
||||
"actions": [actions_path]
|
||||
}
|
||||
return {"actions": [actions_path]}
|
||||
|
||||
def get_ftrack_event_handler_paths(self):
|
||||
"""Function for Ftrack module to add ftrack event handler paths."""
|
||||
return {
|
||||
"user": [CLOCKIFY_FTRACK_USER_PATH],
|
||||
"server": [CLOCKIFY_FTRACK_SERVER_PATH]
|
||||
"server": [CLOCKIFY_FTRACK_SERVER_PATH],
|
||||
}
|
||||
|
||||
def clockify_timer_stopped(self):
|
||||
self.bool_timer_run = False
|
||||
# Call `ITimersManager` method
|
||||
self.timer_stopped()
|
||||
|
||||
def start_timer_check(self):
|
||||
|
|
@ -122,45 +111,44 @@ class ClockifyModule(
|
|||
def check_running(self):
|
||||
while self.bool_thread_check_running is True:
|
||||
bool_timer_run = False
|
||||
if self.clockapi.get_in_progress() is not None:
|
||||
if self.clockify_api.get_in_progress() is not None:
|
||||
bool_timer_run = True
|
||||
|
||||
if self.bool_timer_run != bool_timer_run:
|
||||
if self.bool_timer_run is True:
|
||||
self.clockify_timer_stopped()
|
||||
elif self.bool_timer_run is False:
|
||||
actual_timer = self.clockapi.get_in_progress()
|
||||
if not actual_timer:
|
||||
current_timer = self.clockify_api.get_in_progress()
|
||||
if current_timer is None:
|
||||
continue
|
||||
current_proj_id = current_timer.get("projectId")
|
||||
if not current_proj_id:
|
||||
continue
|
||||
|
||||
actual_proj_id = actual_timer["projectId"]
|
||||
if not actual_proj_id:
|
||||
continue
|
||||
|
||||
project = self.clockapi.get_project_by_id(actual_proj_id)
|
||||
project = self.clockify_api.get_project_by_id(
|
||||
current_proj_id
|
||||
)
|
||||
if project and project.get("code") == 501:
|
||||
continue
|
||||
|
||||
project_name = project["name"]
|
||||
project_name = project.get("name")
|
||||
|
||||
actual_timer_hierarchy = actual_timer["description"]
|
||||
hierarchy_items = actual_timer_hierarchy.split("/")
|
||||
current_timer_hierarchy = current_timer.get("description")
|
||||
if not current_timer_hierarchy:
|
||||
continue
|
||||
hierarchy_items = current_timer_hierarchy.split("/")
|
||||
# Each pype timer must have at least 2 items!
|
||||
if len(hierarchy_items) < 2:
|
||||
continue
|
||||
|
||||
task_name = hierarchy_items[-1]
|
||||
hierarchy = hierarchy_items[:-1]
|
||||
|
||||
task_type = None
|
||||
if len(actual_timer.get("tags", [])) > 0:
|
||||
task_type = actual_timer["tags"][0].get("name")
|
||||
data = {
|
||||
"task_name": task_name,
|
||||
"hierarchy": hierarchy,
|
||||
"project_name": project_name,
|
||||
"task_type": task_type
|
||||
}
|
||||
# Call `ITimersManager` method
|
||||
self.timer_started(data)
|
||||
|
||||
self.bool_timer_run = bool_timer_run
|
||||
|
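Note: the description parsing this polling loop relies on, as a small sketch; an OpenPype timer description is "<hierarchy>/<task>" with at least two items:

def split_timer_description(description):
    # Returns None for descriptions that were not written by OpenPype.
    items = description.split("/")
    if len(items) < 2:
        return None
    return {"task_name": items[-1], "hierarchy": items[:-1]}


print(split_timer_description("shots/sq010/sh0010/animation"))
# {'task_name': 'animation', 'hierarchy': ['shots', 'sq010', 'sh0010']}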
|
@ -184,6 +172,7 @@ class ClockifyModule(
|
|||
def tray_menu(self, parent_menu):
|
||||
# Menu for Tray App
|
||||
from qtpy import QtWidgets
|
||||
|
||||
menu = QtWidgets.QMenu("Clockify", parent_menu)
|
||||
menu.setProperty("submenu", "on")
|
||||
|
||||
|
|
@ -204,7 +193,9 @@ class ClockifyModule(
|
|||
parent_menu.addMenu(menu)
|
||||
|
||||
def show_settings(self):
|
||||
self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
|
||||
self.widget_settings.input_api_key.setText(
|
||||
self.clockify_api.get_api_key()
|
||||
)
|
||||
self.widget_settings.show()
|
||||
|
||||
def set_menu_visibility(self):
|
||||
|
|
@ -218,72 +209,82 @@ class ClockifyModule(
|
|||
def timer_started(self, data):
|
||||
"""Tell TimersManager that timer started."""
|
||||
if self._timers_manager_module is not None:
|
||||
self._timers_manager_module.timer_started(self._module.id, data)
|
||||
self._timers_manager_module.timer_started(self.id, data)
|
||||
|
||||
def timer_stopped(self):
|
||||
"""Tell TimersManager that timer stopped."""
|
||||
if self._timers_manager_module is not None:
|
||||
self._timers_manager_module.timer_stopped(self._module.id)
|
||||
self._timers_manager_module.timer_stopped(self.id)
|
||||
|
||||
def stop_timer(self):
|
||||
"""Called from TimersManager to stop timer."""
|
||||
self.clockapi.finish_time_entry()
|
||||
self.clockify_api.finish_time_entry()
|
||||
|
||||
def start_timer(self, input_data):
|
||||
"""Called from TimersManager to start timer."""
|
||||
# If not api key is not entered then skip
|
||||
if not self.clockapi.get_api_key():
|
||||
return
|
||||
|
||||
actual_timer = self.clockapi.get_in_progress()
|
||||
actual_timer_hierarchy = None
|
||||
actual_project_id = None
|
||||
if actual_timer is not None:
|
||||
actual_timer_hierarchy = actual_timer.get("description")
|
||||
actual_project_id = actual_timer.get("projectId")
|
||||
|
||||
# Concatenate hierarchy and task to get description
|
||||
desc_items = [val for val in input_data.get("hierarchy", [])]
|
||||
desc_items.append(input_data["task_name"])
|
||||
description = "/".join(desc_items)
|
||||
|
||||
# Check project existence
|
||||
project_name = input_data["project_name"]
|
||||
project_id = self.clockapi.get_project_id(project_name)
|
||||
def _verify_project_exists(self, project_name):
|
||||
project_id = self.clockify_api.get_project_id(project_name)
|
||||
if not project_id:
|
||||
self.log.warning((
|
||||
"Project \"{}\" was not found in Clockify. Timer won't start."
|
||||
).format(project_name))
|
||||
self.log.warning(
|
||||
'Project "{}" was not found in Clockify. Timer won\'t start.'
|
||||
).format(project_name)
|
||||
|
||||
if not self.MessageWidgetClass:
|
||||
return
|
||||
|
||||
msg = (
|
||||
"Project <b>\"{}\"</b> is not"
|
||||
" in Clockify Workspace <b>\"{}\"</b>."
|
||||
'Project <b>"{}"</b> is not'
|
||||
' in Clockify Workspace <b>"{}"</b>.'
|
||||
"<br><br>Please inform your Project Manager."
|
||||
).format(project_name, str(self.clockapi.workspace_name))
|
||||
).format(project_name, str(self.clockify_api.workspace_name))
|
||||
|
||||
self.message_widget = self.MessageWidgetClass(
|
||||
msg, "Clockify - Info Message"
|
||||
)
|
||||
self.message_widget.closed.connect(self.on_message_widget_close)
|
||||
self.message_widget.show()
|
||||
return False
|
||||
return project_id
|
||||
|
||||
def start_timer(self, input_data):
|
||||
"""Called from TimersManager to start timer."""
|
||||
# If the API key is not entered then skip
|
||||
if not self.clockify_api.get_api_key():
|
||||
return
|
||||
|
||||
if (
|
||||
actual_timer is not None and
|
||||
description == actual_timer_hierarchy and
|
||||
project_id == actual_project_id
|
||||
):
|
||||
task_name = input_data.get("task_name")
|
||||
|
||||
# Concatenate hierarchy and task to get description
|
||||
description_items = list(input_data.get("hierarchy", []))
|
||||
description_items.append(task_name)
|
||||
description = "/".join(description_items)
|
||||
|
||||
# Check project existence
|
||||
project_name = input_data.get("project_name")
|
||||
project_id = self._verify_project_exists(project_name)
|
||||
if not project_id:
|
||||
return
|
||||
|
||||
# Setup timer tags
|
||||
tag_ids = []
|
||||
task_tag_id = self.clockapi.get_tag_id(input_data["task_type"])
|
||||
tag_name = input_data.get("task_type")
|
||||
if not tag_name:
|
||||
# no task_type found in the input data
|
||||
# if the timer is restarted by idle time (bug?)
|
||||
asset_name = input_data["hierarchy"][-1]
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
task_info = asset_doc["data"]["tasks"][task_name]
|
||||
tag_name = task_info.get("type", "")
|
||||
if not tag_name:
|
||||
self.log.info("No tag information found for the timer")
|
||||
|
||||
task_tag_id = self.clockify_api.get_tag_id(tag_name)
|
||||
if task_tag_id is not None:
|
||||
tag_ids.append(task_tag_id)
|
||||
|
||||
self.clockapi.start_time_entry(
|
||||
description, project_id, tag_ids=tag_ids
|
||||
# Start timer
|
||||
self.clockify_api.start_time_entry(
|
||||
description,
|
||||
project_id,
|
||||
tag_ids=tag_ids,
|
||||
workspace_id=self.clockify_api.workspace_id,
|
||||
user_id=self.clockify_api.user_id,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -9,4 +9,4 @@ CLOCKIFY_FTRACK_USER_PATH = os.path.join(
|
|||
)
|
||||
|
||||
ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"]
|
||||
CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/"
|
||||
CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"
@ -4,7 +4,7 @@ from openpype_modules.ftrack.lib import ServerAction
|
|||
from openpype_modules.clockify.clockify_api import ClockifyAPI
|
||||
|
||||
|
||||
class SyncClocifyServer(ServerAction):
|
||||
class SyncClockifyServer(ServerAction):
|
||||
'''Synchronise project names and task types.'''
|
||||
|
||||
identifier = "clockify.sync.server"
|
||||
|
|
@ -14,12 +14,12 @@ class SyncClocifyServer(ServerAction):
|
|||
role_list = ["Pypeclub", "Administrator", "project Manager"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(SyncClocifyServer, self).__init__(*args, **kwargs)
|
||||
super(SyncClockifyServer, self).__init__(*args, **kwargs)
|
||||
|
||||
workspace_name = os.environ.get("CLOCKIFY_WORKSPACE")
|
||||
api_key = os.environ.get("CLOCKIFY_API_KEY")
|
||||
self.clockapi = ClockifyAPI(api_key)
|
||||
self.clockapi.set_workspace(workspace_name)
|
||||
self.clockify_api = ClockifyAPI(api_key)
|
||||
self.clockify_api.set_workspace(workspace_name)
|
||||
if api_key is None:
|
||||
modified_key = "None"
|
||||
else:
|
||||
|
|
@ -48,13 +48,16 @@ class SyncClocifyServer(ServerAction):
|
|||
return True
|
||||
|
||||
def launch(self, session, entities, event):
|
||||
if self.clockapi.workspace_id is None:
|
||||
self.clockify_api.set_api()
|
||||
if self.clockify_api.workspace_id is None:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Clockify Workspace or API key are not set!"
|
||||
}
|
||||
|
||||
if self.clockapi.validate_workspace_perm() is False:
|
||||
if not self.clockify_api.validate_workspace_permissions(
|
||||
self.clockify_api.workspace_id, self.clockify_api.user_id
|
||||
):
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Missing permissions for this action!"
|
||||
|
|
@ -88,9 +91,9 @@ class SyncClocifyServer(ServerAction):
|
|||
task_type["name"] for task_type in task_types
|
||||
]
|
||||
try:
|
||||
clockify_projects = self.clockapi.get_projects()
|
||||
clockify_projects = self.clockify_api.get_projects()
|
||||
if project_name not in clockify_projects:
|
||||
response = self.clockapi.add_project(project_name)
|
||||
response = self.clockify_api.add_project(project_name)
|
||||
if "id" not in response:
|
||||
self.log.warning(
|
||||
"Project \"{}\" can't be created. Response: {}".format(
|
||||
|
|
@ -105,7 +108,7 @@ class SyncClocifyServer(ServerAction):
|
|||
).format(project_name)
|
||||
}
|
||||
|
||||
clockify_workspace_tags = self.clockapi.get_tags()
|
||||
clockify_workspace_tags = self.clockify_api.get_tags()
|
||||
for task_type_name in task_type_names:
|
||||
if task_type_name in clockify_workspace_tags:
|
||||
self.log.debug(
|
||||
|
|
@ -113,7 +116,7 @@ class SyncClocifyServer(ServerAction):
|
|||
)
|
||||
continue
|
||||
|
||||
response = self.clockapi.add_tag(task_type_name)
|
||||
response = self.clockify_api.add_tag(task_type_name)
|
||||
if "id" not in response:
|
||||
self.log.warning(
|
||||
"Task \"{}\" can't be created. Response: {}".format(
|
||||
|
|
@ -138,4 +141,4 @@ class SyncClocifyServer(ServerAction):
|
|||
|
||||
|
||||
def register(session, **kw):
|
||||
SyncClocifyServer(session).register()
|
||||
SyncClockifyServer(session).register()
@ -3,7 +3,7 @@ from openpype_modules.ftrack.lib import BaseAction, statics_icon
|
|||
from openpype_modules.clockify.clockify_api import ClockifyAPI
|
||||
|
||||
|
||||
class SyncClocifyLocal(BaseAction):
|
||||
class SyncClockifyLocal(BaseAction):
|
||||
'''Synchronise project names and task types.'''
|
||||
|
||||
#: Action identifier.
|
||||
|
|
@ -18,9 +18,9 @@ class SyncClocifyLocal(BaseAction):
|
|||
icon = statics_icon("app_icons", "clockify-white.png")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(SyncClocifyLocal, self).__init__(*args, **kwargs)
|
||||
super(SyncClockifyLocal, self).__init__(*args, **kwargs)
|
||||
#: ClockifyAPI
|
||||
self.clockapi = ClockifyAPI()
|
||||
self.clockify_api = ClockifyAPI()
|
||||
|
||||
def discover(self, session, entities, event):
|
||||
if (
|
||||
|
|
@ -31,14 +31,18 @@ class SyncClocifyLocal(BaseAction):
|
|||
return False
|
||||
|
||||
def launch(self, session, entities, event):
|
||||
self.clockapi.set_api()
|
||||
if self.clockapi.workspace_id is None:
|
||||
self.clockify_api.set_api()
|
||||
if self.clockify_api.workspace_id is None:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Clockify Workspace or API key are not set!"
|
||||
}
|
||||
|
||||
if self.clockapi.validate_workspace_perm() is False:
|
||||
if (
|
||||
self.clockify_api.validate_workspace_permissions(
|
||||
self.clockify_api.workspace_id, self.clockify_api.user_id)
|
||||
is False
|
||||
):
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Missing permissions for this action!"
|
||||
|
|
@ -74,9 +78,9 @@ class SyncClocifyLocal(BaseAction):
|
|||
task_type["name"] for task_type in task_types
|
||||
]
|
||||
try:
|
||||
clockify_projects = self.clockapi.get_projects()
|
||||
clockify_projects = self.clockify_api.get_projects()
|
||||
if project_name not in clockify_projects:
|
||||
response = self.clockapi.add_project(project_name)
|
||||
response = self.clockify_api.add_project(project_name)
|
||||
if "id" not in response:
|
||||
self.log.warning(
|
||||
"Project \"{}\" can't be created. Response: {}".format(
|
||||
|
|
@ -91,7 +95,7 @@ class SyncClocifyLocal(BaseAction):
|
|||
).format(project_name)
|
||||
}
|
||||
|
||||
clockify_workspace_tags = self.clockapi.get_tags()
|
||||
clockify_workspace_tags = self.clockify_api.get_tags()
|
||||
for task_type_name in task_type_names:
|
||||
if task_type_name in clockify_workspace_tags:
|
||||
self.log.debug(
|
||||
|
|
@ -99,7 +103,7 @@ class SyncClocifyLocal(BaseAction):
|
|||
)
|
||||
continue
|
||||
|
||||
response = self.clockapi.add_tag(task_type_name)
|
||||
response = self.clockify_api.add_tag(task_type_name)
|
||||
if "id" not in response:
|
||||
self.log.warning(
|
||||
"Task \"{}\" can't be created. Response: {}".format(
|
||||
|
|
@ -121,4 +125,4 @@ class SyncClocifyLocal(BaseAction):
|
|||
|
||||
|
||||
def register(session, **kw):
|
||||
SyncClocifyLocal(session).register()
|
||||
SyncClockifyLocal(session).register()
@ -6,9 +6,9 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI
|
|||
class ClockifyStart(LauncherAction):
|
||||
name = "clockify_start_timer"
|
||||
label = "Clockify - Start Timer"
|
||||
icon = "clockify_icon"
|
||||
icon = "app_icons/clockify.png"
|
||||
order = 500
|
||||
clockapi = ClockifyAPI()
|
||||
clockify_api = ClockifyAPI()
|
||||
|
||||
def is_compatible(self, session):
|
||||
"""Return whether the action is compatible with the session"""
|
||||
|
|
@ -17,23 +17,39 @@ class ClockifyStart(LauncherAction):
|
|||
return False
|
||||
|
||||
def process(self, session, **kwargs):
|
||||
self.clockify_api.set_api()
|
||||
user_id = self.clockify_api.user_id
|
||||
workspace_id = self.clockify_api.workspace_id
|
||||
project_name = session["AVALON_PROJECT"]
|
||||
asset_name = session["AVALON_ASSET"]
|
||||
task_name = session["AVALON_TASK"]
|
||||
|
||||
description = asset_name
|
||||
asset_doc = get_asset_by_name(
|
||||
project_name, asset_name, fields=["data.parents"]
|
||||
)
|
||||
if asset_doc is not None:
|
||||
desc_items = asset_doc.get("data", {}).get("parents", [])
|
||||
desc_items.append(asset_name)
|
||||
desc_items.append(task_name)
|
||||
description = "/".join(desc_items)
|
||||
|
||||
project_id = self.clockapi.get_project_id(project_name)
|
||||
tag_ids = []
|
||||
tag_ids.append(self.clockapi.get_tag_id(task_name))
|
||||
self.clockapi.start_time_entry(
|
||||
description, project_id, tag_ids=tag_ids
|
||||
# fetch asset docs
|
||||
asset_doc = get_asset_by_name(project_name, asset_name)
|
||||
|
||||
# get task type to fill the timer tag
|
||||
task_info = asset_doc["data"]["tasks"][task_name]
|
||||
task_type = task_info["type"]
|
||||
|
||||
# check if the task has a hierarchy and fill the description from it
|
||||
parents_data = asset_doc["data"]
|
||||
if parents_data is not None:
|
||||
description_items = parents_data.get("parents", [])
|
||||
description_items.append(asset_name)
|
||||
description_items.append(task_name)
|
||||
description = "/".join(description_items)
|
||||
|
||||
project_id = self.clockify_api.get_project_id(
|
||||
project_name, workspace_id
|
||||
)
|
||||
tag_ids = []
|
||||
tag_name = task_type
|
||||
tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id))
|
||||
self.clockify_api.start_time_entry(
|
||||
description,
|
||||
project_id,
|
||||
tag_ids=tag_ids,
|
||||
workspace_id=workspace_id,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -3,20 +3,39 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI
|
|||
from openpype.pipeline import LauncherAction
|
||||
|
||||
|
||||
class ClockifySync(LauncherAction):
|
||||
class ClockifyPermissionsCheckFailed(Exception):
|
||||
"""Timer start failed due to user permissions check.
|
||||
Message should be self-explanatory as the traceback won't be shown.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ClockifySync(LauncherAction):
|
||||
name = "sync_to_clockify"
|
||||
label = "Sync to Clockify"
|
||||
icon = "clockify_white_icon"
|
||||
icon = "app_icons/clockify-white.png"
|
||||
order = 500
|
||||
clockapi = ClockifyAPI()
|
||||
have_permissions = clockapi.validate_workspace_perm()
|
||||
clockify_api = ClockifyAPI()
|
||||
|
||||
def is_compatible(self, session):
|
||||
"""Return whether the action is compatible with the session"""
|
||||
return self.have_permissions
|
||||
"""Check if there's some projects to sync"""
|
||||
try:
|
||||
next(get_projects())
|
||||
return True
|
||||
except StopIteration:
|
||||
return False
|
||||
|
||||
def process(self, session, **kwargs):
|
||||
self.clockify_api.set_api()
|
||||
workspace_id = self.clockify_api.workspace_id
|
||||
user_id = self.clockify_api.user_id
|
||||
if not self.clockify_api.validate_workspace_permissions(
|
||||
workspace_id, user_id
|
||||
):
|
||||
raise ClockifyPermissionsCheckFailed(
|
||||
"Current CLockify user is missing permissions for this action!"
|
||||
)
|
||||
project_name = session.get("AVALON_PROJECT") or ""
|
||||
|
||||
projects_to_sync = []
|
||||
|
|
@ -30,24 +49,28 @@ class ClockifySync(LauncherAction):
|
|||
task_types = project["config"]["tasks"].keys()
|
||||
projects_info[project["name"]] = task_types
|
||||
|
||||
clockify_projects = self.clockapi.get_projects()
|
||||
clockify_projects = self.clockify_api.get_projects(workspace_id)
|
||||
for project_name, task_types in projects_info.items():
|
||||
if project_name in clockify_projects:
|
||||
continue
|
||||
|
||||
response = self.clockapi.add_project(project_name)
|
||||
response = self.clockify_api.add_project(
|
||||
project_name, workspace_id
|
||||
)
|
||||
if "id" not in response:
|
||||
self.log.error("Project {} can't be created".format(
|
||||
project_name
|
||||
))
|
||||
self.log.error(
|
||||
"Project {} can't be created".format(project_name)
|
||||
)
|
||||
continue
|
||||
|
||||
clockify_workspace_tags = self.clockapi.get_tags()
|
||||
clockify_workspace_tags = self.clockify_api.get_tags(workspace_id)
|
||||
for task_type in task_types:
|
||||
if task_type not in clockify_workspace_tags:
|
||||
response = self.clockapi.add_tag(task_type)
|
||||
response = self.clockify_api.add_tag(
|
||||
task_type, workspace_id
|
||||
)
|
||||
if "id" not in response:
|
||||
self.log.error('Task {} can\'t be created'.format(
|
||||
task_type
|
||||
))
|
||||
self.log.error(
|
||||
"Task {} can't be created".format(task_type)
|
||||
)
|
||||
continue
|
||||
|
|
|
|||
|
|
@ -77,15 +77,15 @@ class MessageWidget(QtWidgets.QWidget):
|
|||
|
||||
|
||||
class ClockifySettings(QtWidgets.QWidget):
|
||||
SIZE_W = 300
|
||||
SIZE_W = 500
|
||||
SIZE_H = 130
|
||||
|
||||
loginSignal = QtCore.Signal(object, object, object)
|
||||
|
||||
def __init__(self, clockapi, optional=True):
|
||||
def __init__(self, clockify_api, optional=True):
|
||||
super(ClockifySettings, self).__init__()
|
||||
|
||||
self.clockapi = clockapi
|
||||
self.clockify_api = clockify_api
|
||||
self.optional = optional
|
||||
self.validated = False
|
||||
|
||||
|
|
@ -162,17 +162,17 @@ class ClockifySettings(QtWidgets.QWidget):
|
|||
def click_ok(self):
|
||||
api_key = self.input_api_key.text().strip()
|
||||
if self.optional is True and api_key == '':
|
||||
self.clockapi.save_api_key(None)
|
||||
self.clockapi.set_api(api_key)
|
||||
self.clockify_api.save_api_key(None)
|
||||
self.clockify_api.set_api(api_key)
|
||||
self.validated = False
|
||||
self._close_widget()
|
||||
return
|
||||
|
||||
validation = self.clockapi.validate_api_key(api_key)
|
||||
validation = self.clockify_api.validate_api_key(api_key)
|
||||
|
||||
if validation:
|
||||
self.clockapi.save_api_key(api_key)
|
||||
self.clockapi.set_api(api_key)
|
||||
self.clockify_api.save_api_key(api_key)
|
||||
self.clockify_api.set_api(api_key)
|
||||
self.validated = True
|
||||
self._close_widget()
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -3,21 +3,60 @@
|
|||
|
||||
"""
|
||||
import pyblish.api
|
||||
from openpype.lib import TextDef
|
||||
from openpype.pipeline.publish import OpenPypePyblishPluginMixin
|
||||
|
||||
|
||||
class CollectDeadlinePools(pyblish.api.InstancePlugin):
|
||||
class CollectDeadlinePools(pyblish.api.InstancePlugin,
|
||||
OpenPypePyblishPluginMixin):
|
||||
"""Collect pools from instance if present, from Setting otherwise."""
|
||||
|
||||
order = pyblish.api.CollectorOrder + 0.420
|
||||
label = "Collect Deadline Pools"
|
||||
families = ["rendering", "render.farm", "renderFarm", "renderlayer"]
|
||||
families = ["rendering",
|
||||
"render.farm",
|
||||
"renderFarm",
|
||||
"renderlayer",
|
||||
"maxrender"]
|
||||
|
||||
primary_pool = None
|
||||
secondary_pool = None
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings, system_settings):
|
||||
# deadline.publish.CollectDeadlinePools
|
||||
settings = project_settings["deadline"]["publish"]["CollectDeadlinePools"] # noqa
|
||||
cls.primary_pool = settings.get("primary_pool", None)
|
||||
cls.secondary_pool = settings.get("secondary_pool", None)
|
||||
|
||||
def process(self, instance):
|
||||
|
||||
attr_values = self.get_attr_values_from_data(instance.data)
|
||||
if not instance.data.get("primaryPool"):
|
||||
instance.data["primaryPool"] = self.primary_pool or "none"
|
||||
instance.data["primaryPool"] = (
|
||||
attr_values.get("primaryPool") or self.primary_pool or "none"
|
||||
)
|
||||
|
||||
if not instance.data.get("secondaryPool"):
|
||||
instance.data["secondaryPool"] = self.secondary_pool or "none"
|
||||
instance.data["secondaryPool"] = (
|
||||
attr_values.get("secondaryPool") or self.secondary_pool or "none" # noqa
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_attribute_defs(cls):
|
||||
# TODO: Preferably this would be an enum for the user
|
||||
# but the Deadline server URL can be dynamic and
|
||||
# can be set per render instance. Since get_attribute_defs
|
||||
# can't be dynamic unfortunately EnumDef isn't possible (yet?)
|
||||
# pool_names = self.deadline_module.get_deadline_pools(deadline_url,
|
||||
# self.log)
|
||||
# secondary_pool_names = ["-"] + pool_names
|
||||
|
||||
return [
|
||||
TextDef("primaryPool",
|
||||
label="Primary Pool",
|
||||
default=cls.primary_pool),
|
||||
TextDef("secondaryPool",
|
||||
label="Secondary Pool",
|
||||
default=cls.secondary_pool)
|
||||
]
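Note: the pool resolution order the collector implements, sketched in isolation: a pool already present on the instance wins, then the publish attribute value, then the settings default, else "none".

def resolve_pool(existing, attr_value, settings_default):
    # Mirrors the precedence used for both primaryPool and secondaryPool.
    if existing:
        return existing
    return attr_value or settings_default or "none"


assert resolve_pool("", "local_gpu", "farm") == "local_gpu"
assert resolve_pool("", None, "farm") == "farm"
assert resolve_pool("cloud", None, "farm") == "cloud"
assert resolve_pool("", None, None) == "none"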
@ -106,7 +106,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin):
|
|||
|
||||
# define chunk and priority
|
||||
chunk_size = instance.context.data.get("chunk")
|
||||
if chunk_size == 0:
|
||||
if not chunk_size:
|
||||
chunk_size = self.deadline_chunk_size
|
||||
|
||||
# search for %02d pattern in name, and padding number
|
||||
|
|
|
|||
|
|
@ -3,7 +3,15 @@ import getpass
|
|||
import copy
|
||||
|
||||
import attr
|
||||
from openpype.pipeline import legacy_io
|
||||
from openpype.lib import (
|
||||
TextDef,
|
||||
BoolDef,
|
||||
NumberDef,
|
||||
)
|
||||
from openpype.pipeline import (
|
||||
legacy_io,
|
||||
OpenPypePyblishPluginMixin
|
||||
)
|
||||
from openpype.settings import get_project_settings
|
||||
from openpype.hosts.max.api.lib import (
|
||||
get_current_renderer,
|
||||
|
|
@ -22,7 +30,8 @@ class MaxPluginInfo(object):
|
|||
IgnoreInputs = attr.ib(default=True)
|
||||
|
||||
|
||||
class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
||||
class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
|
||||
OpenPypePyblishPluginMixin):
|
||||
|
||||
label = "Submit Render to Deadline"
|
||||
hosts = ["max"]
|
||||
|
|
@ -31,14 +40,22 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
|
||||
use_published = True
|
||||
priority = 50
|
||||
tile_priority = 50
|
||||
chunk_size = 1
|
||||
jobInfo = {}
|
||||
pluginInfo = {}
|
||||
group = None
|
||||
deadline_pool = None
|
||||
deadline_pool_secondary = None
|
||||
framePerTask = 1
|
||||
|
||||
@classmethod
|
||||
def apply_settings(cls, project_settings, system_settings):
|
||||
settings = project_settings["deadline"]["publish"]["MaxSubmitDeadline"] # noqa
|
||||
|
||||
# Take some defaults from settings
|
||||
cls.use_published = settings.get("use_published",
|
||||
cls.use_published)
|
||||
cls.priority = settings.get("priority",
|
||||
cls.priority)
|
||||
cls.chunk_size = settings.get("chunk_size", cls.chunk_size)
|
||||
cls.group = settings.get("group", cls.group)
|
||||
|
||||
def get_job_info(self):
|
||||
job_info = DeadlineJobInfo(Plugin="3dsmax")
|
||||
|
|
@ -49,11 +66,11 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
|
||||
instance = self._instance
|
||||
context = instance.context
|
||||
|
||||
# Always use the original work file name for the Job name even when
|
||||
# rendering is done from the published Work File. The original work
|
||||
# file name is clearer because it can also have subversion strings,
|
||||
# etc. which are stripped for the published file.
|
||||
|
||||
src_filepath = context.data["currentFile"]
|
||||
src_filename = os.path.basename(src_filepath)
|
||||
|
||||
|
|
@ -71,13 +88,13 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
|
||||
job_info.Pool = instance.data.get("primaryPool")
|
||||
job_info.SecondaryPool = instance.data.get("secondaryPool")
|
||||
job_info.ChunkSize = instance.data.get("chunkSize", 1)
|
||||
job_info.Comment = context.data.get("comment")
|
||||
job_info.Priority = instance.data.get("priority", self.priority)
|
||||
job_info.FramesPerTask = instance.data.get("framesPerTask", 1)
|
||||
|
||||
if self.group:
|
||||
job_info.Group = self.group
|
||||
attr_values = self.get_attr_values_from_data(instance.data)
|
||||
|
||||
job_info.ChunkSize = attr_values.get("chunkSize", 1)
|
||||
job_info.Comment = context.data.get("comment")
|
||||
job_info.Priority = attr_values.get("priority", self.priority)
|
||||
job_info.Group = attr_values.get("group", self.group)
|
||||
|
||||
# Add options from RenderGlobals
|
||||
render_globals = instance.data.get("renderGlobals", {})
|
||||
|
|
@ -216,3 +233,32 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
plugin_info.update(plugin_data)
|
||||
|
||||
return job_info, plugin_info
|
||||
|
||||
@classmethod
|
||||
def get_attribute_defs(cls):
|
||||
defs = super(MaxSubmitDeadline, cls).get_attribute_defs()
|
||||
defs.extend([
|
||||
BoolDef("use_published",
|
||||
default=cls.use_published,
|
||||
label="Use Published Scene"),
|
||||
|
||||
NumberDef("priority",
|
||||
minimum=1,
|
||||
maximum=250,
|
||||
decimals=0,
|
||||
default=cls.priority,
|
||||
label="Priority"),
|
||||
|
||||
NumberDef("chunkSize",
|
||||
minimum=1,
|
||||
maximum=50,
|
||||
decimals=0,
|
||||
default=cls.chunk_size,
|
||||
label="Frame Per Task"),
|
||||
|
||||
TextDef("group",
|
||||
default=cls.group,
|
||||
label="Group Name"),
|
||||
])
|
||||
|
||||
return defs
|
||||
|
|
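Note: a hedged sketch of how the attribute definitions above feed back into get_job_info(); the "publish_attributes" layout is an assumption about how the publisher stores per-plugin values, not something shown in this diff.

# Hypothetical instance data as the publisher might store it.
instance_data = {
    "publish_attributes": {
        "MaxSubmitDeadline": {"priority": 70, "chunkSize": 5, "group": "gpu"}
    }
}

# get_attr_values_from_data() would return the plugin's own dict; values
# missing there fall back to the class defaults loaded from settings.
attr_values = instance_data["publish_attributes"]["MaxSubmitDeadline"]
print(attr_values.get("priority", 50))   # 70
print(attr_values.get("chunkSize", 1))   # 5
print(attr_values.get("group", None))    # gpu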
|
|||
|
|
@ -422,6 +422,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
assembly_job_info.Priority = instance.data.get(
|
||||
"tile_priority", self.tile_priority
|
||||
)
|
||||
assembly_job_info.TileJob = False
|
||||
|
||||
pool = instance.context.data["project_settings"]["deadline"]
|
||||
pool = pool["publish"]["ProcessSubmittedJobOnFarm"]["deadline_pool"]
|
||||
|
|
@ -450,15 +451,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
frame_assembly_job_info.ExtraInfo[0] = file_hash
|
||||
frame_assembly_job_info.ExtraInfo[1] = file
|
||||
frame_assembly_job_info.JobDependencies = tile_job_id
|
||||
frame_assembly_job_info.Frames = frame
|
||||
|
||||
# write assembly job config files
|
||||
now = datetime.now()
|
||||
|
||||
config_file = os.path.join(
|
||||
output_dir,
|
||||
"{}_config_{}.txt".format(
|
||||
os.path.splitext(file)[0],
|
||||
now.strftime("%Y_%m_%d_%H_%M_%S")
|
||||
datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
|
||||
)
|
||||
)
|
||||
try:
|
||||
|
|
@ -469,6 +469,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
self.log.warning("Path is unreachable: "
|
||||
"`{}`".format(output_dir))
|
||||
|
||||
assembly_plugin_info["ConfigFile"] = config_file
|
||||
|
||||
with open(config_file, "w") as cf:
|
||||
print("TileCount={}".format(tiles_count), file=cf)
|
||||
print("ImageFileName={}".format(file), file=cf)
|
||||
|
|
@ -477,25 +479,30 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
|
|||
print("ImageHeight={}".format(
|
||||
instance.data.get("resolutionHeight")), file=cf)
|
||||
|
||||
with open(config_file, "a") as cf:
|
||||
# Need to reverse the order of the y tiles, because image
|
||||
# coordinates are calculated from bottom left corner.
|
||||
tiles = _format_tiles(
|
||||
file, 0,
|
||||
instance.data.get("tilesX"),
|
||||
instance.data.get("tilesY"),
|
||||
instance.data.get("resolutionWidth"),
|
||||
instance.data.get("resolutionHeight"),
|
||||
payload_plugin_info["OutputFilePrefix"]
|
||||
payload_plugin_info["OutputFilePrefix"],
|
||||
reversed_y=True
|
||||
)[1]
|
||||
for k, v in sorted(tiles.items()):
|
||||
print("{}={}".format(k, v), file=cf)
|
||||
|
||||
payload = self.assemble_payload(
|
||||
job_info=frame_assembly_job_info,
|
||||
plugin_info=assembly_plugin_info.copy(),
|
||||
# todo: aux file transfers don't work with deadline webservice
|
||||
# add config file as job auxFile
|
||||
# aux_files=[config_file]
|
||||
assembly_payloads.append(
|
||||
self.assemble_payload(
|
||||
job_info=frame_assembly_job_info,
|
||||
plugin_info=assembly_plugin_info.copy(),
|
||||
# This would fail if the client machine and webservice are
|
||||
# using different storage paths.
|
||||
aux_files=[config_file]
|
||||
)
|
||||
)
|
||||
assembly_payloads.append(payload)
|
||||
|
||||
# Submit assembly jobs
|
||||
assembly_job_ids = []
|
||||
|
|
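For orientation, the assembly config written above is a plain key=value text file. A minimal sketch of its contents for a hypothetical 2x2 layout at 1920x1080 follows; the per-tile keys and all paths are illustrative, since the exact entries come from the second dictionary returned by _format_tiles and are written in sorted key order:

    TileCount=4
    ImageFileName=<final image path>
    ImageWidth=1920
    ImageHeight=1080
    TilesCropped=False
    <one key=value line per tile, as produced by _format_tiles>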
@@ -505,6 +512,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
                 "submitting assembly job {} of {}".format(i + 1,
                                                           num_assemblies)
             )
+            self.log.info(payload)
             assembly_job_id = self.submit(payload)
             assembly_job_ids.append(assembly_job_id)

@@ -764,8 +772,15 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):


 def _format_tiles(
-        filename, index, tiles_x, tiles_y,
-        width, height, prefix):
+        filename,
+        index,
+        tiles_x,
+        tiles_y,
+        width,
+        height,
+        prefix,
+        reversed_y=False
+):
     """Generate tile entries for Deadline tile job.

     Returns two dictionaries - one that can be directly used in Deadline
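For reference, a hedged usage sketch of the new signature, mirroring the call the Maya plugin makes above; the filename and prefix values are illustrative assumptions, not taken from the repository:

    # Illustrative call only - mirrors how the Maya plugin above invokes _format_tiles.
    job_dict, cfg_dict = _format_tiles(
        "beauty.0001.exr",          # filename (illustrative)
        0,                          # index of the render output
        2, 2,                       # tiles_x, tiles_y
        1920, 1080,                 # width, height
        "renders/beauty/beauty",    # prefix / OutputFilePrefix (illustrative)
        reversed_y=True
    )
    # job_dict carries the Deadline JobInfo/PluginInfo entries (OutputFilename*, Region*),
    # cfg_dict holds the key=value pairs written into the assembly config file.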
@@ -802,6 +817,7 @@ def _format_tiles(
         width (int): Width resolution of final image.
         height (int): Height resolution of final image.
         prefix (str): Image prefix.
+        reversed_y (bool): Reverses the order of the y tiles.

     Returns:
         (dict, dict): Tuple of two dictionaries - first can be used to
@@ -824,12 +840,16 @@ def _format_tiles(
     cfg["TilesCropped"] = "False"

     tile = 0
+    range_y = range(1, tiles_y + 1)
+    reversed_y_range = list(reversed(range_y))
     for tile_x in range(1, tiles_x + 1):
-        for tile_y in reversed(range(1, tiles_y + 1)):
+        for i, tile_y in enumerate(range_y):
+            tile_y_index = tile_y
+            if reversed_y:
+                tile_y_index = reversed_y_range[i]
+
             tile_prefix = "_tile_{}x{}_{}x{}_".format(
-                tile_x, tile_y,
-                tiles_x,
-                tiles_y
+                tile_x, tile_y_index, tiles_x, tiles_y
             )

             new_filename = "{}/{}{}".format(
@@ -844,11 +864,14 @@ def _format_tiles(
             right = (tile_x * w_space) - 1

             # Job info
-            out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename  # noqa: E501
+            key = "OutputFilename{}".format(index)
+            out["JobInfo"][key] = new_filename

             # Plugin Info
-            out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \
-                "/{}".format(tile_prefix).join(prefix.rsplit("/", 1))
+            key = "RegionPrefix{}".format(str(tile))
+            out["PluginInfo"][key] = "/{}".format(
+                tile_prefix
+            ).join(prefix.rsplit("/", 1))
             out["PluginInfo"]["RegionTop{}".format(tile)] = top
             out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom
             out["PluginInfo"]["RegionLeft{}".format(tile)] = left
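The loop rewrite above replaces the hard-coded reversed(range(...)) with an opt-in flag. A minimal standalone sketch of just that index mapping, assuming tiles_y=3, shows that reversed_y=True reproduces the old bottom-up row indices while the loop itself still runs top-down:

    def y_row_indices(tiles_y, reversed_y=False):
        # Mirrors the indexing logic added to _format_tiles: iterate rows in
        # natural order, but report a bottom-up index when reversed_y is set.
        range_y = range(1, tiles_y + 1)
        reversed_y_range = list(reversed(range_y))
        indices = []
        for i, tile_y in enumerate(range_y):
            tile_y_index = tile_y
            if reversed_y:
                tile_y_index = reversed_y_range[i]
            indices.append(tile_y_index)
        return indices

    print(y_row_indices(3))                    # [1, 2, 3]
    print(y_row_indices(3, reversed_y=True))   # [3, 2, 1], same order the old reversed(range(...)) produced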
@@ -32,7 +32,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
     label = "Submit Nuke to Deadline"
     order = pyblish.api.IntegratorOrder + 0.1
     hosts = ["nuke"]
-    families = ["render", "prerender.farm"]
+    families = ["render", "prerender"]
     optional = True
     targets = ["local"]

@@ -80,6 +80,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
     ]

     def process(self, instance):
+        if not instance.data.get("farm"):
+            self.log.info("Skipping local instance.")
+            return
+
         instance.data["attributeValues"] = self.get_attr_values_from_data(
             instance.data)
@@ -168,10 +172,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin,
                 resp.json()["_id"])

         # redefinition of families
-        if "render.farm" in families:
+        if "render" in instance.data["family"]:
             instance.data['family'] = 'write'
             families.insert(0, "render2d")
-        elif "prerender.farm" in families:
+        elif "prerender" in instance.data["family"]:
             instance.data['family'] = 'write'
             families.insert(0, "prerender")
         instance.data["families"] = families
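A minimal sketch of what this redefinition does to a render instance, assuming it arrives with family "render"; the plain dictionary stands in for the pyblish instance data:

    # Illustrative before/after of the family redefinition above.
    instance_data = {"family": "render", "families": ["render"]}
    families = list(instance_data["families"])

    if "render" in instance_data["family"]:
        instance_data["family"] = "write"
        families.insert(0, "render2d")
    elif "prerender" in instance_data["family"]:
        instance_data["family"] = "write"
        families.insert(0, "prerender")
    instance_data["families"] = families

    print(instance_data)  # {'family': 'write', 'families': ['render2d', 'render']}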
Some files were not shown because too many files have changed in this diff.