diff --git a/.github/pr-branch-labeler.yml b/.github/pr-branch-labeler.yml
new file mode 100644
index 0000000000..58bcbcb72a
--- /dev/null
+++ b/.github/pr-branch-labeler.yml
@@ -0,0 +1,15 @@
+# Apply label "feature" if head matches "feature/*"
+'type: feature':
+  head: "feature/*"
+
+# Apply label "enhancement" if head matches "enhancement/*"
+'type: enhancement':
+  head: "enhancement/*"
+
+# Apply label "bugfix" if head matches one of "bugfix/*" or "hotfix/*"
+'type: bugfix':
+  head: ["bugfix/*", "hotfix/*"]
+
+# Apply label "Bump Minor" if base matches "release/next-minor"
+'Bump Minor':
+  base: "release/next-minor"
\ No newline at end of file
diff --git a/.github/pr-glob-labeler.yml b/.github/pr-glob-labeler.yml
new file mode 100644
index 0000000000..f23f74f310
--- /dev/null
+++ b/.github/pr-glob-labeler.yml
@@ -0,0 +1,102 @@
+# Add type: unittest label if any changes in tests folders
+'type: unittest':
+- "**/*tests*/**/*"
+
+# any changes in documentation structure
+'type: documentation':
+- "**/*website*/**/*"
+- "**/*docs*/**/*"
+
+# hosts triage
+'host: Nuke':
+- "**/*nuke*"
+- "**/*nuke*/**/*"
+
+'host: Photoshop':
+- "**/*photoshop*"
+- "**/*photoshop*/**/*"
+
+'host: Harmony':
+- "**/*harmony*"
+- "**/*harmony*/**/*"
+
+'host: UE':
+- "**/*unreal*"
+- "**/*unreal*/**/*"
+
+'host: Houdini':
+- "**/*houdini*"
+- "**/*houdini*/**/*"
+
+'host: Maya':
+- "**/*maya*"
+- "**/*maya*/**/*"
+
+'host: Resolve':
+- "**/*resolve*"
+- "**/*resolve*/**/*"
+
+'host: Blender':
+- "**/*blender*"
+- "**/*blender*/**/*"
+
+'host: Hiero':
+- "**/*hiero*"
+- "**/*hiero*/**/*"
+
+'host: Fusion':
+- "**/*fusion*"
+- "**/*fusion*/**/*"
+
+'host: Flame':
+- "**/*flame*"
+- "**/*flame*/**/*"
+
+'host: TrayPublisher':
+- "**/*traypublisher*"
+- "**/*traypublisher*/**/*"
+
+'host: 3dsmax':
+- "**/*max*"
+- "**/*max*/**/*"
+
+'host: TV Paint':
+- "**/*tvpaint*"
+- "**/*tvpaint*/**/*"
+
+'host: CelAction':
+- "**/*celaction*"
+- "**/*celaction*/**/*"
+
+'host: After Effects':
+- "**/*aftereffects*"
+- "**/*aftereffects*/**/*"
+
+'host: Substance Painter':
+- "**/*substancepainter*"
+- "**/*substancepainter*/**/*"
+
+# modules triage
+'module: Deadline':
+- "**/*deadline*"
+- "**/*deadline*/**/*"
+
+'module: RoyalRender':
+- "**/*royalrender*"
+- "**/*royalrender*/**/*"
+
+'module: Sitesync':
+- "**/*sync_server*"
+- "**/*sync_server*/**/*"
+
+'module: Ftrack':
+- "**/*ftrack*"
+- "**/*ftrack*/**/*"
+
+'module: Shotgrid':
+- "**/*shotgrid*"
+- "**/*shotgrid*/**/*"
+
+'module: Kitsu':
+- "**/*kitsu*"
+- "**/*kitsu*/**/*"
diff --git a/.github/workflows/project_actions.yml b/.github/workflows/project_actions.yml
new file mode 100644
index 0000000000..c66e378a5a
--- /dev/null
+++ b/.github/workflows/project_actions.yml
@@ -0,0 +1,71 @@
+name: project-actions
+
+on:
+  pull_request:
+    types: [opened, synchronize, assigned, review_requested]
+  pull_request_review:
+    types: [submitted]
+
+jobs:
+  pr_review_requested:
+    name: pr_review_requested
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request_review' && github.event.review.state == 'changes_requested'
+    steps:
+      - name: Move PR to 'Change Requested'
+        uses: leonsteinhaeuser/project-beta-automations@v2.1.0
+        with:
+          gh_token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          organization: ynput
+          project_id: 11
+          resource_node_id: ${{ github.event.pull_request.node_id }}
+          status_value: Change Requested
+
+  size-label:
+    name: pr_size_label
+    runs-on: ubuntu-latest
+    if: |
+      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
+      || (github.event_name == 'pull_request' && github.event.action == 'assigned')}}
+
+    steps:
+      - name: Add size label
+        uses: "pascalgn/size-label-action@v0.4.3"
+        env:
+          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
+          IGNORED: ".gitignore\n*.md\n*.json"
+        with:
+          sizes: >
+            {
+              "0": "XS",
+              "100": "S",
+              "500": "M",
+              "1000": "L",
+              "1500": "XL",
+              "2500": "XXL"
+            }
+
+  label_prs_branch:
+    name: pr_branch_label
+    runs-on: ubuntu-latest
+    if: |
+      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
+      || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
+    steps:
+      - name: Label PRs - Branch name detection
+        uses: ffittschen/pr-branch-labeler@v1
+        with:
+          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
+
+  label_prs_glob:
+    name: pr_glob_label
+    runs-on: ubuntu-latest
+    if: |
+      ${{(github.event_name == 'pull_request' && github.event.action == 'synchronize')
+      || (github.event_name == 'pull_request' && github.event.action == 'opened')}}
+    steps:
+      - name: Label PRs - Glob detection
+        uses: actions/labeler@v4
+        with:
+          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          configuration-path: ".github/pr-glob-labeler.yml"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 145c2e2c1a..4e22b783c4
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,931 @@
 # Changelog
+## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3)
+
+### **πŸ†• New features**

+Blender: Extract Review #3616

Added Review to Blender.

This implementation is based on #3508 but made compatible with the current implementation of OpenPype for Blender.


___

+ + +
+Data Exchanges: Point Cloud for 3dsMax #4532

Publish the PRT format with tyFlow in 3dsMax.

Publishes the PRT format with tyFlow in 3dsMax, and sets up a loader to load the format as well.
- [x] creator
- [x] extractor
- [x] validator
- [x] loader


___

+ + +
+Global: persistent staging directory for renders #4583

Allows configuring whether the staging directory (`stagingDir`) should be persistent, with the use of profiles.

With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, DCCs automatically add a rendering path during the creation stage, which is then used in publishing. One of the key advantages of this feature is that it lets users take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, it allows users to keep their rendered data persistent and use their own infrastructure for regular cleaning. Note, however, that some productions may want to use this feature without persistency, and retargeting the rendering folder to faster storage is also not supported at the moment. It is the studio's responsibility to clean up obsolete folders with data.

The location of the folder is configured in `project_anatomy/templates/others` (a 'transient' key is expected, with a 'folder' key; there could be more templates). Which family/task type/subset it applies to is configured in `project_settings/global/tools/publish/transient_dir_profiles`, as sketched below.

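As an illustration, the combination could look roughly like this, expressed as Python data for readability. The `transient`/`folder` key names and the settings path come from the description above; the folder template value and the profile fields are assumptions, not shipped defaults.

```python
# Hypothetical illustration only - the values below are assumptions.

# project_anatomy/templates/others - the 'transient' template:
anatomy_templates_others = {
    "transient": {
        "folder": "{root[work]}/{project[name]}/transient/{asset}_{subset}",
    },
}

# project_settings/global/tools/publish/transient_dir_profiles - one profile:
transient_dir_profiles = [
    {
        "families": ["render"],  # which families get a persistent stagingDir
        "task_types": [],        # empty matches any task type
        "subsets": [],           # empty matches any subset
    },
]
```


___
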
+ + +
+Kitsu custom comment template #4599

Kitsu allows writing markdown in its comment field. This can be very powerful for delivering dynamic comments with the help of the data from the instance. This feature defaults to off, so the admin has to manually set up the comment field the way they want. I have added a basic example of how the comment can look as the comment field's default value (see the sketch below). I also want to add some documentation, but that's on its way once the code itself looks good to the reviewers.

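For illustration, a template along these lines could be used as the comment field's value; the placeholder keys below are assumptions about available instance data, not a documented set.

```python
# Hypothetical markdown comment template using Python format() placeholders.
KITSU_COMMENT_TEMPLATE = """\
### {family} `{subset}` v{version:03d}

{comment}
"""

print(KITSU_COMMENT_TEMPLATE.format(
    family="render",
    subset="renderCompositingMain",
    version=7,
    comment="First pass for review.",
))
```


___
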
+ + +
+MaxScene Family #4615 + +Introduction of the Max Scene Family + + +___ + +
+ +### **πŸš€ Enhancements** + + +
+Maya: Multiple values on single render attribute - OP-4131 #4631

When validating render attributes, this adds support for multiple values. When repairing, the first value in the list is used.


___

+ + +
+Maya: enable 2D Pan/Zoom for playblasts - OP-5213 #4687 + +Setting for enabling 2D Pan/Zoom on reviews. + + +___ + +
+ + +
+Copy existing or generate new Fusion profile on prelaunch #4572

Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch.

The idea is to create a copy of the existing Fusion profile, adding an OpenPype menu to the Fusion instance. By default the copy setting is turned off, so no file copying is performed; instead a clean Fusion profile is created by Fusion in the predefined folder. The default location is set to `~/.openpype/hosts/fusion/prefs` to better comply with the other OS platforms. After creating the default profile, some modifications are applied:
- forced Python 3
- forced English interface
- OpenPype-specific path maps

If the `copy_prefs` checkbox is toggled, a copy of the existing Fusion profile folder is placed in the mentioned location and then altered the same way as described above. The operation runs only once, on the first launch, unless `force_sync [Resync profile on each launch]` is toggled. The English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug). A rough sketch of the copy-or-create step follows.

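A minimal sketch of the copy-or-create logic, assuming made-up function and argument names (the real logic lives in the Fusion prelaunch hook):

```python
# Illustrative sketch only - names and paths are assumptions.
import shutil
from pathlib import Path


def ensure_fusion_profile(master_prefs: Path, prefs_dir: Path,
                          copy_prefs: bool, force_sync: bool) -> None:
    """Prepare the folder that FUSION16_PROFILE_DIR will point to."""
    if prefs_dir.exists() and not force_sync:
        return  # runs only once unless a resync is forced
    if copy_prefs and master_prefs.exists():
        # Copy the existing Fusion profile so user preferences are kept.
        shutil.copytree(master_prefs, prefs_dir, dirs_exist_ok=True)
    else:
        # Otherwise let Fusion create a clean profile in this folder.
        prefs_dir.mkdir(parents=True, exist_ok=True)
```


___
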
+ + +
+Houdini: Create button open new publisher's "create" tab #4601 + +During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing due to "Create" going to the older creator but now being completely empty and the publish button directly went to the publish tab.This resolves that by fixing the Create button to now open the new publisher but on the Create tab.Also made publish button enforce going to the "publish" tab for consistency in usage.@antirotor I think changing the Create button's callback was just missed in this commit or was there a specific reason to not change that around yet? + + +___ + +
+ + +
+Clockify: refresh and fix the integration #4607

Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and it is currently a WIP to make it fully functional. Most functions, such as starting and stopping timers and project sync, are working. For the rate limiting task a new dependency is added: https://pypi.org/project/ratelimiter/


___

+ + +
+Fusion publish existing frames #4611

This PR adds the ability to publish existing frames instead of having to re-render all of them for each new publish. I have split the render_locally plugin so the review part is its own plugin now. I also changed the saver creator plugin's label from Saver to Render (saver), as I intend to add a Prerender creator like in Nuke.


___

+ + +
+Resolution settings referenced from DB record for 3dsMax #4652

- Add a callback for setting the resolution according to the DB record after a new scene is created.
- Add a new action to the OpenPype menu which allows the user to reset the resolution in 3dsMax.


___

+ + +
+3dsmax: render instance settings in Publish tab #4658

Allows users to preset the pools, group and use_published settings in the Render Creator in the Max host. Users can set the settings before or after creating an instance in the new publisher.


___

+ + +
+scene length setting referenced from DB record for 3dsMax #4665

Setting the timeline length based on the DB record in the 3dsMax host.


___

+ + +
+Publisher: Windows reduce command window pop-ups during Publishing #4672 + +Reduce the command line pop-ups that show on Windows during publishing. + + +___ + +
+ + +
+Publisher: Explicit save #4676

The Publisher has an explicit button to save changes, so a reset can happen without saving any changes. Saving still happens automatically when publishing is started or on publisher window close, but a popup is shown if the context of the host has changed. The important context was extended by the workfile path (if the host integration supports it), so workfile changes are captured too. In that case a confirmation dialog is shown to the user. All callbacks that may require a save of context were moved to the main window to handle showing the dialog in one place. Save changes now returns success, so on failure the rest of the logic is skipped -> publishing won't start when saving of instances fails. The save and reset buttons have shortcuts (Ctrl + S and Ctrl + R).


___

+ + +
+CelAction: conditional workfile parameters from settings #4677

Since some productions requested excluding some workfile parameters from the publishing submission, we moved them to settings so they can be altered per project.


___

+ + +
+Improve logging of used app + tool envs on application launch #4682 + +Improve logging of what apps + tool environments got loaded for an application launch. + + +___ + +
+ + +
+Fix name and docstring for Create Workdir Extra Folders prelaunch hook #4683

Fix the class name and docstring for the Create Workdir Extra Folders prelaunch hook. The class name and docstring were originally copied from another plug-in and didn't match the plug-in logic.

This also fixes potentially seeing the hook twice in your logs (before/after screenshots in the PR), where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin.


___

+ + +
+Application launch context: Include app group name in logger #4684

Clarify better in the logs which app group the ApplicationLaunchContext belongs to and which application is being launched (before/after examples in the PR).


___

+ + +
+increment workfile version 3dsmax #4685

Increment the workfile version in 3dsMax, as in the Blender and Maya hosts.


___

+ +### **πŸ› Bug fixes** + + +
+Maya: Fix getting non-active model panel. #2968 + +When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence. + + +___ + +
+ + +
+Maya: Fix broken review publishing. #4549 + +Resolves #4547 + + +___ + +
+ + +
+Maya: Avoid error on right click in Loader if `mtoa` is not loaded #4616

Fix an error on right clicking in the Loader when `mtoa` is not a loaded plug-in. Additionally, if `mtoa` isn't loaded, the loader will now load the plug-in before trying to create the Arnold standin.


___

+ + +
+Maya: Fix extract look colorspace detection #4618

Fix the logic which guesses the colorspace using the `arnold` python library.
- Previously it would error if `mtoa` was not available on the path, so it still required `mtoa` to be available.
- The colorspace-guessing logic doesn't actually require `mtoa` to be loaded, only the `arnold` python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to be loaded to guess the colorspace.
- The if/else branch was likely not doing what was intended: `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded, so it would only ever be true if it ends up loading the `mtoa` plugin the first time.
```python
# Tested in Maya 2022.1
print(cmds.loadPlugin("mtoa", quiet=True))
# ['mtoa']
print(cmds.loadPlugin("mtoa", quiet=True))
# None
```


___

+ + +
+Maya: Maya Playblast Options overrides - OP-3847 #4634

When publishing a review in Maya, the extractor would fail due to a wrong (long) panel name.


___

+ + +
+Bugfix/op 2834 fix extract playblast #4701

Fixes extract playblast (OP-2834).


___

+ + +
+Bugfix/op 2834 fix extract playblast #4704

Follow-up fix for extract playblast (OP-2834).


___

+ + +
+Maya: bug fix for passing zoom settings if review is attached to subset #4716 + +Fix for attaching review to subset with pan/zoom option. + + +___ + +
+ + +
+Maya: tile assembly fail in draft - OP-4820 #4416 + +Tile assembly in Deadline was broken. + +Initial bug report revealed other areas of the tile assembly that needed fixing. + + +___ + +
+ + +
+Maya: Yeti Validate Rig Input - OP-3454 #4554

Fix Yeti Validate Rig Input.

The existing workflow was broken due to #3297.


___

+ + +
+Scene inventory: Fix code errors when "not found" entries are found #4594 + +Whenever a "NOT FOUND" entry is present a lot of errors happened in the Scene Inventory: +- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all).Error reported on Discord: +```python +Traceback (most recent call last): + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint + text = self.displayText( + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText + assert isinstance(value, numbers.Integral), ( +AssertionError: Version is not integer. "None" +``` +- Right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions.This PR resolves those. + + +___ + +
+ + +
+Kitsu: Sync OP with zou, make sure value-data is int or float #4596

Currently the data zou pulls is a string and not a value, causing some bugs in the pipe where a value is expected (like `Set frame range` in Fusion).

This PR makes sure each value is set with int() or float() so these bugs can't happen later on.

_(A request to cgwire has also been sent to allow forcing values only for some metadata columns, but currently the user can enter whatever they want in there.)_

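The casting boils down to a helper along these lines (an illustrative sketch, not the actual module code):

```python
def to_number(value):
    """Cast zou's string metadata to int or float when possible."""
    try:
        return int(value)
    except (TypeError, ValueError):
        try:
            return float(value)
        except (TypeError, ValueError):
            return value  # leave non-numeric values untouched


assert to_number("1001") == 1001
assert to_number("25.0") == 25.0
```


___
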
+ + +
+Max: fix the bug of removing an instance #4617 + +fix the bug of removing an instance in 3dsMax + + +___ + +
+ + +
+Global | Nuke: fixing farm publishing workflow #4623

After Nuke adopted the new publisher with new creators, new issues were introduced; they are addressed with this PR. For example, publishing of reviewable video files was broken when published via the farm. Local publishing was fixed as well.


___

+ + +
+Ftrack: Ftrack additional families filtering #4633 + +Ftrack family collector makes sure the subset family is also in instance families for additional families filtering. + + +___ + +
+ + +
+Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix #4635

Sync between hierarchical and non-hierarchical attributes should be fixed and work as expected. The action syncs the values as expected, and the event handler does too, but only on newly created entities.


___

+ + +
+bugfix for 3dsmax publishing error #4637

Fix the bug of a failing publishing job in 3dsMax.


___

+ + +
+General: Use right validation for ffmpeg executable #4640 + +Use ffmpeg exec validation for ffmpeg executables instead of oiio exec validation. The validation is used as last possible source of ffmpeg from `PATH` environment variables, which is an edge case but can cause issues. + + +___ + +
+ + +
+3dsmax: opening last workfile #4644 + +Supports opening last saved workfile in 3dsmax host. + + +___ + +
+ + +
+Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution #4647

This should fix the occasional behavior of the QThread being destroyed before even its worker returns from the `run()` function. After quitting, it should wait for the QThread object to properly close itself.

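The fix pattern is roughly the following sketch (not the actual splash screen code):

```python
from qtpy import QtCore


class Worker(QtCore.QThread):
    def run(self):
        QtCore.QThread.msleep(100)  # stand-in for the splash screen work


app = QtCore.QCoreApplication([])
worker = Worker()
worker.start()
# On quit: wait() blocks until run() has returned, so the QThread
# object is never destroyed while its worker is still executing.
worker.quit()
worker.wait()
```


___
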
+ + +
+General: Use right plugin class for Collect Comment #4653

The Collect Comment plugin is an instance plugin, so it should inherit from `InstancePlugin` instead of `ContextPlugin`.


___

+ + +
+Global: add tags field to thumbnail representation #4660

The thumbnail representation might be missing the tags field.


___

+ + +
+Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers #4662

Fix #4656 by enforcing unique destination transfers in the Integrator. It's now disallowed to overwrite a destination in the file transaction queue with a new source path during the publish.

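Conceptually the enforced rule looks like this sketch; the names are illustrative, not the Integrator's actual API:

```python
class DuplicateDestinationError(ValueError):
    """Raised when a queued destination would be overwritten by a new source."""


def queue_transfer(queue: dict, src: str, dst: str) -> None:
    existing = queue.get(dst)
    if existing is not None and existing != src:
        raise DuplicateDestinationError(
            f"Destination already queued from a different source: {dst}"
        )
    queue[dst] = src


transfers = {}
queue_transfer(transfers, "/tmp/a.exr", "/publish/v001/a.exr")
queue_transfer(transfers, "/tmp/a.exr", "/publish/v001/a.exr")  # same pair: allowed
```


___
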
+ + +
+Hiero: Creator with correct workfile numeric padding input #4666

The creator was showing 99 in the workfile input for a long time, even if users set the default value to 1001 in studio settings. This has been fixed now.


___

+ + +
+Nuke: Nukenodes family instance without frame range #4669 + +No need to add frame range data into `nukenodes` (backdrop) family publishes - since those are timeless. + + +___ + +
+ + +
+TVPaint: Optional Validation plugins can be de/activated by user #4674

Added `OptionalPyblishPluginMixin` to TVPaint plugins that can be optional, as sketched below.

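The pattern is roughly as follows; the plugin body is a made-up example around the mixin, not one of the actual TVPaint plugins:

```python
import pyblish.api
from openpype.pipeline.publish import OptionalPyblishPluginMixin


class ValidateExample(pyblish.api.InstancePlugin,
                      OptionalPyblishPluginMixin):
    """Example optional validator (illustrative only)."""

    order = pyblish.api.ValidatorOrder
    optional = True  # exposed as a toggle to the user

    def process(self, instance):
        if not self.is_active(instance.data):
            return  # user disabled this plug-in for the instance
        # actual validation would go here
```


___
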
+ + +
+Kitsu: Slightly less strict with instance data #4678

- Allow taking the task name from context if the asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in data.
- Allow matching "review" against both `instance.data["family"]` and `instance.data["families"]`, because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint.
- Do not error in Integrate Kitsu Review whenever, for whatever reason, Integrate Kitsu Note did not create a comment; just log a message that it was unable to connect a review.


___

+ + +
+Publisher: Fix reset shortcut sequence #4694

Fix a bug created in https://github.com/ynput/OpenPype/pull/4676 where the key sequence was checked using an unsupported method. The check was changed to convert the event into a `QKeySequence` object which can be compared to the prepared sequence.


___

+ + +
+Refactor _capture #4702

Refactors `_capture`.


___

+ + +
+Hiero: correct container colors if UpToDate #4708

Colors on loaded containers now correctly identify the real state of the version: `red` for out of date and `green` for up to date.


___

+ +### **πŸ”€ Refactored code** + + +
+Look Assigner: Move Look Assigner tool since it's Maya only #4604

Fix #4357: Move the Look Assigner tool to Maya since it's Maya only.


___

+ + +
+Maya: Remove unused functions from Extract Look #4671 + +Remove unused functions from Maya Extract Look plug-in + + +___ + +
+ + +
+Extract Review code refactor #3930 + +Trying to reduce complexity of Extract Review plug-in +- Re-use profile filtering from lib +- Remove "combination families" additional filtering which supposedly was from OP v2 +- Simplify 'formatting' for filling gaps +- Use `legacy_io.Session` over `os.environ` + + +___ + +
+ + +
+Maya: Replace last usages of Qt module #4610 + +Replace last usage of `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins. + + +___ + +
+ + +
+Update tests and documentation for `ColormanagedPyblishPluginMixin` #4612 + +Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation. + + +___ + +
+ + +
+Improve logging of used app + tool envs on application launch (minor tweak) #4686 + +Use `app.full_name` for change done in #4682 + + +___ + +
+ +### **πŸ“ƒ Documentation** + + +
+Docs/add architecture document #4344

Add `ARCHITECTURE.md` document.

This document attempts to give a quick overview of the project to help onboarding. It's not extensive documentation, but more of an elevator pitch: one-line descriptions of files/directories and what they attempt to do.


___

+ + +
+Documentation: Tweak grammar and fix some typos #4613

This resolves some grammar issues and typos in the documentation. It also fixes the extension of some images in the After Effects docs which used an uppercase extension even though the files had a lowercase extension.


___

+ + +
+Docs: Fix some minor grammar/typos #4680 + +Typo/grammar fixes in documentation. + + +___ + +
+ +### **Merged pull requests** + + +
+Maya: Implement image file node loader #4313

Implements a loader for loading a texture image into a `file` node in Maya.

Similar to Maya's hypershade creation of textures on load, you have the option to choose between three modes of creating:
- Texture
- Projection
- Stencil

These should match what Maya generates if you create those in Maya.
- [x] Load and manage file nodes
- [x] Apply color spaces after #4195
- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically.
- [ ] Add support for animation sequences of UDIM textures using the `..exr` path format?


___

+ + +
+Maya Look Assigner: Don't rely on containers for get all assets #4600

This resolves #4044 by not relying on containers in the scene but instead just finding nodes with `cbId` attributes. As such, imported nodes will also be found and a shader can be assigned (similar to when using get from selection).

**Please take into consideration the potential downsides below.**

Potential downsides:
- If an already loaded look has any dagNodes, say a 3D Projection node, then that will also show up as a loaded asset, where previously nodes from loaded looks were ignored.
- If any dag nodes were created locally, they would have gotten `cbId` attributes on scene save, and thus the current asset would almost always show?


___

+ + +
+Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" #4605 + +Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution"This also tweaks it in Houdini from Reset Frame Range to Set Frame Range. + + +___ + +
+ + +
+Resolve missing OPENPYPE_MONGO in deadline global job preload #4484

In the GlobalJobPreLoad plugin, we propose to replace the SpawnProcess with a subprocess and to pass the environment variables in the parameters, since SpawnProcess under CentOS Linux does not pass environment variables.

In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process when it runs under CentOS 7 Linux, and the process gets stuck. We propose to replace it with a subprocess and to pass the variables in the parameters.

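In outline, the change amounts to the following sketch (placeholder command; the actual GlobalJobPreLoad invocation differs):

```python
import os
import subprocess
import sys

# Pass the environment explicitly so variables such as OPENPYPE_MONGO
# reach the child process (SpawnProcess dropped them on CentOS 7).
env = dict(os.environ)
env.setdefault("OPENPYPE_MONGO", "mongodb://localhost:27017")  # assumed value

# Placeholder command standing in for the OpenPype executable invocation.
subprocess.run(
    [sys.executable, "-c", "import os; print(os.environ['OPENPYPE_MONGO'])"],
    env=env,
    check=True,
)
```


___
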
+ + +
+Tests: Added setup_only to tests #4591

Allows downloading the test zip, unzipping it and restoring the DB in preparation for a new test.


___

+ + +
+Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) #4603 + +Fix #4429: Do not reset fps or playback timeline on applying or creating render settings + + +___ + +
+ + +
+Bump @sideway/formula from 3.0.0 to 3.0.1 in /website #4609 + +Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1. +
Maintainer changes: This version was pushed to npm by marsup, a new releaser for @sideway/formula since your current version.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@sideway/formula&package-manager=npm_and_yarn&previous-version=3.0.0&new-version=3.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

+___ + +
+ + +
+Update artist_hosts_maya_arnold.md #4626 + +Correct Arnold docs. +___ + +
+ + +
+Maya: Add "Include Parent Hierarchy" option in animation creator plugin #4645 + +Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) parent hierarchy. This is to avoid artists to check manually the option for all create animation. + + +___ + +
+ + +
+General: Filter available applications #4667

Added an option to filter out applications that don't have a valid executable available in settings, in the launcher and Ftrack actions. This option can be disabled in the new settings category `Applications`; the filtering is disabled by default.


___

+ + +
+3dsmax: make sure that startup script executes #4695 + +Fixing reliability of OpenPype startup in 3dsmax. + + +___ + +
+ + +
+Project Manager: Change minimum frame start/end to '0' #4719 + +Project manager can have frame start/end set to `0`. + + +___ + +
+ + + +## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2) [Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2) diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_foundry_apps.py index 2092d5025d..21ec8e7881 100644 --- a/openpype/hooks/pre_foundry_apps.py +++ b/openpype/hooks/pre_foundry_apps.py @@ -7,7 +7,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook): Nuke is executed "like" python process so it is required to pass `CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console. - At the same time the newly created console won't create it's own stdout + At the same time the newly created console won't create its own stdout and stderr handlers so they should not be redirected to DEVNULL. """ @@ -18,7 +18,7 @@ class LaunchFoundryAppsWindows(PreLaunchHook): def execute(self): # Change `creationflags` to CREATE_NEW_CONSOLE - # - on Windows will nuke create new window using it's console + # - on Windows nuke will create new window using its console # Set `stdout` and `stderr` to None so new created console does not # have redirected output to DEVNULL in build self.launch_context.kwargs.update({ diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index b1fa13acb9..158c32fb5a 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -84,11 +84,11 @@ class MainThreadItem: self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. """ print("Executing process in main thread") if self.done: diff --git a/openpype/hosts/fusion/api/action.py b/openpype/hosts/fusion/api/action.py index 1750920950..ff5dd14caa 100644 --- a/openpype/hosts/fusion/api/action.py +++ b/openpype/hosts/fusion/api/action.py @@ -6,12 +6,13 @@ from openpype.pipeline.publish import get_errored_instances_from_context class SelectInvalidAction(pyblish.api.Action): - """Select invalid nodes in Maya when plug-in failed. + """Select invalid nodes in Fusion when plug-in failed. To retrieve the invalid nodes this assumes a static `get_invalid()` method is available on the plugin. """ + label = "Select invalid" on = "failed" # This action is only available on a failed plug-in icon = "search" # Icon from Awesome Icon @@ -31,8 +32,10 @@ class SelectInvalidAction(pyblish.api.Action): if isinstance(invalid_nodes, (list, tuple)): invalid.extend(invalid_nodes) else: - self.log.warning("Plug-in returned to be invalid, " - "but has no selectable nodes.") + self.log.warning( + "Plug-in returned to be invalid, " + "but has no selectable nodes." 
+ ) if not invalid: # Assume relevant comp is current comp and clear selection @@ -51,4 +54,6 @@ class SelectInvalidAction(pyblish.api.Action): for tool in invalid: flow.Select(tool, True) names.add(tool.Name) - self.log.info("Selecting invalid tools: %s" % ", ".join(sorted(names))) + self.log.info( + "Selecting invalid tools: %s" % ", ".join(sorted(names)) + ) diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py index 343f5f803a..92f38a64c2 100644 --- a/openpype/hosts/fusion/api/menu.py +++ b/openpype/hosts/fusion/api/menu.py @@ -6,7 +6,6 @@ from openpype.tools.utils import host_tools from openpype.style import load_stylesheet from openpype.lib import register_event_callback from openpype.hosts.fusion.scripts import ( - set_rendermode, duplicate_with_inputs, ) from openpype.hosts.fusion.api.lib import ( @@ -60,7 +59,6 @@ class OpenPypeMenu(QtWidgets.QWidget): publish_btn = QtWidgets.QPushButton("Publish...", self) manager_btn = QtWidgets.QPushButton("Manage...", self) libload_btn = QtWidgets.QPushButton("Library...", self) - rendermode_btn = QtWidgets.QPushButton("Set render mode...", self) set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) duplicate_with_inputs_btn = QtWidgets.QPushButton( @@ -91,7 +89,6 @@ class OpenPypeMenu(QtWidgets.QWidget): layout.addWidget(set_framerange_btn) layout.addWidget(set_resolution_btn) - layout.addWidget(rendermode_btn) layout.addSpacing(20) @@ -108,7 +105,6 @@ class OpenPypeMenu(QtWidgets.QWidget): load_btn.clicked.connect(self.on_load_clicked) manager_btn.clicked.connect(self.on_manager_clicked) libload_btn.clicked.connect(self.on_libload_clicked) - rendermode_btn.clicked.connect(self.on_rendermode_clicked) duplicate_with_inputs_btn.clicked.connect( self.on_duplicate_with_inputs_clicked ) @@ -162,15 +158,6 @@ class OpenPypeMenu(QtWidgets.QWidget): def on_libload_clicked(self): host_tools.show_library_loader() - def on_rendermode_clicked(self): - if self.render_mode_widget is None: - window = set_rendermode.SetRenderMode() - window.setStyleSheet(load_stylesheet()) - window.show() - self.render_mode_widget = window - else: - self.render_mode_widget.show() - def on_duplicate_with_inputs_clicked(self): duplicate_with_inputs.duplicate_with_input_connections() diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/openpype/hosts/fusion/plugins/create/create_saver.py index e581bac20f..56085b0a06 100644 --- a/openpype/hosts/fusion/plugins/create/create_saver.py +++ b/openpype/hosts/fusion/plugins/create/create_saver.py @@ -4,29 +4,34 @@ import qtawesome from openpype.hosts.fusion.api import ( get_current_comp, - comp_lock_and_undo_chunk + comp_lock_and_undo_chunk, ) -from openpype.lib import BoolDef +from openpype.lib import ( + BoolDef, + EnumDef, +) from openpype.pipeline import ( legacy_io, Creator, - CreatedInstance + CreatedInstance, +) +from openpype.client import ( + get_asset_by_name, ) -from openpype.client import get_asset_by_name class CreateSaver(Creator): identifier = "io.openpype.creators.fusion.saver" - name = "saver" - label = "Saver" + label = "Render (saver)" + name = "render" family = "render" - default_variants = ["Main"] - + default_variants = ["Main", "Mask"] description = "Fusion Saver to generate image sequence" - def create(self, subset_name, instance_data, pre_create_data): + instance_attributes = ["reviewable"] + def create(self, subset_name, instance_data, pre_create_data): # TODO: Add pre_create attributes 
to choose file format? file_format = "OpenEXRFormat" @@ -58,7 +63,8 @@ class CreateSaver(Creator): family=self.family, subset_name=subset_name, data=instance_data, - creator=self) + creator=self, + ) # Insert the transient data instance.transient_data["tool"] = saver @@ -68,11 +74,9 @@ class CreateSaver(Creator): return instance def collect_instances(self): - comp = get_current_comp() tools = comp.GetToolList(False, "Saver").values() for tool in tools: - data = self.get_managed_tool_data(tool) if not data: data = self._collect_unmanaged_saver(tool) @@ -90,7 +94,6 @@ class CreateSaver(Creator): def update_instances(self, update_list): for created_inst, _changes in update_list: - new_data = created_inst.data_to_store() tool = created_inst.transient_data["tool"] self._update_tool_with_data(tool, new_data) @@ -139,7 +142,6 @@ class CreateSaver(Creator): tool.SetAttrs({"TOOLS_Name": subset}) def _collect_unmanaged_saver(self, tool): - # TODO: this should not be done this way - this should actually # get the data as stored on the tool explicitly (however) # that would disallow any 'regular saver' to be collected @@ -153,8 +155,7 @@ class CreateSaver(Creator): asset = legacy_io.Session["AVALON_ASSET"] task = legacy_io.Session["AVALON_TASK"] - asset_doc = get_asset_by_name(project_name=project, - asset_name=asset) + asset_doc = get_asset_by_name(project_name=project, asset_name=asset) path = tool["Clip"][comp.TIME_UNDEFINED] fname = os.path.basename(path) @@ -178,21 +179,20 @@ class CreateSaver(Creator): "variant": variant, "active": not passthrough, "family": self.family, - # Unique identifier for instance and this creator "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier + "creator_identifier": self.identifier, } def get_managed_tool_data(self, tool): """Return data of the tool if it matches creator identifier""" - data = tool.GetData('openpype') + data = tool.GetData("openpype") if not isinstance(data, dict): return required = { "id": "pyblish.avalon.instance", - "creator_identifier": self.identifier + "creator_identifier": self.identifier, } for key, value in required.items(): if key not in data or data[key] != value: @@ -205,11 +205,40 @@ class CreateSaver(Creator): return data - def get_instance_attr_defs(self): - return [ - BoolDef( - "review", - default=True, - label="Review" - ) + def get_pre_create_attr_defs(self): + """Settings for create page""" + attr_defs = [ + self._get_render_target_enum(), + self._get_reviewable_bool(), ] + return attr_defs + + def get_instance_attr_defs(self): + """Settings for publish page""" + attr_defs = [ + self._get_render_target_enum(), + self._get_reviewable_bool(), + ] + return attr_defs + + # These functions below should be moved to another file + # so it can be used by other plugins. plugin.py ? 
+ + def _get_render_target_enum(self): + rendering_targets = { + "local": "Local machine rendering", + "frames": "Use existing frames", + } + if "farm_rendering" in self.instance_attributes: + rendering_targets["farm"] = "Farm rendering" + + return EnumDef( + "render_target", items=rendering_targets, label="Render target" + ) + + def _get_reviewable_bool(self): + return BoolDef( + "review", + default=("reviewable" in self.instance_attributes), + label="Review", + ) diff --git a/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py b/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py new file mode 100644 index 0000000000..0ba777629f --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_expected_frames.py @@ -0,0 +1,50 @@ +import pyblish.api +from openpype.pipeline import publish +import os + + +class CollectFusionExpectedFrames( + pyblish.api.InstancePlugin, publish.ColormanagedPyblishPluginMixin +): + """Collect all frames needed to publish expected frames""" + + order = pyblish.api.CollectorOrder + 0.5 + label = "Collect Expected Frames" + hosts = ["fusion"] + families = ["render"] + + def process(self, instance): + context = instance.context + + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] + path = instance.data["path"] + output_dir = instance.data["outputDir"] + + basename = os.path.basename(path) + head, ext = os.path.splitext(basename) + files = [ + f"{head}{str(frame).zfill(4)}{ext}" + for frame in range(frame_start, frame_end + 1) + ] + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{len(str(frame_end))}d" % frame_start, + "files": files, + "stagingDir": output_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) diff --git a/openpype/hosts/fusion/plugins/publish/collect_render_target.py b/openpype/hosts/fusion/plugins/publish/collect_render_target.py deleted file mode 100644 index 39017f32e0..0000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_render_target.py +++ /dev/null @@ -1,44 +0,0 @@ -import pyblish.api - - -class CollectFusionRenderMode(pyblish.api.InstancePlugin): - """Collect current comp's render Mode - - Options: - local - farm - - Note that this value is set for each comp separately. When you save the - comp this information will be stored in that file. If for some reason the - available tool does not visualize which render mode is set for the - current comp, please run the following line in the console (Py2) - - comp.GetData("openpype.rendermode") - - This will return the name of the current render mode as seen above under - Options. 
- - """ - - order = pyblish.api.CollectorOrder + 0.4 - label = "Collect Render Mode" - hosts = ["fusion"] - families = ["render"] - - def process(self, instance): - """Collect all image sequence tools""" - options = ["local", "farm"] - - comp = instance.context.data.get("currentComp") - if not comp: - raise RuntimeError("No comp previously collected, unable to " - "retrieve Fusion version.") - - rendermode = comp.GetData("openpype.rendermode") or "local" - assert rendermode in options, "Must be supported render mode" - - self.log.info("Render mode: {0}".format(rendermode)) - - # Append family - family = "render.{0}".format(rendermode) - instance.data["families"].append(family) diff --git a/openpype/hosts/fusion/plugins/publish/collect_renders.py b/openpype/hosts/fusion/plugins/publish/collect_renders.py new file mode 100644 index 0000000000..7f38e68447 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_renders.py @@ -0,0 +1,25 @@ +import pyblish.api + + +class CollectFusionRenders(pyblish.api.InstancePlugin): + """Collect current saver node's render Mode + + Options: + local (Render locally) + frames (Use existing frames) + + """ + + order = pyblish.api.CollectorOrder + 0.4 + label = "Collect Renders" + hosts = ["fusion"] + families = ["render"] + + def process(self, instance): + render_target = instance.data["render_target"] + family = instance.data["family"] + + # add targeted family to families + instance.data["families"].append( + "{}.{}".format(family, render_target) + ) diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py b/openpype/hosts/fusion/plugins/publish/extract_render_local.py new file mode 100644 index 0000000000..5a0140c525 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/extract_render_local.py @@ -0,0 +1,109 @@ +import logging +import contextlib +import pyblish.api +from openpype.hosts.fusion.api import comp_lock_and_undo_chunk + + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def enabled_savers(comp, savers): + """Enable only the `savers` in Comp during the context. + + Any Saver tool in the passed composition that is not in the savers list + will be set to passthrough during the context. + + Args: + comp (object): Fusion composition object. + savers (list): List of Saver tool objects. 
+ + """ + passthrough_key = "TOOLB_PassThrough" + original_states = {} + enabled_save_names = {saver.Name for saver in savers} + try: + all_savers = comp.GetToolList(False, "Saver").values() + for saver in all_savers: + original_state = saver.GetAttrs()[passthrough_key] + original_states[saver] = original_state + + # The passthrough state we want to set (passthrough != enabled) + state = saver.Name not in enabled_save_names + if state != original_state: + saver.SetAttrs({passthrough_key: state}) + yield + finally: + for saver, original_state in original_states.items(): + saver.SetAttrs({"TOOLB_PassThrough": original_state}) + + +class FusionRenderLocal(pyblish.api.InstancePlugin): + """Render the current Fusion composition locally.""" + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Render Local" + hosts = ["fusion"] + families = ["render.local"] + + def process(self, instance): + context = instance.context + + # Start render + self.render_once(context) + + # Log render status + self.log.info( + "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( + nm=instance.data["name"], + ast=instance.data["asset"], + tsk=instance.data["task"], + ) + ) + + def render_once(self, context): + """Render context comp only once, even with more render instances""" + + # This plug-in assumes all render nodes get rendered at the same time + # to speed up the rendering. The check below makes sure that we only + # execute the rendering once and not for each instance. + key = f"__hasRun{self.__class__.__name__}" + + savers_to_render = [ + # Get the saver tool from the instance + instance[0] for instance in context if + # Only active instances + instance.data.get("publish", True) and + # Only render.local instances + "render.local" in instance.data["families"] + ] + + if key not in context.data: + # We initialize as false to indicate it wasn't successful yet + # so we can keep track of whether Fusion succeeded + context.data[key] = False + + current_comp = context.data["currentComp"] + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] + + self.log.info("Starting Fusion render") + self.log.info(f"Start frame: {frame_start}") + self.log.info(f"End frame: {frame_end}") + saver_names = ", ".join(saver.Name for saver in savers_to_render) + self.log.info(f"Rendering tools: {saver_names}") + + with comp_lock_and_undo_chunk(current_comp): + with enabled_savers(current_comp, savers_to_render): + result = current_comp.Render( + { + "Start": frame_start, + "End": frame_end, + "Wait": True, + } + ) + + context.data[key] = bool(result) + + if context.data[key] is False: + raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py b/openpype/hosts/fusion/plugins/publish/render_local.py deleted file mode 100644 index 7d5f1a40c7..0000000000 --- a/openpype/hosts/fusion/plugins/publish/render_local.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import publish -from openpype.hosts.fusion.api import comp_lock_and_undo_chunk - - -class Fusionlocal(pyblish.api.InstancePlugin, - publish.ColormanagedPyblishPluginMixin): - """Render the current Fusion composition locally. - - Extract the result of savers by starting a comp render - This will run the local render of Fusion. 
- - """ - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Render Local" - hosts = ["fusion"] - families = ["render.local"] - - def process(self, instance): - context = instance.context - - # Start render - self.render_once(context) - - # Log render status - self.log.info( - "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( - nm=instance.data["name"], - ast=instance.data["asset"], - tsk=instance.data["task"], - ) - ) - - frame_start = context.data["frameStartHandle"] - frame_end = context.data["frameEndHandle"] - path = instance.data["path"] - output_dir = instance.data["outputDir"] - - basename = os.path.basename(path) - head, ext = os.path.splitext(basename) - files = [ - f"{head}{str(frame).zfill(4)}{ext}" - for frame in range(frame_start, frame_end + 1) - ] - repre = { - "name": ext[1:], - "ext": ext[1:], - "frameStart": f"%0{len(str(frame_end))}d" % frame_start, - "files": files, - "stagingDir": output_dir, - } - - self.set_representation_colorspace( - representation=repre, - context=context, - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - instance.data["representations"].append(repre) - - # review representation - if instance.data.get("review", False): - repre["tags"] = ["review", "ftrackreview"] - - def render_once(self, context): - """Render context comp only once, even with more render instances""" - - # This plug-in assumes all render nodes get rendered at the same time - # to speed up the rendering. The check below makes sure that we only - # execute the rendering once and not for each instance. - key = f"__hasRun{self.__class__.__name__}" - if key not in context.data: - # We initialize as false to indicate it wasn't successful yet - # so we can keep track of whether Fusion succeeded - context.data[key] = False - - current_comp = context.data["currentComp"] - frame_start = context.data["frameStartHandle"] - frame_end = context.data["frameEndHandle"] - - self.log.info("Starting Fusion render") - self.log.info(f"Start frame: {frame_start}") - self.log.info(f"End frame: {frame_end}") - - with comp_lock_and_undo_chunk(current_comp): - result = current_comp.Render( - { - "Start": frame_start, - "End": frame_end, - "Wait": True, - } - ) - - context.data[key] = bool(result) - - if context.data[key] is False: - raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py index ba943abacb..8a91f23578 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py +++ b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py @@ -14,22 +14,19 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): """ order = pyblish.api.ValidatorOrder - actions = [RepairAction] label = "Validate Create Folder Checked" families = ["render"] hosts = ["fusion"] - actions = [SelectInvalidAction] + actions = [RepairAction, SelectInvalidAction] @classmethod def get_invalid(cls, instance): - active = instance.data.get("active", instance.data.get("publish")) - if not active: - return [] - tool = instance[0] create_dir = tool.GetInput("CreateDir") if create_dir == 0.0: - cls.log.error("%s has Create Folder turned off" % instance[0].Name) + cls.log.error( + "%s has Create Folder turned off" % instance[0].Name + ) return [tool] def process(self, instance): @@ -37,7 +34,8 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin): if invalid: raise 
PublishValidationError( "Found Saver with Create Folder During Render checked off", - title=self.label) + title=self.label, + ) @classmethod def repair(cls, instance): diff --git a/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py new file mode 100644 index 0000000000..c208b8ef15 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py @@ -0,0 +1,78 @@ +import os +import pyblish.api + +from openpype.pipeline.publish import RepairAction +from openpype.pipeline import PublishValidationError + +from openpype.hosts.fusion.api.action import SelectInvalidAction + + +class ValidateLocalFramesExistence(pyblish.api.InstancePlugin): + """Checks if files for savers that's set + to publish expected frames exists + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Expected Frames Exists" + families = ["render"] + hosts = ["fusion"] + actions = [RepairAction, SelectInvalidAction] + + @classmethod + def get_invalid(cls, instance, non_existing_frames=None): + if non_existing_frames is None: + non_existing_frames = [] + + if instance.data.get("render_target") == "frames": + tool = instance[0] + + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + path = instance.data["path"] + output_dir = instance.data["outputDir"] + + basename = os.path.basename(path) + head, ext = os.path.splitext(basename) + files = [ + f"{head}{str(frame).zfill(4)}{ext}" + for frame in range(frame_start, frame_end + 1) + ] + + for file in files: + if not os.path.exists(os.path.join(output_dir, file)): + cls.log.error( + f"Missing file: {os.path.join(output_dir, file)}" + ) + non_existing_frames.append(file) + + if len(non_existing_frames) > 0: + cls.log.error(f"Some of {tool.Name}'s files does not exist") + return [tool] + + def process(self, instance): + non_existing_frames = [] + invalid = self.get_invalid(instance, non_existing_frames) + if invalid: + raise PublishValidationError( + "{} is set to publish existing frames but " + "some frames are missing. 
" + "The missing file(s) are:\n\n{}".format( + invalid[0].Name, + "\n\n".join(non_existing_frames), + ), + title=self.label, + ) + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid(instance) + if invalid: + tool = invalid[0] + + # Change render target to local to render locally + tool.SetData("openpype.creator_attributes.render_target", "local") + + cls.log.info( + f"Reload the publisher and {tool.Name} " + "will be set to render locally" + ) diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py deleted file mode 100644 index 9d2bfef310..0000000000 --- a/openpype/hosts/fusion/scripts/set_rendermode.py +++ /dev/null @@ -1,112 +0,0 @@ -from qtpy import QtWidgets -import qtawesome -from openpype.hosts.fusion.api import get_current_comp - - -_help = {"local": "Render the comp on your own machine and publish " - "it from that the destination folder", - "farm": "Submit a Fusion render job to a Render farm to use all other" - " computers and add a publish job"} - - -class SetRenderMode(QtWidgets.QWidget): - - def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent) - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - - self.setWindowTitle("Set Render Mode") - self.setFixedSize(300, 175) - - layout = QtWidgets.QVBoxLayout() - - # region comp info - comp_info_layout = QtWidgets.QHBoxLayout() - - update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh", - color="white"), "") - update_btn.setFixedWidth(25) - update_btn.setFixedHeight(25) - - comp_information = QtWidgets.QLineEdit() - comp_information.setEnabled(False) - - comp_info_layout.addWidget(comp_information) - comp_info_layout.addWidget(update_btn) - # endregion comp info - - # region modes - mode_options = QtWidgets.QComboBox() - mode_options.addItems(_help.keys()) - - mode_information = QtWidgets.QTextEdit() - mode_information.setReadOnly(True) - # endregion modes - - accept_btn = QtWidgets.QPushButton("Accept") - - layout.addLayout(comp_info_layout) - layout.addWidget(mode_options) - layout.addWidget(mode_information) - layout.addWidget(accept_btn) - - self.setLayout(layout) - - self.comp_information = comp_information - self.update_btn = update_btn - - self.mode_options = mode_options - self.mode_information = mode_information - - self.accept_btn = accept_btn - - self.connections() - self.update() - - # Force updated render mode help text - self._update_rendermode_info() - - def connections(self): - """Build connections between code and buttons""" - - self.update_btn.clicked.connect(self.update) - self.accept_btn.clicked.connect(self._set_comp_rendermode) - self.mode_options.currentIndexChanged.connect( - self._update_rendermode_info) - - def update(self): - """Update all information in the UI""" - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - self.comp_information.setText(self._comp_name) - - # Update current comp settings - mode = self._get_comp_rendermode() - index = self.mode_options.findText(mode) - self.mode_options.setCurrentIndex(index) - - def _update_rendermode_info(self): - rendermode = self.mode_options.currentText() - self.mode_information.setText(_help[rendermode]) - - def _get_comp_name(self): - return self._comp.GetAttrs("COMPS_Name") - - def _get_comp_rendermode(self): - return self._comp.GetData("openpype.rendermode") or "local" - - def _set_comp_rendermode(self): - rendermode = self.mode_options.currentText() - self._comp.SetData("openpype.rendermode", 
rendermode) - - self._comp.Print("Updated render mode to '%s'\n" % rendermode) - self.hide() - - def _validation(self): - ui_mode = self.mode_options.currentText() - comp_mode = self._get_comp_rendermode() - - return comp_mode == ui_mode diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index bbd1edc14a..0d4368529f 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -1221,7 +1221,7 @@ def set_track_color(track_item, color): def check_inventory_versions(track_items=None): """ - Actual version color idetifier of Loaded containers + Actual version color identifier of Loaded containers Check all track items and filter only Loader nodes for its version. It will get all versions from database @@ -1249,10 +1249,10 @@ def check_inventory_versions(track_items=None): project_name = legacy_io.active_project() filter_result = filter_containers(containers, project_name) for container in filter_result.latest: - set_track_color(container["_item"], clip_color) + set_track_color(container["_item"], clip_color_last) for container in filter_result.outdated: - set_track_color(container["_item"], clip_color_last) + set_track_color(container["_item"], clip_color) def selection_changed_timeline(event): diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py index 4fb750d91b..ac7d75db08 100644 --- a/openpype/hosts/max/api/lib.py +++ b/openpype/hosts/max/api/lib.py @@ -6,6 +6,11 @@ from pymxs import runtime as rt from typing import Union import contextlib +from openpype.pipeline.context_tools import ( + get_current_project_asset, + get_current_project +) + JSON_PREFIX = "JSON::" @@ -157,6 +162,112 @@ def get_multipass_setting(project_setting=None): ["multipass"]) +def set_scene_resolution(width: int, height: int): + """Set the render resolution + + Args: + width(int): value of the width + height(int): value of the height + + Returns: + None + + """ + rt.renderWidth = width + rt.renderHeight = height + + +def reset_scene_resolution(): + """Apply the scene resolution from the project definition + + scene resolution can be overwritten by an asset if the asset.data contains + any information regarding scene resolution . + Returns: + None + """ + data = ["data.resolutionWidth", "data.resolutionHeight"] + project_resolution = get_current_project(fields=data) + project_resolution_data = project_resolution["data"] + asset_resolution = get_current_project_asset(fields=data) + asset_resolution_data = asset_resolution["data"] + # Set project resolution + project_width = int(project_resolution_data.get("resolutionWidth", 1920)) + project_height = int(project_resolution_data.get("resolutionHeight", 1080)) + width = int(asset_resolution_data.get("resolutionWidth", project_width)) + height = int(asset_resolution_data.get("resolutionHeight", project_height)) + + set_scene_resolution(width, height) + + +def get_frame_range() -> dict: + """Get the current assets frame range and handles. + + Returns: + dict: with frame start, frame end, handle start, handle end. 
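+
+        Example (illustrative only; actual values come from the asset
+        document, here a 1001-1100 shot with 10 frame handles):
+            {
+                "frameStart": 1001,
+                "frameEnd": 1100,
+                "handleStart": 10,
+                "handleEnd": 10
+            }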
+ """ + # Set frame start/end + asset = get_current_project_asset() + frame_start = asset["data"].get("frameStart") + frame_end = asset["data"].get("frameEnd") + # Backwards compatibility + if frame_start is None or frame_end is None: + frame_start = asset["data"].get("edit_in") + frame_end = asset["data"].get("edit_out") + if frame_start is None or frame_end is None: + return + handles = asset["data"].get("handles") or 0 + handle_start = asset["data"].get("handleStart") + if handle_start is None: + handle_start = handles + handle_end = asset["data"].get("handleEnd") + if handle_end is None: + handle_end = handles + return { + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end + } + + +def reset_frame_range(fps: bool = True): + """Set frame range to current asset. + This is part of 3dsmax documentation: + + animationRange: A System Global variable which lets you get and + set an Interval value that defines the start and end frames + of the Active Time Segment. + frameRate: A System Global variable which lets you get + and set an Integer value that defines the current + scene frame rate in frames-per-second. + """ + if fps: + data_fps = get_current_project(fields=["data.fps"]) + fps_number = float(data_fps["data"]["fps"]) + rt.frameRate = fps_number + frame_range = get_frame_range() + frame_start = frame_range["frameStart"] - int(frame_range["handleStart"]) + frame_end = frame_range["frameEnd"] + int(frame_range["handleEnd"]) + frange_cmd = f"animationRange = interval {frame_start} {frame_end}" + rt.execute(frange_cmd) + + +def set_context_setting(): + """Apply the project settings from the project definition + + Settings can be overwritten by an asset if the asset.data contains + any information regarding those settings. 
+ + Examples of settings: + frame range + resolution + + Returns: + None + """ + reset_scene_resolution() + + def get_max_version(): """ Args: diff --git a/openpype/hosts/max/api/menu.py b/openpype/hosts/max/api/menu.py index 5c273b49b4..066cc90039 100644 --- a/openpype/hosts/max/api/menu.py +++ b/openpype/hosts/max/api/menu.py @@ -4,6 +4,7 @@ from qtpy import QtWidgets, QtCore from pymxs import runtime as rt from openpype.tools.utils import host_tools +from openpype.hosts.max.api import lib class OpenPypeMenu(object): @@ -107,6 +108,17 @@ class OpenPypeMenu(object): workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu) workfiles_action.triggered.connect(self.workfiles_callback) openpype_menu.addAction(workfiles_action) + + openpype_menu.addSeparator() + + res_action = QtWidgets.QAction("Set Resolution", openpype_menu) + res_action.triggered.connect(self.resolution_callback) + openpype_menu.addAction(res_action) + + frame_action = QtWidgets.QAction("Set Frame Range", openpype_menu) + frame_action.triggered.connect(self.frame_range_callback) + openpype_menu.addAction(frame_action) + return openpype_menu def load_callback(self): @@ -128,3 +140,11 @@ class OpenPypeMenu(object): def workfiles_callback(self): """Callback to show Workfiles tool.""" host_tools.show_workfiles(parent=self.main_widget) + + def resolution_callback(self): + """Callback to reset scene resolution""" + return lib.reset_scene_resolution() + + def frame_range_callback(self): + """Callback to reset frame range""" + return lib.reset_frame_range() diff --git a/openpype/hosts/max/api/pipeline.py b/openpype/hosts/max/api/pipeline.py index f8a7b8ea5c..dacc402318 100644 --- a/openpype/hosts/max/api/pipeline.py +++ b/openpype/hosts/max/api/pipeline.py @@ -50,6 +50,11 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher): self._has_been_setup = True + def context_setting(): + return lib.set_context_setting() + rt.callbacks.addScript(rt.Name('systemPostNew'), + context_setting) + def has_unsaved_changes(self): # TODO: how to get it from 3dsmax? 
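        # A possible implementation (untested sketch, assuming the MAXScript
        # scene-modified flag is reliable in this context):
        #     return bool(rt.getSaveRequired())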
return True diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py index 7c9e311c2f..63e4108c84 100644 --- a/openpype/hosts/max/plugins/publish/collect_render.py +++ b/openpype/hosts/max/plugins/publish/collect_render.py @@ -61,7 +61,7 @@ class CollectRender(pyblish.api.InstancePlugin): "plugin": "3dsmax", "frameStart": context.data['frameStart'], "frameEnd": context.data['frameEnd'], - "version": version_int + "version": version_int, } self.log.info("data: {0}".format(data)) instance.data.update(data) diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index f1b626c06b..e709239ae7 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -26,6 +26,7 @@ class CreateReview(plugin.Creator): "alpha cut" ] useMayaTimeline = True + panZoom = False def __init__(self, *args, **kwargs): super(CreateReview, self).__init__(*args, **kwargs) @@ -45,5 +46,6 @@ class CreateReview(plugin.Creator): data["keepImages"] = self.keepImages data["imagePlane"] = self.imagePlane data["transparency"] = self.transparency + data["panZoom"] = self.panZoom self.data = data diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index 548b1c996a..00565c5819 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -5,6 +5,7 @@ import pyblish.api from openpype.client import get_subset_by_name from openpype.pipeline import legacy_io +from openpype.hosts.maya.api.lib import get_attribute_input class CollectReview(pyblish.api.InstancePlugin): @@ -79,6 +80,8 @@ class CollectReview(pyblish.api.InstancePlugin): data['review_width'] = instance.data['review_width'] data['review_height'] = instance.data['review_height'] data["isolate"] = instance.data["isolate"] + data["panZoom"] = instance.data.get("panZoom", False) + data["panel"] = instance.data["panel"] cmds.setAttr(str(instance) + '.active', 1) self.log.debug('data {}'.format(instance.context[i].data)) instance.context[i].data.update(data) @@ -144,3 +147,21 @@ class CollectReview(pyblish.api.InstancePlugin): "filename": node.filename.get() } ) + + # Collect focal length. 
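+        # The attribute is sampled once per frame when it has an incoming
+        # connection (i.e. it is animated); the "+ 1" below makes the sampled
+        # range inclusive of the end frame. A static focal length is stored
+        # as a single value instead of a list.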
+ attr = camera + ".focalLength" + focal_length = None + if get_attribute_input(attr): + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + 1 + focal_length = [ + cmds.getAttr(attr, time=t) for t in range(int(start), int(end)) + ] + else: + focal_length = cmds.getAttr(attr) + + key = "focalLength" + try: + instance.data["burninDataMembers"][key] = focal_length + except KeyError: + instance.data["burninDataMembers"] = {key: focal_length} diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 94571ff731..72b1489522 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -1,5 +1,6 @@ import os import json +import contextlib import clique import capture @@ -11,6 +12,16 @@ from maya import cmds import pymel.core as pm +@contextlib.contextmanager +def panel_camera(panel, camera): + original_camera = cmds.modelPanel(panel, query=True, camera=True) + try: + cmds.modelPanel(panel, edit=True, camera=camera) + yield + finally: + cmds.modelPanel(panel, edit=True, camera=original_camera) + + class ExtractPlayblast(publish.Extractor): """Extract viewport playblast. @@ -25,6 +36,16 @@ class ExtractPlayblast(publish.Extractor): optional = True capture_preset = {} + def _capture(self, preset): + self.log.info( + "Using preset:\n{}".format( + json.dumps(preset, sort_keys=True, indent=4) + ) + ) + + path = capture.capture(log=self.log, **preset) + self.log.debug("playblast path {}".format(path)) + def process(self, instance): self.log.info("Extracting capture..") @@ -43,7 +64,7 @@ class ExtractPlayblast(publish.Extractor): self.log.info("start: {}, end: {}".format(start, end)) # get cameras - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] preset = lib.load_capture_preset(data=self.capture_preset) # Grab capture presets from the project settings @@ -57,23 +78,23 @@ class ExtractPlayblast(publish.Extractor): asset_height = asset_data.get("resolutionHeight") review_instance_width = instance.data.get("review_width") review_instance_height = instance.data.get("review_height") - preset['camera'] = camera + preset["camera"] = camera # Tests if project resolution is set, # if it is a value other than zero, that value is # used, if not then the asset resolution is # used if review_instance_width and review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height - preset['start_frame'] = start - preset['end_frame'] = end + preset["width"] = asset_width + preset["height"] = asset_height + preset["start_frame"] = start + preset["end_frame"] = end # Enforce persisting camera depth of field camera_options = preset.setdefault("camera_options", {}) @@ -86,8 +107,8 @@ class ExtractPlayblast(publish.Extractor): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True pm.refresh(f=True) @@ -114,7 +135,8 @@ class ExtractPlayblast(publish.Extractor): # Disable Pan/Zoom. 
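+        # (The camera's current pan/zoom state is only read here so it can be
+        # restored after the playblast; whether pan/zoom is baked into the
+        # capture is now driven by the instance's "panZoom" setting through
+        # "camera_options" below.)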
pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"]))
-        cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False)
+        preset.pop("pan_zoom", None)
+        preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"]
 
         # Need to explicitly enable some viewport changes so the viewport is
         # refreshed ahead of playblasting.
@@ -136,30 +158,39 @@
         )
 
         override_viewport_options = (
-            capture_presets['Viewport Options']['override_viewport_options']
+            capture_presets["Viewport Options"]["override_viewport_options"]
         )
-        with lib.maintained_time():
-            filename = preset.get("filename", "%TEMP%")
 
-            # Force viewer to False in call to capture because we have our own
-            # viewer opening call to allow a signal to trigger between
-            # playblast and viewer
-            preset['viewer'] = False
+        # Force viewer to False in call to capture because we have our own
+        # viewer opening call to allow a signal to trigger between
+        # playblast and viewer
+        preset["viewer"] = False
 
-            # Update preset with current panel setting
-            # if override_viewport_options is turned off
-            if not override_viewport_options:
-                panel_preset = capture.parse_view(instance.data["panel"])
-                panel_preset.pop("camera")
-                preset.update(panel_preset)
+        # Update preset with current panel setting
+        # if override_viewport_options is turned off
+        if not override_viewport_options:
+            panel_preset = capture.parse_view(instance.data["panel"])
+            panel_preset.pop("camera")
+            preset.update(panel_preset)
 
-            self.log.info(
-                "Using preset:\n{}".format(
-                    json.dumps(preset, sort_keys=True, indent=4
+        # Handle Python 2 vs Python 3 context manager composition.
+        # TODO: Remove once dropping Python 2.
+        if getattr(contextlib, "nested", None):
+            # Python 2: 'contextlib.nested' exists only on Python 2.
+            with contextlib.nested(
+                lib.maintained_time(),
+                panel_camera(instance.data["panel"], preset["camera"])
+            ):
+                self._capture(preset)
+        else:
+            # Python 3: 'contextlib.nested' was removed, so compose the
+            # context managers with ExitStack instead.
+            with contextlib.ExitStack() as stack:
+                stack.enter_context(lib.maintained_time())
+                stack.enter_context(
+                    panel_camera(instance.data["panel"], preset["camera"])
                 )
-            )
-            path = capture.capture(log=self.log, **preset)
+                self._capture(preset)
 
         # Restoring viewport options.
if viewport_defaults: @@ -169,18 +200,17 @@ class ExtractPlayblast(publish.Extractor): cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) - self.log.debug("playblast path {}".format(path)) - collected_files = os.listdir(stagingdir) patterns = [clique.PATTERNS["frames"]] collections, remainder = clique.assemble(collected_files, minimum_items=1, patterns=patterns) + filename = preset.get("filename", "%TEMP%") self.log.debug("filename {}".format(filename)) frame_collection = None for collection in collections: - filebase = collection.format('{head}').rstrip(".") + filebase = collection.format("{head}").rstrip(".") self.log.debug("collection head {}".format(filebase)) if filebase in filename: frame_collection = collection @@ -204,15 +234,15 @@ class ExtractPlayblast(publish.Extractor): collected_files = collected_files[0] representation = { - 'name': 'png', - 'ext': 'png', - 'files': collected_files, + "name": self.capture_preset["Codec"]["compression"], + "ext": self.capture_preset["Codec"]["compression"], + "files": collected_files, "stagingDir": stagingdir, "frameStart": start, "frameEnd": end, - 'fps': fps, - 'preview': True, - 'tags': tags, - 'camera_name': camera_node_name + "fps": fps, + "preview": True, + "tags": tags, + "camera_name": camera_node_name } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 1d94bd58c5..f2d084b828 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -26,28 +26,28 @@ class ExtractThumbnail(publish.Extractor): def process(self, instance): self.log.info("Extracting capture..") - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] - capture_preset = ( - instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] - ) + maya_setting = instance.context.data["project_settings"]["maya"] + plugin_setting = maya_setting["publish"]["ExtractPlayblast"] + capture_preset = plugin_setting["capture_preset"] override_viewport_options = ( - capture_preset['Viewport Options']['override_viewport_options'] + capture_preset["Viewport Options"]["override_viewport_options"] ) try: preset = lib.load_capture_preset(data=capture_preset) except KeyError as ke: - self.log.error('Error loading capture presets: {}'.format(str(ke))) + self.log.error("Error loading capture presets: {}".format(str(ke))) preset = {} - self.log.info('Using viewport preset: {}'.format(preset)) + self.log.info("Using viewport preset: {}".format(preset)) # preset["off_screen"] = False - preset['camera'] = camera - preset['start_frame'] = instance.data["frameStart"] - preset['end_frame'] = instance.data["frameStart"] - preset['camera_options'] = { + preset["camera"] = camera + preset["start_frame"] = instance.data["frameStart"] + preset["end_frame"] = instance.data["frameStart"] + preset["camera_options"] = { "displayGateMask": False, "displayResolution": False, "displayFilmGate": False, @@ -74,14 +74,14 @@ class ExtractThumbnail(publish.Extractor): # used, if not then the asset resolution is # used if review_instance_width and review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = 
height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height + preset["width"] = asset_width + preset["height"] = asset_height # Create temp directory for thumbnail # - this is to avoid "override" of source file @@ -96,8 +96,8 @@ class ExtractThumbnail(publish.Extractor): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True pm.refresh(f=True) @@ -123,14 +123,14 @@ class ExtractThumbnail(publish.Extractor): preset["viewport_options"] = {"imagePlane": image_plane} # Disable Pan/Zoom. - pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + preset.pop("pan_zoom", None) + preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"] with lib.maintained_time(): # Force viewer to False in call to capture because we have our own # viewer opening call to allow a signal to trigger between # playblast and viewer - preset['viewer'] = False + preset["viewer"] = False # Update preset with current panel setting # if override_viewport_options is turned off @@ -145,17 +145,15 @@ class ExtractThumbnail(publish.Extractor): _, thumbnail = os.path.split(playblast) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) - self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: instance.data["representations"] = [] representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': thumbnail, + "name": "thumbnail", + "ext": "jpg", + "files": thumbnail, "stagingDir": dst_staging, "thumbnail": True } diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index f227aa161a..67c7877e60 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -54,22 +54,19 @@ class LoadBackdropNodes(load.LoaderPlugin): version = context['version'] version_data = version.get("data", {}) vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = namespace or context['asset']['name'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) # prepare data for imprinting # add additional metadata from the version to imprint to Avalon knob - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] - data_imprint = {"frameStart": first, - "frameEnd": last, - "version": vname, - "colorspaceInput": colorspace, - "objectName": object_name} + data_imprint = { + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name + } for k in add_keys: data_imprint.update({k: version_data[k]}) @@ -204,18 +201,13 @@ class LoadBackdropNodes(load.LoaderPlugin): name = container['name'] version_data = version_doc.get("data", {}) vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = container['namespace'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] data_imprint = {"representation": 
str(representation["_id"]), - "frameStart": first, - "frameEnd": last, "version": vname, "colorspaceInput": colorspace, "objectName": object_name} diff --git a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py index 8eaefa6854..7d51af7e9e 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py @@ -51,38 +51,10 @@ class CollectBackdrops(pyblish.api.InstancePlugin): instance.data["label"] = "{0} ({1} nodes)".format( bckn.name(), len(instance.data["transientData"]["childNodes"])) - instance.data["families"].append(instance.data["family"]) - - # Get frame range - handle_start = instance.context.data["handleStart"] - handle_end = instance.context.data["handleEnd"] - first_frame = int(nuke.root()["first_frame"].getValue()) - last_frame = int(nuke.root()["last_frame"].getValue()) - # get version version = instance.context.data.get('version') - if not version: - raise RuntimeError("Script name has no version in the name.") + if version: + instance.data['version'] = version - instance.data['version'] = version - - # Add version data to instance - version_data = { - "handles": handle_start, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "version": int(version), - "families": [instance.data["family"]] + instance.data["families"], - "subset": instance.data["subset"], - "fps": instance.context.data["fps"] - } - - instance.data.update({ - "versionData": version_data, - "frameStart": first_frame, - "frameEnd": last_frame - }) self.log.info("Backdrop instance collected: `{}`".format(instance)) diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 89ba6ad4e6..25732446b5 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -66,11 +66,11 @@ class MainThreadItem: return self._result def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. """ log.debug("Executing process in main thread") if self.done: diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index e94e64e04a..6f76c25e0c 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -389,11 +389,11 @@ class MainThreadItem: self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. 
""" log.debug("Executing process in main thread") if self.done: diff --git a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py index 538c6e4c5e..5cfa1faa50 100644 --- a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py +++ b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py @@ -55,7 +55,7 @@ class TVPaintLegacyConverted(SubsetConvertorPlugin): self._convert_render_layers( to_convert["renderLayer"], current_instances) self._convert_render_passes( - to_convert["renderpass"], current_instances) + to_convert["renderPass"], current_instances) self._convert_render_scenes( to_convert["renderScene"], current_instances) self._convert_workfiles( @@ -116,7 +116,7 @@ class TVPaintLegacyConverted(SubsetConvertorPlugin): render_layers_by_group_id = {} for instance in current_instances: if instance.get("creator_identifier") == "render.layer": - group_id = instance["creator_identifier"]["group_id"] + group_id = instance["creator_attributes"]["group_id"] render_layers_by_group_id[group_id] = instance for render_pass in render_passes: diff --git a/openpype/hosts/tvpaint/plugins/create/create_render.py b/openpype/hosts/tvpaint/plugins/create/create_render.py index 9711024c79..2369c7329f 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render.py @@ -415,11 +415,11 @@ class CreateRenderPass(TVPaintCreator): .get("creator_attributes", {}) .get("render_layer_instance_id") ) - render_layer_info = render_layers.get(render_layer_instance_id) + render_layer_info = render_layers.get(render_layer_instance_id, {}) self.update_instance_labels( instance_data, - render_layer_info["variant"], - render_layer_info["template_data"] + render_layer_info.get("variant"), + render_layer_info.get("template_data") ) instance = CreatedInstance.from_existing(instance_data, self) self._add_instance_to_context(instance) @@ -607,11 +607,11 @@ class CreateRenderPass(TVPaintCreator): current_instances = self.host.list_instances() render_layers = [ { - "value": instance["instance_id"], - "label": instance["subset"] + "value": inst["instance_id"], + "label": inst["subset"] } - for instance in current_instances - if instance["creator_identifier"] == CreateRenderlayer.identifier + for inst in current_instances + if inst.get("creator_identifier") == CreateRenderlayer.identifier ] if not render_layers: render_layers.append({"value": None, "label": "N/A"}) @@ -697,6 +697,7 @@ class TVPaintAutoDetectRenderCreator(TVPaintCreator): ["create"] ["auto_detect_render"] ) + self.enabled = plugin_settings.get("enabled", False) self.allow_group_rename = plugin_settings["allow_group_rename"] self.group_name_template = plugin_settings["group_name_template"] self.group_idx_offset = plugin_settings["group_idx_offset"] diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index 5eb702a1da..63f04cf3ce 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -22,9 +22,11 @@ class CollectOutputFrameRange(pyblish.api.InstancePlugin): context = instance.context frame_start = asset_doc["data"]["frameStart"] + fps = asset_doc["data"]["fps"] frame_end = frame_start + ( context.data["sceneMarkOut"] - context.data["sceneMarkIn"] ) + instance.data["fps"] = fps instance.data["frameStart"] = frame_start instance.data["frameEnd"] = frame_end 
        self.log.info(
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
index 7e35726030..d7984ce971 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py
@@ -1,5 +1,8 @@
 import pyblish.api
-from openpype.pipeline import PublishXmlValidationError
+from openpype.pipeline import (
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin,
+)
 from openpype.hosts.tvpaint.api.pipeline import (
     list_instances,
     write_instances,
@@ -31,7 +34,10 @@ class FixAssetNames(pyblish.api.Action):
         write_instances(new_instance_items)
 
 
-class ValidateAssetNames(pyblish.api.ContextPlugin):
+class ValidateAssetName(
+    OptionalPyblishPluginMixin,
+    pyblish.api.ContextPlugin
+):
     """Validate asset name present on instance.
 
     Asset name on instance should be the same as context's.
@@ -43,6 +49,8 @@
     actions = [FixAssetNames]
 
     def process(self, context):
+        if not self.is_active(context.data):
+            return
         context_asset_name = context.data["asset"]
         for instance in context:
             asset_name = instance.data.get("asset")
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
index 6a496a2e49..8e52a636f4 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
@@ -11,7 +11,7 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
     families = ["review", "render"]
 
     def process(self, instance):
-        layers = instance.data["layers"]
+        layers = instance.data.get("layers")
         # Instance can have empty layers
         # - it is not the job of this validator to check that
         if not layers:
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
index 0030b0fd1c..7b2cc62bb5 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py
@@ -1,7 +1,10 @@
 import json
 
 import pyblish.api
-from openpype.pipeline import PublishXmlValidationError
+from openpype.pipeline import (
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin,
+)
 from openpype.hosts.tvpaint.api.lib import execute_george
 
 
@@ -23,7 +26,10 @@ class ValidateMarksRepair(pyblish.api.Action):
         )
 
 
-class ValidateMarks(pyblish.api.ContextPlugin):
+class ValidateMarks(
+    OptionalPyblishPluginMixin,
+    pyblish.api.ContextPlugin
+):
     """Validate mark in and out are enabled and its duration.
Mark In/Out does not have to match frameStart and frameEnd but duration is @@ -59,6 +65,9 @@ class ValidateMarks(pyblish.api.ContextPlugin): } def process(self, context): + if not self.is_active(context.data): + return + current_data = { "markIn": context.data["sceneMarkIn"], "markInState": context.data["sceneMarkInState"], diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py index 4473e4b1b7..0ab8e811f5 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -1,11 +1,17 @@ import json import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) # TODO @iLliCiTiT add fix action for fps -class ValidateProjectSettings(pyblish.api.ContextPlugin): +class ValidateProjectSettings( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate scene settings against database.""" label = "Validate Scene Settings" @@ -13,6 +19,9 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + expected_data = context.data["assetEntity"]["data"] scene_data = { "fps": context.data.get("sceneFps"), diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index 066e54c670..229ccfcd18 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,5 +1,8 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.lib import execute_george @@ -14,7 +17,10 @@ class RepairStartFrame(pyblish.api.Action): execute_george("tv_startframe 0") -class ValidateStartFrame(pyblish.api.ContextPlugin): +class ValidateStartFrame( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate start frame being at frame 0.""" label = "Validate Start Frame" @@ -24,6 +30,9 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + start_frame = execute_george("tv_startframe") if start_frame == 0: return diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index 7a929a0ade..6f9a095285 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -8,6 +8,8 @@ import tempfile from .log import Logger from .vendor_bin_utils import find_executable +from .openpype_version import is_running_from_build + # MSDN process creation flag (Windows only) CREATE_NO_WINDOW = 0x08000000 @@ -200,6 +202,11 @@ def run_openpype_process(*args, **kwargs): # Skip envs that can affect OpenPype process # - fill more if you find more env = clean_envs_for_openpype_process(os.environ) + + # Only keep OpenPype version if we are running from build. 
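+    # (A pinned OPENPYPE_VERSION inherited from the parent environment could
+    # otherwise point the subprocess at a different build, while runs from
+    # source resolve the version dynamically; rationale inferred here, not
+    # taken from the original change.)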
+ if not is_running_from_build(): + env.pop("OPENPYPE_VERSION", None) + return run_subprocess(args, env=env, **kwargs) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index 48130848d5..e221eb00ea 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -3,21 +3,60 @@ """ import pyblish.api +from openpype.lib import TextDef +from openpype.pipeline.publish import OpenPypePyblishPluginMixin -class CollectDeadlinePools(pyblish.api.InstancePlugin): +class CollectDeadlinePools(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Collect pools from instance if present, from Setting otherwise.""" order = pyblish.api.CollectorOrder + 0.420 label = "Collect Deadline Pools" - families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + families = ["rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender"] primary_pool = None secondary_pool = None + @classmethod + def apply_settings(cls, project_settings, system_settings): + # deadline.publish.CollectDeadlinePools + settings = project_settings["deadline"]["publish"]["CollectDeadlinePools"] # noqa + cls.primary_pool = settings.get("primary_pool", None) + cls.secondary_pool = settings.get("secondary_pool", None) + def process(self, instance): + + attr_values = self.get_attr_values_from_data(instance.data) if not instance.data.get("primaryPool"): - instance.data["primaryPool"] = self.primary_pool or "none" + instance.data["primaryPool"] = ( + attr_values.get("primaryPool") or self.primary_pool or "none" + ) if not instance.data.get("secondaryPool"): - instance.data["secondaryPool"] = self.secondary_pool or "none" + instance.data["secondaryPool"] = ( + attr_values.get("secondaryPool") or self.secondary_pool or "none" # noqa + ) + + @classmethod + def get_attribute_defs(cls): + # TODO: Preferably this would be an enum for the user + # but the Deadline server URL can be dynamic and + # can be set per render instance. Since get_attribute_defs + # can't be dynamic unfortunately EnumDef isn't possible (yet?) 
+        # pool_names = self.deadline_module.get_deadline_pools(deadline_url,
+        #                                                      self.log)
+        # secondary_pool_names = ["-"] + pool_names
+
+        return [
+            TextDef("primaryPool",
+                    label="Primary Pool",
+                    default=cls.primary_pool),
+            TextDef("secondaryPool",
+                    label="Secondary Pool",
+                    default=cls.secondary_pool)
+        ]
diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
index 417a03de74..c728b6b9c7 100644
--- a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
@@ -3,7 +3,15 @@ import getpass
 import copy
 import attr
 
-from openpype.pipeline import legacy_io
+from openpype.lib import (
+    TextDef,
+    BoolDef,
+    NumberDef,
+)
+from openpype.pipeline import (
+    legacy_io,
+    OpenPypePyblishPluginMixin
+)
 from openpype.settings import get_project_settings
 from openpype.hosts.max.api.lib import (
     get_current_renderer,
@@ -22,7 +30,8 @@ class MaxPluginInfo(object):
     IgnoreInputs = attr.ib(default=True)
 
 
-class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
+class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
+                        OpenPypePyblishPluginMixin):
 
     label = "Submit Render to Deadline"
     hosts = ["max"]
@@ -31,14 +40,22 @@
     use_published = True
     priority = 50
-    tile_priority = 50
     chunk_size = 1
     jobInfo = {}
     pluginInfo = {}
     group = None
-    deadline_pool = None
-    deadline_pool_secondary = None
-    framePerTask = 1
+
+    @classmethod
+    def apply_settings(cls, project_settings, system_settings):
+        settings = project_settings["deadline"]["publish"]["MaxSubmitDeadline"]  # noqa
+
+        # Take some defaults from settings
+        cls.use_published = settings.get("use_published",
+                                         cls.use_published)
+        cls.priority = settings.get("priority",
+                                    cls.priority)
+        cls.chunk_size = settings.get("chunk_size", cls.chunk_size)
+        cls.group = settings.get("group", cls.group)
 
     def get_job_info(self):
         job_info = DeadlineJobInfo(Plugin="3dsmax")
@@ -49,11 +66,11 @@
         instance = self._instance
         context = instance.context
-
         # Always use the original work file name for the Job name even when
         # rendering is done from the published Work File. The original work
         # file name is clearer because it can also have subversion strings,
         # etc. which are stripped for the published file.
+ src_filepath = context.data["currentFile"] src_filename = os.path.basename(src_filepath) @@ -71,13 +88,13 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): job_info.Pool = instance.data.get("primaryPool") job_info.SecondaryPool = instance.data.get("secondaryPool") - job_info.ChunkSize = instance.data.get("chunkSize", 1) - job_info.Comment = context.data.get("comment") - job_info.Priority = instance.data.get("priority", self.priority) - job_info.FramesPerTask = instance.data.get("framesPerTask", 1) - if self.group: - job_info.Group = self.group + attr_values = self.get_attr_values_from_data(instance.data) + + job_info.ChunkSize = attr_values.get("chunkSize", 1) + job_info.Comment = context.data.get("comment") + job_info.Priority = attr_values.get("priority", self.priority) + job_info.Group = attr_values.get("group", self.group) # Add options from RenderGlobals render_globals = instance.data.get("renderGlobals", {}) @@ -216,3 +233,32 @@ class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): plugin_info.update(plugin_data) return job_info, plugin_info + + @classmethod + def get_attribute_defs(cls): + defs = super(MaxSubmitDeadline, cls).get_attribute_defs() + defs.extend([ + BoolDef("use_published", + default=cls.use_published, + label="Use Published Scene"), + + NumberDef("priority", + minimum=1, + maximum=250, + decimals=0, + default=cls.priority, + label="Priority"), + + NumberDef("chunkSize", + minimum=1, + maximum=50, + decimals=0, + default=cls.chunk_size, + label="Frame Per Task"), + + TextDef("group", + default=cls.group, + label="Group Name"), + ]) + + return defs diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py index 05afa5080d..7c8ab62d4d 100644 --- a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -17,7 +17,11 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, label = "Validate Deadline Pools" order = pyblish.api.ValidatorOrder - families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + families = ["rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender"] optional = True def process(self, instance): diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index ead647b41d..be1d3ff920 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -44,7 +44,7 @@ class AddonSettingsDef(JsonFilesSettingsDef): class ExampleAddon(OpenPypeAddOn, IPluginPaths, ITrayAction): - """This Addon has defined it's settings and interface. + """This Addon has defined its settings and interface. This example has system settings with an enabled option. And use few other interfaces: diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 1209375f82..a698195c59 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -9,7 +9,7 @@ from openpype_modules.ftrack.lib import ( class PushHierValuesToNonHier(ServerAction): - """Action push hierarchical custom attribute values to non hierarchical. 
+ """Action push hierarchical custom attribute values to non-hierarchical. Hierarchical value is also pushed to their task entities. @@ -119,17 +119,109 @@ class PushHierValuesToNonHier(ServerAction): self.join_query_keys(object_ids) )).all() - output = {} + attrs_by_obj_id = collections.defaultdict(list) hiearchical = [] for attr in attrs: if attr["is_hierarchical"]: hiearchical.append(attr) continue obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = [] - output[obj_id].append(attr) - return output, hiearchical + attrs_by_obj_id[obj_id].append(attr) + return attrs_by_obj_id, hiearchical + + def query_attr_value( + self, + session, + hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, + non_task_entity_ids, + parent_id_by_entity_id + ): + all_non_task_ids_with_parents = set() + for entity_id in non_task_entity_ids: + all_non_task_ids_with_parents.add(entity_id) + _entity_id = entity_id + while True: + parent_id = parent_id_by_entity_id.get(_entity_id) + if ( + parent_id is None + or parent_id in all_non_task_ids_with_parents + ): + break + all_non_task_ids_with_parents.add(parent_id) + _entity_id = parent_id + + all_entity_ids = ( + set(all_non_task_ids_with_parents) + | set(task_entity_ids) + ) + attr_ids = {attr["id"] for attr in hier_attrs} + for obj_id in dst_object_type_ids: + attrs = attrs_by_obj_id.get(obj_id) + if attrs is not None: + for attr in attrs: + attr_ids.add(attr["id"]) + + real_values_by_entity_id = { + entity_id: {} + for entity_id in all_entity_ids + } + + attr_values = query_custom_attributes( + session, attr_ids, all_entity_ids, True + ) + for item in attr_values: + entity_id = item["entity_id"] + attr_id = item["configuration_id"] + real_values_by_entity_id[entity_id][attr_id] = item["value"] + + # Fill hierarchical values + hier_attrs_key_by_id = { + hier_attr["id"]: hier_attr + for hier_attr in hier_attrs + } + hier_values_per_entity_id = {} + for entity_id in all_non_task_ids_with_parents: + real_values = real_values_by_entity_id[entity_id] + hier_values_per_entity_id[entity_id] = {} + for attr_id, attr in hier_attrs_key_by_id.items(): + key = attr["key"] + hier_values_per_entity_id[entity_id][key] = ( + real_values.get(attr_id) + ) + + output = {} + for entity_id in non_task_entity_ids: + output[entity_id] = {} + for attr in hier_attrs_key_by_id.values(): + key = attr["key"] + value = hier_values_per_entity_id[entity_id][key] + tried_ids = set() + if value is None: + tried_ids.add(entity_id) + _entity_id = entity_id + while value is None: + parent_id = parent_id_by_entity_id.get(_entity_id) + if not parent_id: + break + value = hier_values_per_entity_id[parent_id][key] + if value is not None: + break + _entity_id = parent_id + tried_ids.add(parent_id) + + if value is None: + value = attr["default"] + + if value is not None: + for ent_id in tried_ids: + hier_values_per_entity_id[ent_id][key] = value + + output[entity_id][key] = value + + return real_values_by_entity_id, output def propagate_values(self, session, event, selected_entities): ftrack_settings = self.get_ftrack_settings( @@ -156,29 +248,24 @@ class PushHierValuesToNonHier(ServerAction): } task_object_type = object_types_by_low_name["task"] - destination_object_types = [task_object_type] + dst_object_type_ids = {task_object_type["id"]} for ent_type in interest_entity_types: obj_type = object_types_by_low_name.get(ent_type) - if obj_type and obj_type not in destination_object_types: - destination_object_types.append(obj_type) - - destination_object_type_ids 
= set( - obj_type["id"] - for obj_type in destination_object_types - ) + if obj_type: + dst_object_type_ids.add(obj_type["id"]) interest_attributes = action_settings["interest_attributes"] # Find custom attributes definitions attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, destination_object_type_ids, interest_attributes + session, dst_object_type_ids, interest_attributes ) # Filter destination object types if they have any object specific # custom attribute - for obj_id in tuple(destination_object_type_ids): + for obj_id in tuple(dst_object_type_ids): if obj_id not in attrs_by_obj_id: - destination_object_type_ids.remove(obj_id) + dst_object_type_ids.remove(obj_id) - if not destination_object_type_ids: + if not dst_object_type_ids: # TODO report that there are not matching custom attributes return { "success": True, @@ -192,14 +279,14 @@ class PushHierValuesToNonHier(ServerAction): session, selected_ids, project_entity, - destination_object_type_ids + dst_object_type_ids ) self.log.debug("Preparing whole project hierarchy by ids.") entities_by_obj_id = { obj_id: [] - for obj_id in destination_object_type_ids + for obj_id in dst_object_type_ids } self.log.debug("Filtering Task entities.") @@ -223,10 +310,16 @@ class PushHierValuesToNonHier(ServerAction): "message": "Nothing to do in your selection." } - self.log.debug("Getting Hierarchical custom attribute values parents.") - hier_values_by_entity_id = self.get_hier_values( + self.log.debug("Getting Custom attribute values.") + ( + real_values_by_entity_id, + hier_values_by_entity_id + ) = self.query_attr_value( session, hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, non_task_entity_ids, parent_id_by_entity_id ) @@ -237,7 +330,8 @@ class PushHierValuesToNonHier(ServerAction): hier_attrs, task_entity_ids, hier_values_by_entity_id, - parent_id_by_entity_id + parent_id_by_entity_id, + real_values_by_entity_id ) self.log.debug("Setting values to entities themselves.") @@ -245,7 +339,8 @@ class PushHierValuesToNonHier(ServerAction): session, entities_by_obj_id, attrs_by_obj_id, - hier_values_by_entity_id + hier_values_by_entity_id, + real_values_by_entity_id ) return True @@ -322,112 +417,64 @@ class PushHierValuesToNonHier(ServerAction): return parent_id_by_entity_id, filtered_entities - def get_hier_values( - self, - session, - hier_attrs, - focus_entity_ids, - parent_id_by_entity_id - ): - all_ids_with_parents = set() - for entity_id in focus_entity_ids: - all_ids_with_parents.add(entity_id) - _entity_id = entity_id - while True: - parent_id = parent_id_by_entity_id.get(_entity_id) - if ( - not parent_id - or parent_id in all_ids_with_parents - ): - break - all_ids_with_parents.add(parent_id) - _entity_id = parent_id - - hier_attr_ids = tuple(hier_attr["id"] for hier_attr in hier_attrs) - hier_attrs_key_by_id = { - hier_attr["id"]: hier_attr["key"] - for hier_attr in hier_attrs - } - - values_per_entity_id = {} - for entity_id in all_ids_with_parents: - values_per_entity_id[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - values_per_entity_id[entity_id][key] = None - - values = query_custom_attributes( - session, hier_attr_ids, all_ids_with_parents, True - ) - for item in values: - entity_id = item["entity_id"] - key = hier_attrs_key_by_id[item["configuration_id"]] - - values_per_entity_id[entity_id][key] = item["value"] - - output = {} - for entity_id in focus_entity_ids: - output[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - value = 
values_per_entity_id[entity_id][key]
-                tried_ids = set()
-                if value is None:
-                    tried_ids.add(entity_id)
-                    _entity_id = entity_id
-                    while value is None:
-                        parent_id = parent_id_by_entity_id.get(_entity_id)
-                        if not parent_id:
-                            break
-                        value = values_per_entity_id[parent_id][key]
-                        if value is not None:
-                            break
-                        _entity_id = parent_id
-                        tried_ids.add(parent_id)
-
-                if value is not None:
-                    for ent_id in tried_ids:
-                        values_per_entity_id[ent_id][key] = value
-
-                output[entity_id][key] = value
-        return output
-
     def set_task_attr_values(
         self,
         session,
         hier_attrs,
         task_entity_ids,
         hier_values_by_entity_id,
-        parent_id_by_entity_id
+        parent_id_by_entity_id,
+        real_values_by_entity_id
     ):
         hier_attr_id_by_key = {
             attr["key"]: attr["id"]
             for attr in hier_attrs
         }
+        filtered_task_ids = set()
         for task_id in task_entity_ids:
-            parent_id = parent_id_by_entity_id.get(task_id) or {}
+            parent_id = parent_id_by_entity_id.get(task_id)
             parent_values = hier_values_by_entity_id.get(parent_id)
-            if not parent_values:
-                continue
+            if parent_values:
+                filtered_task_ids.add(task_id)
 
+        if not filtered_task_ids:
+            return
+
+        for task_id in filtered_task_ids:
+            parent_id = parent_id_by_entity_id[task_id]
+            parent_values = hier_values_by_entity_id[parent_id]
             hier_values_by_entity_id[task_id] = {}
+            real_task_attr_values = real_values_by_entity_id[task_id]
             for key, value in parent_values.items():
                 hier_values_by_entity_id[task_id][key] = value
+                if value is None:
+                    continue
+
                 configuration_id = hier_attr_id_by_key[key]
                 _entity_key = collections.OrderedDict([
                     ("configuration_id", configuration_id),
                     ("entity_id", task_id)
                 ])
-
-                session.recorded_operations.push(
-                    ftrack_api.operation.UpdateEntityOperation(
-                        "ContextCustomAttributeValue",
+                op = None
+                if configuration_id not in real_task_attr_values:
+                    op = ftrack_api.operation.CreateEntityOperation(
+                        "CustomAttributeValue",
+                        _entity_key,
+                        {"value": value}
+                    )
+                elif real_task_attr_values[configuration_id] != value:
+                    op = ftrack_api.operation.UpdateEntityOperation(
+                        "CustomAttributeValue",
                         _entity_key,
                         "value",
-                        ftrack_api.symbol.NOT_SET,
+                        real_task_attr_values[configuration_id],
                         value
                     )
-                )
-                if len(session.recorded_operations) > 100:
-                    session.commit()
+
+                if op is not None:
+                    session.recorded_operations.push(op)
+                    if len(session.recorded_operations) > 100:
+                        session.commit()
 
         session.commit()
 
@@ -436,39 +483,68 @@
         session,
         entities_by_obj_id,
         attrs_by_obj_id,
-        hier_values_by_entity_id
+        hier_values_by_entity_id,
+        real_values_by_entity_id
     ):
+        """Push values from hierarchical custom attributes to non-hierarchical.
+
+        Args:
+            session (ftrack_api.Session): Session which queried entities and
+                values and which is used for change propagation.
+            entities_by_obj_id (dict[str, list[str]]): TypedContext
+                ftrack entity ids where the attributes are propagated by their
+                object ids.
+            attrs_by_obj_id (dict[str, ftrack_api.Entity]): Objects of
+                'CustomAttributeConfiguration' by their ids.
+            hier_values_by_entity_id (dict[str, dict[str, Any]]): Attribute
+                values by entity id and by their keys.
+            real_values_by_entity_id (dict[str, dict[str, Any]]): Real
+                attribute values of entities.
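+
+        Note:
+            For each pushed value the loop below records a create operation
+            when the entity has no stored value yet, an update operation when
+            the stored value differs, and skips entities whose stored value
+            already matches.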
+        """
+
         for object_id, entity_ids in entities_by_obj_id.items():
             attrs = attrs_by_obj_id.get(object_id)
             if not attrs or not entity_ids:
                 continue
 
-            for attr in attrs:
-                for entity_id in entity_ids:
-                    value = (
-                        hier_values_by_entity_id
-                        .get(entity_id, {})
-                        .get(attr["key"])
-                    )
+            for entity_id in entity_ids:
+                real_values = real_values_by_entity_id.get(entity_id)
+                hier_values = hier_values_by_entity_id.get(entity_id)
+                if hier_values is None:
+                    continue
+
+                for attr in attrs:
+                    attr_id = attr["id"]
+                    attr_key = attr["key"]
+                    value = hier_values.get(attr_key)
                     if value is None:
                         continue
 
                     _entity_key = collections.OrderedDict([
-                        ("configuration_id", attr["id"]),
+                        ("configuration_id", attr_id),
                         ("entity_id", entity_id)
                     ])
 
-                    session.recorded_operations.push(
-                        ftrack_api.operation.UpdateEntityOperation(
-                            "ContextCustomAttributeValue",
+                    op = None
+                    if attr_id not in real_values:
+                        op = ftrack_api.operation.CreateEntityOperation(
+                            "CustomAttributeValue",
+                            _entity_key,
+                            {"value": value}
+                        )
+                    elif real_values[attr_id] != value:
+                        op = ftrack_api.operation.UpdateEntityOperation(
+                            "CustomAttributeValue",
                             _entity_key,
                             "value",
-                            ftrack_api.symbol.NOT_SET,
+                            real_values[attr_id],
                             value
                         )
-                    )
-                    if len(session.recorded_operations) > 100:
-                        session.commit()
+
+                    if op is not None:
+                        session.recorded_operations.push(op)
+                        if len(session.recorded_operations) > 100:
+                            session.commit()
 
         session.commit()
diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
index dc76920a57..0f10145c06 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
+++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
@@ -1,6 +1,6 @@
 import collections
-import datetime
 import copy
+from typing import Any
 
 import ftrack_api
 from openpype_modules.ftrack.lib import (
@@ -9,13 +9,30 @@ from openpype_modules.ftrack.lib import (
 )
 
 
-class PushFrameValuesToTaskEvent(BaseEvent):
+class PushHierValuesToNonHierEvent(BaseEvent):
+    """Push value changes between hierarchical and non-hierarchical attributes.
+
+    Changes of non-hierarchical attributes are pushed to hierarchical and
+    back. The hierarchical and non-hierarchical attributes must share the
+    same custom attribute definition.
+
+    The handler does not track changes of hierarchical parents: if an entity
+    does not have an explicitly set value for a hierarchical attribute and
+    any parent changes it, the change is not propagated.
+
+    The handler also pushes the values to task entities on task creation
+    and movement. To push values between hierarchical & non-hierarchical
+    add 'Task' to entity types in settings.
+
+    Todos:
+        Pushing task attribute values on create/move should be possible to
+        enable via settings.
+    """
+
     # Ignore event handler by default
     cust_attrs_query = (
         "select id, key, object_type_id, is_hierarchical, default"
         " from CustomAttributeConfiguration"
-        " where key in ({}) and"
-        " (object_type_id in ({}) or is_hierarchical is true)"
+        " where key in ({})"
     )
 
     _cached_task_object_id = None
@@ -26,35 +43,35 @@
     settings_key = "sync_hier_entity_attributes"
 
-    def session_user_id(self, session):
-        if self._cached_user_id is None:
-            user = session.query(
-                "User where username is \"{}\"".format(session.api_user)
-            ).one()
-            self._cached_user_id = user["id"]
-        return self._cached_user_id
+    def filter_entities_info(
+        self, event: ftrack_api.event.base.Event
+    ) -> dict[str, list[dict[str, Any]]]:
+        """Basic filtering of entity changes we care about.
 
-    def launch(self, session, event):
-        filtered_entities_info = self.filter_entities_info(event)
-        if not filtered_entities_info:
-            return
+        This is the first of several filtering passes; it does not query
+        anything from ftrack nor use settings.
 
-        for project_id, entities_info in filtered_entities_info.items():
-            self.process_by_project(session, event, project_id, entities_info)
+        Args:
+            event (ftrack_api.event.base.Event): Ftrack event with update
+                information.
+
+        Returns:
+            dict[str, list[dict[str, Any]]]: Filtered entity changes by
+                project id.
+        """
 
-    def filter_entities_info(self, event):
         # Filter if event contains relevant data
         entities_info = event["data"].get("entities")
         if not entities_info:
             return
 
-        entities_info_by_project_id = {}
+        entities_info_by_project_id = collections.defaultdict(list)
         for entity_info in entities_info:
-            # Care only about tasks
-            if entity_info.get("entityType") != "task":
+            # Ignore removed entities
+            if entity_info.get("action") == "remove":
                 continue
 
-            # Care only about changes of status
+            # Care only about information with changes of entities
            changes = entity_info.get("changes")
            if not changes:
                continue
@@ -69,367 +86,287 @@
             if project_id is None:
                 continue
 
-            # Skip `Task` entity type if parent didn't change
-            if entity_info["entity_type"].lower() == "task":
-                if (
-                    "parent_id" not in changes
-                    or changes["parent_id"]["new"] is None
-                ):
-                    continue
-
-            if project_id not in entities_info_by_project_id:
-                entities_info_by_project_id[project_id] = []
             entities_info_by_project_id[project_id].append(entity_info)
 
         return entities_info_by_project_id
 
-    def process_by_project(self, session, event, project_id, entities_info):
-        project_name = self.get_project_name_from_event(
+    def _get_attrs_configurations(self, session, interest_attributes):
+        """Get custom attribute configurations by name.
+
+        Args:
+            session (ftrack_api.Session): Ftrack session.
+            interest_attributes (list[str]): Names of custom attributes
+                that should be synchronized.
+
+        Returns:
+            tuple[dict[str, list], list]: Attributes by object id and
+                hierarchical attributes.
+        """
+
+        attrs = session.query(self.cust_attrs_query.format(
+            self.join_query_keys(interest_attributes)
+        )).all()
+
+        attrs_by_obj_id = collections.defaultdict(list)
+        hier_attrs = []
+        for attr in attrs:
+            if attr["is_hierarchical"]:
+                hier_attrs.append(attr)
+                continue
+            obj_id = attr["object_type_id"]
+            attrs_by_obj_id[obj_id].append(attr)
+        return attrs_by_obj_id, hier_attrs
+
+    def _get_handler_project_settings(
+        self,
+        session: ftrack_api.Session,
+        event: ftrack_api.event.base.Event,
+        project_id: str
+    ) -> tuple[set[str], set[str]]:
+        """Get handler settings based on the project.
+ + Args: + session (ftrack_api.Session): Ftrack session. + event (ftrack_api.event.base.Event): Ftrack event which triggered + the changes. + project_id (str): Project id where the current changes are handled. + + Returns: + tuple[set[str], set[str]]: Attribute names we care about and + entity types we care about. + """ + + project_name: str = self.get_project_name_from_event( session, event, project_id ) # Load settings - project_settings = self.get_project_settings_from_event( - event, project_name + project_settings: dict[str, Any] = ( + self.get_project_settings_from_event(event, project_name) ) # Load status mapping from presets - event_settings = ( + event_settings: dict[str, Any] = ( project_settings ["ftrack"] ["events"] - ["sync_hier_entity_attributes"] + [self.settings_key] ) # Skip if event is not enabled if not event_settings["enabled"]: self.log.debug("Project \"{}\" has disabled {}".format( project_name, self.__class__.__name__ )) - return + return set(), set() - interest_attributes = event_settings["interest_attributes"] + interest_attributes: list[str] = event_settings["interest_attributes"] if not interest_attributes: self.log.info(( "Project \"{}\" does not have filled 'interest_attributes'," " skipping." )) - return - interest_entity_types = event_settings["interest_entity_types"] + + interest_entity_types: list[str] = ( + event_settings["interest_entity_types"]) if not interest_entity_types: self.log.info(( "Project \"{}\" does not have filled 'interest_entity_types'," " skipping." )) - return - interest_attributes = set(interest_attributes) - interest_entity_types = set(interest_entity_types) + # Unify possible issues from settings ('Asset Build' -> 'assetbuild') + interest_entity_types: set[str] = { + entity_type.replace(" ", "").lower() + for entity_type in interest_entity_types + } + return set(interest_attributes), interest_entity_types - # Separate value changes and task parent changes - _entities_info = [] - added_entities = [] - added_entity_ids = set() - task_parent_changes = [] + def _entities_filter_by_settings( + self, + entities_info: list[dict[str, Any]], + interest_attributes: set[str], + interest_entity_types: set[str] + ): + new_entities_info = [] for entity_info in entities_info: - if entity_info["entity_type"].lower() == "task": - task_parent_changes.append(entity_info) - elif entity_info.get("action") == "add": - added_entities.append(entity_info) - added_entity_ids.add(entity_info["entityId"]) - else: - _entities_info.append(entity_info) - entities_info = _entities_info + entity_type_low = entity_info["entity_type"].lower() - # Filter entities info with changes - interesting_data, changed_keys_by_object_id = self.filter_changes( - session, event, entities_info, interest_attributes - ) - self.interesting_data_for_added( - session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ) - if not interesting_data and not task_parent_changes: - return + changes = entity_info["changes"] + # SPECIAL CASE: Capture changes of task created/moved under + # interested entity type + if ( + entity_type_low == "task" + and "parent_id" in changes + ): + # Direct parent is always second item in 'parents' and 'Task' + # must have at least one parent + parent_info = entity_info["parents"][1] + parent_entity_type = ( + parent_info["entity_type"] + .replace(" ", "") + .lower() + ) + if parent_entity_type in interest_entity_types: + new_entities_info.append(entity_info) + continue - # Prepare object types - object_types = 
session.query("select id, name from ObjectType").all() - object_types_by_name = {} - for object_type in object_types: - name_low = object_type["name"].lower() - object_types_by_name[name_low] = object_type + # Skip if entity type is not enabled for attr value sync + if entity_type_low not in interest_entity_types: + continue - # NOTE it would be nice to check if `interesting_data` do not contain - # value changs of tasks that were created or moved - # - it is a complex way how to find out - if interesting_data: - self.process_attribute_changes( - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids - ) + valid_attr_change = entity_info.get("action") == "add" + for attr_key in interest_attributes: + if valid_attr_change: + break - if task_parent_changes: - self.process_task_parent_change( - session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes - ) + if attr_key not in changes: + continue - def process_task_parent_change( + if changes[attr_key]["new"] is not None: + valid_attr_change = True + + if not valid_attr_change: + continue + + new_entities_info.append(entity_info) + + return new_entities_info + + def propagate_attribute_changes( self, session, - object_types_by_name, - task_parent_changes, - interest_entity_types, - interest_attributes + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, ): - """Push custom attribute values if task parent has changed. + hier_attr_ids_by_key = { + attr["key"]: attr["id"] + for attr in hier_attrs + } + filtered_interest_attributes = { + attr_name + for attr_name in interest_attributes + if attr_name in hier_attr_ids_by_key + } + attrs_keys_by_obj_id = {} + for obj_id, attrs in attrs_by_obj_id.items(): + attrs_keys_by_obj_id[obj_id] = { + attr["key"]: attr["id"] + for attr in attrs + } - Parent is changed if task is created or if is moved under different - entity. We don't care about all task changes only about those that - have it's parent in interest types (from settings). + op_changes = [] + for entity_info in entities_info: + entity_id = entity_info["entityId"] + obj_id = entity_info["objectTypeId"] + # Skip attributes sync if does not have object specific custom + # attribute + if obj_id not in attrs_keys_by_obj_id: + continue + attr_keys = attrs_keys_by_obj_id[obj_id] + real_values = real_values_by_entity_id[entity_id] + hier_values = hier_values_by_entity_id[entity_id] - Tasks hierarchical value should be unset or set based on parents - real hierarchical value and non hierarchical custom attribute value - should be set to hierarchical value. - """ - - # Store task ids which were created or moved under parent with entity - # type defined in settings (interest_entity_types). - task_ids = set() - # Store parent ids of matching task ids - matching_parent_ids = set() - # Store all entity ids of all entities to be able query hierarchical - # values. 
- whole_hierarchy_ids = set() - # Store parent id of each entity id - parent_id_by_entity_id = {} - for entity_info in task_parent_changes: - # Ignore entities with less parents than 2 - # NOTE entity itself is also part of "parents" value - parents = entity_info.get("parents") or [] - if len(parents) < 2: + changes = copy.deepcopy(entity_info["changes"]) + obj_id_attr_keys = { + attr_key + for attr_key in filtered_interest_attributes + if attr_key in attr_keys + } + if not obj_id_attr_keys: continue - parent_info = parents[1] - # Check if parent has entity type we care about. - if parent_info["entity_type"] not in interest_entity_types: - continue + value_by_key = {} + is_new_entity = entity_info.get("action") == "add" + for attr_key in obj_id_attr_keys: + if ( + attr_key in changes + and changes[attr_key]["new"] is not None + ): + value_by_key[attr_key] = changes[attr_key]["new"] - task_ids.add(entity_info["entityId"]) - matching_parent_ids.add(parent_info["entityId"]) - - # Store whole hierarchi of task entity - prev_id = None - for item in parents: - item_id = item["entityId"] - whole_hierarchy_ids.add(item_id) - - if prev_id is None: - prev_id = item_id + if not is_new_entity: continue - parent_id_by_entity_id[prev_id] = item_id - if item["entityType"] == "show": - break - prev_id = item_id + hier_attr_id = hier_attr_ids_by_key[attr_key] + attr_id = attr_keys[attr_key] + if hier_attr_id in real_values or attr_id in real_values: + continue - # Just skip if nothing is interesting for our settings - if not matching_parent_ids: - return + value_by_key[attr_key] = hier_values[hier_attr_id] - # Query object type ids of parent ids for custom attribute - # definitions query - entities = session.query( - "select object_type_id from TypedContext where id in ({})".format( - self.join_query_keys(matching_parent_ids) - ) - ) + for key, new_value in value_by_key.items(): + if new_value is None: + continue - # Prepare task object id - task_object_id = object_types_by_name["task"]["id"] + hier_id = hier_attr_ids_by_key[key] + std_id = attr_keys[key] + real_hier_value = real_values.get(hier_id) + real_std_value = real_values.get(std_id) + hier_value = hier_values[hier_id] + # Get right type of value for conversion + # - values in event are strings + type_value = real_hier_value + if type_value is None: + type_value = real_std_value + if type_value is None: + type_value = hier_value + # Skip if current values are not set + if type_value is None: + continue - # All object ids for which we're querying custom attribute definitions - object_type_ids = set() - object_type_ids.add(task_object_id) - for entity in entities: - object_type_ids.add(entity["object_type_id"]) + try: + new_value = type(type_value)(new_value) + except Exception: + self.log.warning(( + "Couldn't convert from {} to {}." + " Skipping update values." 
+ ).format(type(new_value), type(type_value))) + continue - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, object_type_ids, interest_attributes - ) + real_std_value_is_same = new_value == real_std_value + real_hier_value_is_same = new_value == real_hier_value + # New value does not match anything in current entity values + if ( + not is_new_entity + and not real_std_value_is_same + and not real_hier_value_is_same + ): + continue - # Skip if all task attributes are not available - task_attrs = attrs_by_obj_id.get(task_object_id) - if not task_attrs: - return + if not real_std_value_is_same: + op_changes.append(( + std_id, + entity_id, + new_value, + real_values.get(std_id), + std_id in real_values + )) - # Skip attributes that is not in both hierarchical and nonhierarchical - # TODO be able to push values if hierarchical is available - for key in interest_attributes: - if key not in hier_attrs: - task_attrs.pop(key, None) + if not real_hier_value_is_same: + op_changes.append(( + hier_id, + entity_id, + new_value, + real_values.get(hier_id), + hier_id in real_values + )) - elif key not in task_attrs: - hier_attrs.pop(key) + for change in op_changes: + ( + attr_id, + entity_id, + new_value, + old_value, + do_update + ) = change - # Skip if nothing remained - if not task_attrs: - return - - # Do some preparations for custom attribute values query - attr_key_by_id = {} - nonhier_id_by_key = {} - hier_attr_ids = [] - for key, attr_id in hier_attrs.items(): - attr_key_by_id[attr_id] = key - hier_attr_ids.append(attr_id) - - conf_ids = list(hier_attr_ids) - task_conf_ids = [] - for key, attr_id in task_attrs.items(): - attr_key_by_id[attr_id] = key - nonhier_id_by_key[key] = attr_id - conf_ids.append(attr_id) - task_conf_ids.append(attr_id) - - # Query custom attribute values - # - result does not contain values for all entities only result of - # query callback to ftrack server - result = query_custom_attributes( - session, list(hier_attr_ids), whole_hierarchy_ids, True - ) - result.extend( - query_custom_attributes( - session, task_conf_ids, whole_hierarchy_ids, False - ) - ) - - # Prepare variables where result will be stored - # - hierachical values should not contain attribute with value by - # default - hier_values_by_entity_id = { - entity_id: {} - for entity_id in whole_hierarchy_ids - } - # - real values of custom attributes - values_by_entity_id = { - entity_id: { - attr_id: None - for attr_id in conf_ids - } - for entity_id in whole_hierarchy_ids - } - for item in result: - attr_id = item["configuration_id"] - entity_id = item["entity_id"] - value = item["value"] - - values_by_entity_id[entity_id][attr_id] = value - - if attr_id in hier_attr_ids and value is not None: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare values for all task entities - # - going through all parents and storing first value value - # - store None to those that are already known that do not have set - # value at all - for task_id in tuple(task_ids): - for attr_id in hier_attr_ids: - entity_ids = [] - value = None - entity_id = task_id - while value is None: - entity_value = hier_values_by_entity_id[entity_id] - if attr_id in entity_value: - value = entity_value[attr_id] - if value is None: - break - - if value is None: - entity_ids.append(entity_id) - - entity_id = parent_id_by_entity_id.get(entity_id) - if entity_id is None: - break - - for entity_id in entity_ids: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare changes to commit - changes = [] - for 
task_id in tuple(task_ids): - parent_id = parent_id_by_entity_id[task_id] - for attr_id in hier_attr_ids: - attr_key = attr_key_by_id[attr_id] - nonhier_id = nonhier_id_by_key[attr_key] - - # Real value of hierarchical attribute on parent - # - If is none then should be unset - real_parent_value = values_by_entity_id[parent_id][attr_id] - # Current hierarchical value of a task - # - Will be compared to real parent value - hier_value = hier_values_by_entity_id[task_id][attr_id] - - # Parent value that can be inherited from it's parent entity - parent_value = hier_values_by_entity_id[parent_id][attr_id] - # Task value of nonhierarchical custom attribute - nonhier_value = values_by_entity_id[task_id][nonhier_id] - - if real_parent_value != hier_value: - changes.append({ - "new_value": real_parent_value, - "attr_id": attr_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - if parent_value != nonhier_value: - changes.append({ - "new_value": parent_value, - "attr_id": nonhier_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - self._commit_changes(session, changes) - - def _commit_changes(self, session, changes): - uncommited_changes = False - for idx, item in enumerate(changes): - new_value = item["new_value"] - old_value = item["old_value"] - attr_id = item["attr_id"] - entity_id = item["entity_id"] - attr_key = item["attr_key"] - - entity_key = collections.OrderedDict(( + entity_key = collections.OrderedDict([ ("configuration_id", attr_id), ("entity_id", entity_id) - )) - self._cached_changes.append({ - "attr_key": attr_key, - "entity_id": entity_id, - "value": new_value, - "time": datetime.datetime.now() - }) - old_value_is_set = ( - old_value is not ftrack_api.symbol.NOT_SET - and old_value is not None - ) - if new_value is None: - if not old_value_is_set: - continue - op = ftrack_api.operation.DeleteEntityOperation( - "CustomAttributeValue", - entity_key - ) - - elif old_value_is_set: + ]) + if do_update: op = ftrack_api.operation.UpdateEntityOperation( "CustomAttributeValue", entity_key, @@ -446,449 +383,116 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) session.recorded_operations.push(op) - self.log.info(( - "Changing Custom Attribute \"{}\" to value" - " \"{}\" on entity: {}" - ).format(attr_key, new_value, entity_id)) - - if (idx + 1) % 20 == 0: - uncommited_changes = False - try: - session.commit() - except Exception: - session.rollback() - self.log.warning( - "Changing of values failed.", exc_info=True - ) - else: - uncommited_changes = True - if uncommited_changes: - try: + if len(session.recorded_operations) > 100: session.commit() - except Exception: - session.rollback() - self.log.warning("Changing of values failed.", exc_info=True) + session.commit() - def process_attribute_changes( + def process_by_project( self, - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids + session: ftrack_api.Session, + event: ftrack_api.event.base.Event, + project_id: str, + entities_info: list[dict[str, Any]] ): + """Process changes in a single project.
- # Collect object type ids based on settings - interest_object_ids = [] - for entity_type in interest_entity_types: - _entity_type = entity_type.lower() - object_type = object_types_by_name.get(_entity_type) - if not object_type: - self.log.warning("Couldn't find object type \"{}\"".format( - entity_type - )) + Args: + session (ftrack_api.Session): Ftrack session. + event (ftrack_api.event.base.Event): Event which has all changes + information. + project_id (str): Project id related to changes. + entities_info (list[dict[str, Any]]): Changes of entities. + """ - interest_object_ids.append(object_type["id"]) - - # Query entities by filtered data and object ids - entities = self.get_entities( - session, interesting_data, interest_object_ids - ) - if not entities: + ( + interest_attributes, + interest_entity_types + ) = self._get_handler_project_settings(session, event, project_id) + if not interest_attributes or not interest_entity_types: return - # Pop not found entities from interesting data - entity_ids = set( - entity["id"] - for entity in entities + entities_info: list[dict[str, Any]] = ( + self._entities_filter_by_settings( + entities_info, + interest_attributes, + interest_entity_types + ) ) - for entity_id in tuple(interesting_data.keys()): - if entity_id not in entity_ids: - interesting_data.pop(entity_id) - - # Add task object type to list - attr_obj_ids = list(interest_object_ids) - attr_obj_ids.append(task_object_id) - - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, attr_obj_ids, interest_attributes - ) - - task_attrs = attrs_by_obj_id.get(task_object_id) - - changed_keys = set() - # Skip keys that are not both in hierachical and type specific - for object_id, keys in changed_keys_by_object_id.items(): - changed_keys |= set(keys) - object_id_attrs = attrs_by_obj_id.get(object_id) - for key in keys: - if key not in hier_attrs: - attrs_by_obj_id[object_id].pop(key) - continue - - if ( - (not object_id_attrs or key not in object_id_attrs) - and (not task_attrs or key not in task_attrs) - ): - hier_attrs.pop(key) - - # Clean up empty values - for key, value in tuple(attrs_by_obj_id.items()): - if not value: - attrs_by_obj_id.pop(key) - - if not attrs_by_obj_id: - self.log.warning(( - "There is not created Custom Attributes {} " - " for entity types: {}" - ).format( - self.join_query_keys(interest_attributes), - self.join_query_keys(interest_entity_types) - )) + if not entities_info: return - # Prepare task entities - task_entities = [] - # If task entity does not contain changed attribute then skip - if task_attrs: - task_entities = self.get_task_entities(session, interesting_data) - - task_entity_ids = set() - parent_id_by_task_id = {} - for task_entity in task_entities: - task_id = task_entity["id"] - task_entity_ids.add(task_id) - parent_id_by_task_id[task_id] = task_entity["parent_id"] - - self.finalize_attribute_changes( - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ) - - def finalize_attribute_changes( - self, - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ): - attr_id_to_key = {} - for attr_confs in attrs_by_obj_id.values(): - for key in changed_keys: - custom_attr_id = attr_confs.get(key) - if custom_attr_id: - attr_id_to_key[custom_attr_id] = key - - for key in changed_keys: - custom_attr_id = hier_attrs.get(key) - if custom_attr_id: - 
attr_id_to_key[custom_attr_id] = key - - entity_ids = ( - set(interesting_data.keys()) | task_entity_ids - ) - attr_ids = set(attr_id_to_key.keys()) - - current_values_by_id = self.get_current_values( - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs - ) - - changes = [] - for entity_id, current_values in current_values_by_id.items(): - parent_id = parent_id_by_task_id.get(entity_id) - if not parent_id: - parent_id = entity_id - values = interesting_data[parent_id] - - added_entity = entity_id in added_entity_ids - for attr_id, old_value in current_values.items(): - if added_entity and attr_id in hier_attrs: - continue - - attr_key = attr_id_to_key.get(attr_id) - if not attr_key: - continue - - # Convert new value from string - new_value = values.get(attr_key) - new_value_is_valid = ( - old_value is not ftrack_api.symbol.NOT_SET - and new_value is not None - ) - if added_entity and not new_value_is_valid: - continue - - if new_value is not None and new_value_is_valid: - try: - new_value = type(old_value)(new_value) - except Exception: - self.log.warning(( - "Couldn't convert from {} to {}." - " Skipping update values." - ).format(type(new_value), type(old_value))) - if new_value == old_value: - continue - - changes.append({ - "new_value": new_value, - "attr_id": attr_id, - "old_value": old_value, - "entity_id": entity_id, - "attr_key": attr_key - }) - self._commit_changes(session, changes) - - def filter_changes( - self, session, event, entities_info, interest_attributes - ): - session_user_id = self.session_user_id(session) - user_data = event["data"].get("user") - changed_by_session = False - if user_data and user_data.get("userid") == session_user_id: - changed_by_session = True - - current_time = datetime.datetime.now() - - interesting_data = {} - changed_keys_by_object_id = {} - - for entity_info in entities_info: - # Care only about changes if specific keys - entity_changes = {} - changes = entity_info["changes"] - for key in interest_attributes: - if key in changes: - entity_changes[key] = changes[key]["new"] - - entity_id = entity_info["entityId"] - if changed_by_session: - for key, new_value in tuple(entity_changes.items()): - for cached in tuple(self._cached_changes): - if ( - cached["entity_id"] != entity_id - or cached["attr_key"] != key - ): - continue - - cached_value = cached["value"] - try: - new_value = type(cached_value)(new_value) - except Exception: - pass - - if cached_value == new_value: - self._cached_changes.remove(cached) - entity_changes.pop(key) - break - - delta = (current_time - cached["time"]).seconds - if delta > self._max_delta: - self._cached_changes.remove(cached) - - if not entity_changes: - continue - - entity_id = entity_info["entityId"] - object_id = entity_info["objectTypeId"] - interesting_data[entity_id] = entity_changes - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) - - return interesting_data, changed_keys_by_object_id - - def interesting_data_for_added( - self, - session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ): - if not added_entities or not interest_attributes: - return - - object_type_ids = set() - entity_ids = set() - all_entity_ids = set() - object_id_by_entity_id = {} - project_id = None - entity_ids_by_parent_id = collections.defaultdict(set) - for entity_info in added_entities: - object_id = entity_info["objectTypeId"] - entity_id = 
entity_info["entityId"] - object_type_ids.add(object_id) - entity_ids.add(entity_id) - object_id_by_entity_id[entity_id] = object_id - - for item in entity_info["parents"]: - entity_id = item["entityId"] - all_entity_ids.add(entity_id) - parent_id = item["parentId"] - if not parent_id: - project_id = entity_id - else: - entity_ids_by_parent_id[parent_id].add(entity_id) - - hier_attrs = self.get_hierarchical_configurations( + attrs_by_obj_id, hier_attrs = self._get_attrs_configurations( session, interest_attributes ) - if not hier_attrs: + # Skip if attributes are not available + # - there is nothing to sync + if not attrs_by_obj_id or not hier_attrs: return - hier_attrs_key_by_id = { - attr_conf["id"]: attr_conf["key"] - for attr_conf in hier_attrs - } - default_values_by_key = { - attr_conf["key"]: attr_conf["default"] - for attr_conf in hier_attrs - } + entity_ids_by_parent_id = collections.defaultdict(set) + all_entity_ids = set() + for entity_info in entities_info: + entity_id = None + for item in entity_info["parents"]: + item_id = item["entityId"] + all_entity_ids.add(item_id) + if entity_id is not None: + entity_ids_by_parent_id[item_id].add(entity_id) + entity_id = item_id - values = query_custom_attributes( - session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + attr_ids = {attr["id"] for attr in hier_attrs} + for attrs in attrs_by_obj_id.values(): + attr_ids |= {attr["id"] for attr in attrs} + + # Query real custom attribute values + # - we have to know what are the real values, if are set and to what + # value + value_items = query_custom_attributes( + session, attr_ids, all_entity_ids, True ) - values_per_entity_id = {} - for entity_id in all_entity_ids: - values_per_entity_id[entity_id] = {} - for attr_name in interest_attributes: - values_per_entity_id[entity_id][attr_name] = None - - for item in values: - entity_id = item["entity_id"] - key = hier_attrs_key_by_id[item["configuration_id"]] - values_per_entity_id[entity_id][key] = item["value"] - - fill_queue = collections.deque() - fill_queue.append((project_id, default_values_by_key)) - while fill_queue: - item = fill_queue.popleft() - entity_id, values_by_key = item - entity_values = values_per_entity_id[entity_id] - new_values_by_key = copy.deepcopy(values_by_key) - for key, value in values_by_key.items(): - current_value = entity_values[key] - if current_value is None: - entity_values[key] = value - else: - new_values_by_key[key] = current_value - - for child_id in entity_ids_by_parent_id[entity_id]: - fill_queue.append((child_id, new_values_by_key)) - - for entity_id in entity_ids: - entity_changes = {} - for key, value in values_per_entity_id[entity_id].items(): - if value is not None: - entity_changes[key] = value - - if not entity_changes: - continue - - interesting_data[entity_id] = entity_changes - object_id = object_id_by_entity_id[entity_id] - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) - - def get_current_values( - self, - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs - ): - current_values_by_id = {} - if not attr_ids or not entity_ids: - return current_values_by_id - - for entity_id in entity_ids: - current_values_by_id[entity_id] = {} - for attr_id in attr_ids: - current_values_by_id[entity_id][attr_id] = ( - ftrack_api.symbol.NOT_SET - ) - - values = query_custom_attributes( - session, attr_ids, entity_ids, True - ) - - for item in values: + 
real_values_by_entity_id = collections.defaultdict(dict) + for item in value_items: entity_id = item["entity_id"] attr_id = item["configuration_id"] - if entity_id in task_entity_ids and attr_id in hier_attrs: - continue + real_values_by_entity_id[entity_id][attr_id] = item["value"] - if entity_id not in current_values_by_id: - current_values_by_id[entity_id] = {} - current_values_by_id[entity_id][attr_id] = item["value"] - return current_values_by_id + hier_values_by_entity_id = {} + default_values = { + attr["id"]: attr["default"] + for attr in hier_attrs + } + hier_queue = collections.deque() + hier_queue.append((default_values, [project_id])) + while hier_queue: + parent_values, entity_ids = hier_queue.popleft() + for entity_id in entity_ids: + entity_values = copy.deepcopy(parent_values) + real_values = real_values_by_entity_id[entity_id] + for attr_id, value in real_values.items(): + entity_values[attr_id] = value + hier_values_by_entity_id[entity_id] = entity_values + hier_queue.append( + (entity_values, entity_ids_by_parent_id[entity_id]) + ) - def get_entities(self, session, interesting_data, interest_object_ids): - return session.query(( - "select id from TypedContext" - " where id in ({}) and object_type_id in ({})" - ).format( - self.join_query_keys(interesting_data.keys()), - self.join_query_keys(interest_object_ids) - )).all() - - def get_task_entities(self, session, interesting_data): - return session.query( - "select id, parent_id from Task where parent_id in ({})".format( - self.join_query_keys(interesting_data.keys()) - ) - ).all() - - def attrs_configurations(self, session, object_ids, interest_attributes): - attrs = session.query(self.cust_attrs_query.format( - self.join_query_keys(interest_attributes), - self.join_query_keys(object_ids) - )).all() - - output = {} - hiearchical = {} - for attr in attrs: - if attr["is_hierarchical"]: - hiearchical[attr["key"]] = attr["id"] - continue - obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = {} - output[obj_id][attr["key"]] = attr["id"] - return output, hiearchical - - def get_hierarchical_configurations(self, session, interest_attributes): - hier_attr_query = ( - "select id, key, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key in ({}) and is_hierarchical is true" + self.propagate_attribute_changes( + session, + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, ) - if not interest_attributes: - return [] - return list(session.query(hier_attr_query.format( - self.join_query_keys(interest_attributes), - )).all()) + + def launch(self, session, event): + filtered_entities_info = self.filter_entities_info(event) + if not filtered_entities_info: + return + + for project_id, entities_info in filtered_entities_info.items(): + self.process_by_project(session, event, project_id, entities_info) def register(session): - PushFrameValuesToTaskEvent(session).register() + PushHierValuesToNonHierEvent(session).register() diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py index 6be14b3bdf..f8e56377bb 100644 --- a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py @@ -14,7 +14,10 @@ class IntegrateKitsuNote(pyblish.api.ContextPlugin): # status settings set_status_note = False note_status_shortname = "wfa" - status_conditions = 
list() + status_change_conditions = { + "status_conditions": [], + "family_requirements": [], + } # comment settings custom_comment_template = { @@ -55,7 +58,7 @@ class IntegrateKitsuNote(pyblish.api.ContextPlugin): continue kitsu_task = instance.data.get("kitsu_task") - if kitsu_task is None: + if not kitsu_task: continue # Get note status, by default uses the task status for the note @@ -65,13 +68,39 @@ class IntegrateKitsuNote(pyblish.api.ContextPlugin): # Check if any status condition is not met allow_status_change = True - for status_cond in self.status_conditions: + for status_cond in self.status_change_conditions[ + "status_conditions" + ]: condition = status_cond["condition"] == "equal" match = status_cond["short_name"].upper() == shortname if match and not condition or condition and not match: allow_status_change = False break + if allow_status_change: + # Get families + families = { + instance.data.get("family") + for instance in context + if instance.data.get("publish") + } + + # Check if any family requirement is met + for family_requirement in self.status_change_conditions[ + "family_requirements" + ]: + condition = family_requirement["condition"] == "equal" + + for family in families: + match = family_requirement["family"].lower() == family + if match and not condition or condition and not match: + allow_status_change = False + break + + if allow_status_change: + break + + # Set note status if self.set_status_note and allow_status_change: kitsu_status = gazu.task.get_task_status_by_short_name( self.note_status_shortname diff --git a/openpype/pipeline/publish/contants.py b/openpype/pipeline/publish/contants.py index 169eca2e5c..c5296afe9a 100644 --- a/openpype/pipeline/publish/contants.py +++ b/openpype/pipeline/publish/contants.py @@ -1,2 +1,3 @@ DEFAULT_PUBLISH_TEMPLATE = "publish" DEFAULT_HERO_PUBLISH_TEMPLATE = "hero" +TRANSIENT_DIR_TEMPLATE = "transient" diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 0e329a7351..89c229f39b 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -20,13 +20,15 @@ from openpype.settings import ( get_system_settings, ) from openpype.pipeline import ( - tempdir + tempdir, + Anatomy ) from openpype.pipeline.plugin_discover import DiscoverResult from .contants import ( DEFAULT_PUBLISH_TEMPLATE, DEFAULT_HERO_PUBLISH_TEMPLATE, + TRANSIENT_DIR_TEMPLATE ) @@ -692,6 +694,82 @@ def get_publish_repre_path(instance, repre, only_published=False): return None +def get_custom_staging_dir_info(project_name, host_name, family, task_name, + task_type, subset_name, + project_settings=None, + anatomy=None, log=None): + """Checks profiles to decide if the context should use a special custom dir as staging. + + Args: + project_name (str) + host_name (str) + family (str) + task_name (str) + task_type (str) + subset_name (str) + project_settings(Dict[str, Any]): Prepared project settings.
+ anatomy (Dict[str, Any]) + log (Logger) (optional) + + Returns: + (tuple) + Raises: + ValueError - if the template to be used is misconfigured + """ + settings = project_settings or get_project_settings(project_name) + custom_staging_dir_profiles = (settings["global"] + ["tools"] + ["publish"] + ["custom_staging_dir_profiles"]) + if not custom_staging_dir_profiles: + return None, None + + if not log: + log = Logger.get_logger("get_custom_staging_dir_info") + + filtering_criteria = { + "hosts": host_name, + "families": family, + "task_names": task_name, + "task_types": task_type, + "subsets": subset_name + } + profile = filter_profiles(custom_staging_dir_profiles, + filtering_criteria, + logger=log) + + if not profile or not profile["active"]: + return None, None + + if not anatomy: + anatomy = Anatomy(project_name) + + template_name = profile["template_name"] or TRANSIENT_DIR_TEMPLATE + _validate_transient_template(project_name, template_name, anatomy) + + custom_staging_dir = anatomy.templates[template_name]["folder"] + is_persistent = profile["custom_staging_dir_persistent"] + + return custom_staging_dir, is_persistent + + +def _validate_transient_template(project_name, template_name, anatomy): + """Check that transient template is correctly configured. + + Raises: + ValueError - if the template is misconfigured + """ + if template_name not in anatomy.templates: + raise ValueError(("Anatomy of project \"{}\" does not have set" + " \"{}\" template key!" + ).format(project_name, template_name)) + + if "folder" not in anatomy.templates[template_name]: + raise ValueError(("There is not set \"folder\" template in \"{}\" anatomy" # noqa + " for project \"{}\"." + ).format(template_name, project_name)) + + def get_published_workfile_instance(context): """Find workfile instance in context""" for i in context: diff --git a/openpype/plugins/publish/cleanup.py b/openpype/plugins/publish/cleanup.py index ef312e391f..b90c88890d 100644 --- a/openpype/plugins/publish/cleanup.py +++ b/openpype/plugins/publish/cleanup.py @@ -93,6 +93,10 @@ class CleanUp(pyblish.api.InstancePlugin): self.log.info("No staging directory found: %s" % staging_dir) return + if instance.data.get("stagingDir_persistent"): + self.log.info("Staging dir: %s should be persistent" % staging_dir) + return + self.log.info("Removing staging directory {}".format(staging_dir)) shutil.rmtree(staging_dir) diff --git a/openpype/plugins/publish/cleanup_farm.py b/openpype/plugins/publish/cleanup_farm.py index b87d4698a2..8052f13734 100644 --- a/openpype/plugins/publish/cleanup_farm.py +++ b/openpype/plugins/publish/cleanup_farm.py @@ -37,7 +37,7 @@ class CleanUpFarm(pyblish.api.ContextPlugin): dirpaths_to_remove = set() for instance in context: staging_dir = instance.data.get("stagingDir") - if staging_dir: + if staging_dir and not instance.data.get("stagingDir_persistent"): dirpaths_to_remove.add(os.path.normpath(staging_dir)) if "representations" in instance.data: diff --git a/openpype/plugins/publish/collect_custom_staging_dir.py b/openpype/plugins/publish/collect_custom_staging_dir.py new file mode 100644 index 0000000000..72ab0fe34d --- /dev/null +++ b/openpype/plugins/publish/collect_custom_staging_dir.py @@ -0,0 +1,67 @@ +""" +Requires: + anatomy + + +Provides: + instance.data -> stagingDir (folder path) + -> stagingDir_persistent (bool) +""" +import copy +import os.path + +import pyblish.api + +from openpype.pipeline.publish.lib import get_custom_staging_dir_info + + +class CollectCustomStagingDir(pyblish.api.InstancePlugin): + """Looks
through profiles to decide if stagingDir should be persistent and in a + special location. + + A transient staging dir could be useful in specific use cases where it is + desirable to have temporary renders in specific, persistent folders, for + example on disks optimized for speed. + + It is the studio's responsibility to clean up obsolete folders and their data. + + Location of the folder is configured in `project_anatomy/templates/others`. + ('transient' key is expected, with 'folder' key) + + Which family/task type/subset is applicable is configured in: + `project_settings/global/tools/publish/custom_staging_dir_profiles` + + """ + label = "Collect Custom Staging Directory" + order = pyblish.api.CollectorOrder + 0.4990 + + template_key = "transient" + + def process(self, instance): + family = instance.data["family"] + subset_name = instance.data["subset"] + host_name = instance.context.data["hostName"] + project_name = instance.context.data["projectName"] + + anatomy = instance.context.data["anatomy"] + anatomy_data = copy.deepcopy(instance.data["anatomyData"]) + task = anatomy_data.get("task", {}) + + transient_tml, is_persistent = get_custom_staging_dir_info( + project_name, host_name, family, task.get("name"), + task.get("type"), subset_name, anatomy=anatomy, log=self.log) + result_str = "Not adding" + if transient_tml: + anatomy_data["root"] = anatomy.roots + scene_name = instance.context.data.get("currentFile") + if scene_name: + anatomy_data["scene_name"] = os.path.basename(scene_name) + transient_dir = transient_tml.format(**anatomy_data) + instance.data["stagingDir"] = transient_dir + + instance.data["stagingDir_persistent"] = is_persistent + result_str = "Adding '{}' as".format(transient_dir) + + self.log.info("{} custom staging dir for instance with '{}'".format( + result_str, family + )) diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index 44ba4a5025..95575444b2 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -252,6 +252,9 @@ class ExtractBurnin(publish.Extractor): # Add context data burnin_data. burnin_data["custom"] = custom_data + # Add data members. + burnin_data.update(instance.data.get("burninDataMembers", {})) + # Add source camera name to burnin data camera_name = repre.get("camera_name") if camera_name: diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index ef449f4f74..d0a4266941 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -4,8 +4,10 @@ import re import subprocess import platform import json -import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins +import tempfile +from string import Formatter +import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from openpype.lib import ( get_ffmpeg_tool_path, get_ffmpeg_codec_args, @@ -23,7 +25,7 @@ FFMPEG = ( ).format(ffmpeg_path) DRAWTEXT = ( - "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':" + "drawtext@'%(label)s'=fontfile='%(font)s':text=\\'%(text)s\\':" "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d" ) TIMECODE = ( @@ -39,6 +41,45 @@ TIMECODE_KEY = "{timecode}" SOURCE_TIMECODE_KEY = "{source_timecode}" +def convert_list_to_command(list_to_convert, fps, label=""): + """Convert a list of values to a drawtext command file for ffmpeg `sendcmd`. + + The list of values is expected to have a value per frame.
If the video + file ends up being longer than the number of provided values, the + last value will be held. + + Args: + list_to_convert (list): List of values per frame. + fps (float or int): The expected frames per second of the output file. + label (str): Label for the drawtext, if a specific drawtext filter is + required. + + Returns: + str: Filepath to the temporary drawtext command file. + + """ + + with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: + for i, value in enumerate(list_to_convert): + seconds = i / fps + + # Escape special characters + value = str(value).replace(":", "\\:") + + filter = "drawtext" + if label: + filter += "@" + label + + line = ( + "{start} {filter} reinit text='{value}';" + "\n".format(start=seconds, filter=filter, value=value) + ) + + f.write(line) + f.flush() + return f.name + + def _get_ffprobe_data(source): """Reimplemented from otio burnins to be able use full path to ffprobe :param str source: source media file @@ -144,7 +185,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): self.options_init.update(options_init) def add_text( - self, text, align, frame_start=None, frame_end=None, options=None + self, + text, + align, + frame_start=None, + frame_end=None, + options=None, + cmd="" ): """ Adding static text to a filter. @@ -165,7 +212,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if frame_end is not None: options["frame_end"] = frame_end - self._add_burnin(text, align, options, DRAWTEXT) + draw_text = DRAWTEXT + if cmd: + draw_text = "{}, {}".format(cmd, DRAWTEXT) + + options["label"] = align + + self._add_burnin(text, align, options, draw_text) def add_timecode( self, align, frame_start=None, frame_end=None, frame_start_tc=None, @@ -408,11 +461,13 @@ def burnins_from_data( True by default. Presets must be set separately. Should be dict with 2 keys: - - "options" - sets look of burnins - colors, opacity,...(more info: ModifiedBurnins doc) + - "options" - sets look of burnins - colors, opacity,... + (more info: ModifiedBurnins doc) - *OPTIONAL* default values are used when not included - "burnins" - contains dictionary with burnins settings - *OPTIONAL* burnins won't be added (easier is not to use this) - - each key of "burnins" represents Alignment, there are 6 possibilities: + - each key of "burnins" represents Alignment, + there are 6 possibilities: TOP_LEFT TOP_CENTERED TOP_RIGHT BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT - value must be string with text you want to burn-in @@ -491,13 +546,14 @@ def burnins_from_data( if source_timecode is not None: data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY + clean_up_paths = [] for align_text, value in burnin_values.items(): if not value: continue - if isinstance(value, (dict, list, tuple)): + if isinstance(value, dict): raise TypeError(( - "Expected string or number type." + "Expected string, number or list type." " Got: {} - \"{}\"" " (Make sure you have new burnin presets)." ).format(str(type(value)), str(value))) @@ -533,8 +589,48 @@ def burnins_from_data( print("Source does not have set timecode value.") value = value.replace(SOURCE_TIMECODE_KEY, MISSING_KEY_VALUE) - key_pattern = re.compile(r"(\{.*?[^{0]*\})") + # Convert lists. + cmd = "" + text = None + keys = [i[1] for i in Formatter().parse(value) if i[1] is not None] + list_to_convert = [] + # Warn about nested dictionaries in lists, i.e. we don't support + # it.
+ if "[" in "".join(keys): + print( + "We dont support converting nested dictionaries to lists," + " so skipping {}".format(value) + ) + else: + for key in keys: + data_value = data[key] + + # Multiple lists are not supported. + if isinstance(data_value, list) and list_to_convert: + raise ValueError( + "Found multiple lists to convert, which is not " + "supported: {}".format(value) + ) + + if isinstance(data_value, list): + print("Found list to convert: {}".format(data_value)) + for v in data_value: + data[key] = v + list_to_convert.append(value.format(**data)) + + if list_to_convert: + value = list_to_convert[0] + path = convert_list_to_command( + list_to_convert, data["fps"], label=align + ) + cmd = "sendcmd=f='{}'".format(path) + cmd = cmd.replace("\\", "/") + cmd = cmd.replace(":", "\\:") + clean_up_paths.append(path) + + # Failsafe for missing keys. + key_pattern = re.compile(r"(\{.*?[^{0]*\})") missing_keys = [] for group in key_pattern.findall(value): try: @@ -568,7 +664,8 @@ def burnins_from_data( continue text = value.format(**data) - burnin.add_text(text, align, frame_start, frame_end) + + burnin.add_text(text, align, frame_start, frame_end, cmd=cmd) ffmpeg_args = [] if codec_data: @@ -599,6 +696,8 @@ def burnins_from_data( burnin.render( output_path, args=ffmpeg_args_str, overwrite=overwrite, **data ) + for path in clean_up_paths: + os.remove(path) if __name__ == "__main__": diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index 02c0e35377..e5e535bf19 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -58,12 +58,16 @@ "file": "{originalBasename}.{ext}", "path": "{@folder}/{@file}" }, + "transient": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/work/{family}/{subset}" + }, "__dynamic_keys_labels__": { "maya2unreal": "Maya to Unreal", "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", "simpleUnrealTexture": "Simple Unreal Texture", "online": "online", - "source": "source" + "source": "source", + "transient": "transient" } } } diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 0cbd323299..fdd70f1a44 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -43,10 +43,7 @@ "use_published": true, "priority": 50, "chunk_size": 10, - "group": "none", - "deadline_pool": "", - "deadline_pool_secondary": "", - "framePerTask": 1 + "group": "none" }, "NukeSubmitDeadline": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index aad17d54da..30e56300d1 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -249,6 +249,29 @@ } } } + }, + { + "families": [], + "hosts": [ + "maya" + ], + "task_types": [], + "task_names": [], + "subsets": [], + "burnins": { + "maya_burnin": { + "TOP_LEFT": "{yy}-{mm}-{dd}", + "TOP_CENTERED": "{focalLength:.2f} mm", + "TOP_RIGHT": "{anatomy[version]}", + "BOTTOM_LEFT": "{username}", + "BOTTOM_CENTERED": "{asset}", + "BOTTOM_RIGHT": "{frame_start}-{current_frame}-{frame_end}", + "filter": { + "families": [], + "tags": [] + } + } + } } ] }, @@ -591,7 +614,8 @@ "task_names": [], "template_name": "simpleUnrealTextureHero" } - ] + ], + 
"custom_staging_dir_profiles": [] } }, "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets\": {\"characters\": {}, \"locations\": {}}, \"shots\": {}}}", diff --git a/openpype/settings/defaults/project_settings/kitsu.json b/openpype/settings/defaults/project_settings/kitsu.json index 0638450595..59a36d8b97 100644 --- a/openpype/settings/defaults/project_settings/kitsu.json +++ b/openpype/settings/defaults/project_settings/kitsu.json @@ -8,7 +8,10 @@ "IntegrateKitsuNote": { "set_status_note": false, "note_status_shortname": "wfa", - "status_conditions": [], + "status_change_conditions": { + "status_conditions": [], + "family_requirements": [] + }, "custom_comment_template": { "enabled": false, "comment_template": "{comment}\n\n| | |\n|--|--|\n| version| `{version}` |\n| family | `{family}` |\n| name | `{name}` |" diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 2aa95fd1be..e914eb29f9 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -790,7 +790,7 @@ "ExtractPlayblast": { "capture_preset": { "Codec": { - "compression": "jpg", + "compression": "png", "format": "image", "quality": 95 }, @@ -817,7 +817,8 @@ }, "Generic": { "isolate_view": true, - "off_screen": true + "off_screen": true, + "pan_zoom": false }, "Renderer": { "rendererName": "vp2Renderer" diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 9173a8c3d5..1671748e97 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -42,6 +42,7 @@ "default_variants": [] }, "auto_detect_render": { + "enabled": false, "allow_group_rename": true, "group_name_template": "L{group_index}", "group_idx_offset": 10, diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json index 5fd9b926fb..eb3a88ce66 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -133,7 +133,7 @@ "linux": [] }, "arguments": { - "windows": [], + "windows": ["-U MAXScript {OPENPYPE_ROOT}\\openpype\\hosts\\max\\startup\\startup.ms"], "darwin": [], "linux": [] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 9906939cc7..d8b5e4dc1f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -239,27 +239,12 @@ { "type": "number", "key": "chunk_size", - "label": "Chunk Size" + "label": "Frame per Task" }, { "type": "text", "key": "group", "label": "Group Name" - }, - { - "type": "text", - "key": "deadline_pool", - "label": "Deadline pool" - }, - { - "type": "text", - "key": "deadline_pool_secondary", - "label": "Deadline pool (secondary)" - }, - { - "type": "number", - "key": "framePerTask", - "label": "Frame Per Task" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json index 
ee309f63a7..8aeed00542 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json @@ -54,30 +54,62 @@ "label": "Note shortname" }, { - "type": "list", - "key": "status_conditions", - "label": "Status conditions", - "object_type": { - "type": "dict", - "key": "conditions_dict", - "children": [ - { - "type": "enum", - "key": "condition", - "label": "Condition", - "enum_items": [ - {"equal": "Equal"}, - {"not_equal": "Not equal"} + "type": "dict", + "collapsible": true, + "key": "status_change_conditions", + "label": "Status change conditions", + "children": [ + { + "type": "list", + "key": "status_conditions", + "label": "Status conditions", + "object_type": { + "type": "dict", + "key": "condition_dict", + "children": [ + { + "type": "enum", + "key": "condition", + "label": "Condition", + "enum_items": [ + {"equal": "Equal"}, + {"not_equal": "Not equal"} + ] + }, + { + "type": "text", + "key": "short_name", + "label": "Short name" + } ] - }, - { - "type": "text", - "key": "short_name", - "label": "Short name" } - ] - }, - "label": "Status shortname" + }, + { + "type": "list", + "key": "family_requirements", + "label": "Family requirements", + "object_type": { + "type": "dict", + "key": "requirement_dict", + "children": [ + { + "type": "enum", + "key": "condition", + "label": "Condition", + "enum_items": [ + {"equal": "Equal"}, + {"not_equal": "Not equal"} + ] + }, + { + "type": "text", + "key": "family", + "label": "Family" + } + ] + } + } + ] }, { "type": "dict", diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 708b688ba5..1094595851 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -202,7 +202,13 @@ "key": "auto_detect_render", "label": "Auto-Detect Create Render", "is_group": true, + "checkbox_key": "enabled", "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { "type": "label", "label": "The creator tries to auto-detect Render Layers and Render Passes in scene. For Render Layers is used group name as a variant and for Render Passes is used TVPaint layer name.

Group names can be renamed by their used order in scene. The renaming template where can be used {group_index} formatting key which is filled by \"used position index of group\".
- Template: L{group_index}
- Group offset: 10
- Group padding: 3
Would create group names \"L010\", \"L020\", ..." diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json index 962008d476..85ec482e73 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json @@ -408,6 +408,71 @@ } ] } + }, + { + "type": "list", + "key": "custom_staging_dir_profiles", + "label": "Custom Staging Dir Profiles", + "use_label_wrap": true, + "docstring": "Profiles to specify special location and persistence for staging dir. Could be used in Creators and Publish phase!", + "object_type": { + "type": "dict", + "children": [ + { + "type": "boolean", + "key": "active", + "label": "Is active", + "default": true + }, + { + "type": "separator" + }, + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "custom_staging_dir_persistent", + "label": "Custom Staging Folder Persistent", + "type": "boolean", + "default": false + }, + { + "key": "template_name", + "label": "Template Name", + "type": "text", + "placeholder": "transient" + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index 1f0e4eeffb..416e530db2 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -19,12 +19,12 @@ { "type": "text", "key": "compression", - "label": "Compression type" + "label": "Encoding" }, { "type": "text", "key": "format", - "label": "Data format" + "label": "Format" }, { "type": "number", @@ -91,6 +91,11 @@ "type": "boolean", "key": "off_screen", "label": " Off Screen" + }, + { + "type": "boolean", + "key": "pan_zoom", + "label": " 2D Pan/Zoom" } ] }, @@ -156,7 +161,7 @@ { "type": "boolean", "key": "override_viewport_options", - "label": "override_viewport_options" + "label": "Override Viewport Options" }, { "type": "enum", diff --git a/openpype/tools/project_manager/project_manager/view.py b/openpype/tools/project_manager/project_manager/view.py index b35491c5b2..2cf11b702d 100644 --- a/openpype/tools/project_manager/project_manager/view.py +++ b/openpype/tools/project_manager/project_manager/view.py @@ -72,8 +72,8 @@ class HierarchyView(QtWidgets.QTreeView): column_delegate_defs = { "name": NameDef(), "type": TypeDef(), - "frameStart": NumberDef(1), - "frameEnd": NumberDef(1), + "frameStart": NumberDef(0), + "frameEnd": NumberDef(0), "fps": NumberDef(1, decimals=3, step=1), "resolutionWidth": NumberDef(0), "resolutionHeight": NumberDef(0), diff --git a/openpype/tools/utils/lib.py b/openpype/tools/utils/lib.py index 8d38f03b8d..950c782727 100644 --- a/openpype/tools/utils/lib.py +++ b/openpype/tools/utils/lib.py @@ -862,11 +862,11 @@ class WrappedCallbackItem: return self._result def execute(self): - """Execute 
callback and store it's result. +        """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback or exception -        information when callback raise one. +        information when callback raises one. """ if self.done: self.log.warning("- item is already processed") diff --git a/openpype/version.py b/openpype/version.py index 5b6db12b5e..4d6ee5590e 100644 --- a/openpype/version.py +++ b/openpype/version.py @@ -1,3 +1,3 @@ # -*- coding: utf-8 -*- """Package declaring Pype version.""" -__version__ = "3.15.3-nightly.3" +__version__ = "3.15.4-nightly.1" diff --git a/pyproject.toml b/pyproject.toml index 02370a4f10..42ce5aa32c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "OpenPype" -version = "3.15.2" # OpenPype +version = "3.15.3" # OpenPype description = "Open VFX and Animation pipeline with support." authors = ["OpenPype Team "] license = "MIT License" diff --git a/website/docs/admin_hosts_maya.md b/website/docs/admin_hosts_maya.md index 23cacb4193..68b402c5e0 100644 --- a/website/docs/admin_hosts_maya.md +++ b/website/docs/admin_hosts_maya.md @@ -110,6 +110,41 @@ or Deadlines **Draft Tile Assembler**. This is useful to fix some specific renderer glitches and advanced hacking of Maya Scene files. `Patch name` is a label for the patch for easier orientation. `Patch regex` is a regex used to find a line in the file; the `Patch line` string is inserted after it. Note that you need to add a line ending. +### Extract Playblast Settings (review) +These settings provide granular control over how the playblasts or reviews are produced in Maya. + +Some of these settings are also available on the instance itself, in which case these settings will become the default value when creating the review instance. + +![Extract Playblast Settings](assets/maya-admin_extract_playblast_settings.png) + +- **Compression type** which file encoding to use. +- **Data format** what format is the file encoding. +- **Quality** lets you control the compression value for the output. Results can vary depending on the compression you selected. Quality values can range from 0 to 100, with a default value of 95. +- **Background Color** the viewport's background color. +- **Background Bottom** the viewport's background bottom color. +- **Background Top** the viewport's background top color. +- **Override display options** override the viewport's display options to use what is set in the settings. +- **Isolate view** isolates the view to what is in the review instance. If only a camera is present in the review instance, all nodes are displayed in view. +- **Off Screen** records the playblast hidden from the user. +- **2D Pan/Zoom** enables the 2D Pan/Zoom functionality of the camera. +- **Renderer name** which renderer to use for playblasting. +- **Width** width of the output resolution. If this value is `0`, the asset's width is used. +- **Height** height of the output resolution. If this value is `0`, the asset's height is used. + +#### Viewport Options + +Most settings to override in the viewport are self-explanatory and can be found in Maya. + +![Extract Playblast Settings](assets/maya-admin_extract_playblast_settings_viewport_options.png) + +- **Override Viewport Options** enable to use the settings below for the viewport when publishing the review. + +#### Camera Options + +These options are set on the camera shape when publishing the review. They correspond to attributes on the Maya camera shape node.
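To make the mapping from these settings to Maya more concrete, here is a hedged sketch that applies a few of the options via `maya.cmds`. The `camera_options` dict, the preset values and the `perspShape` camera are illustrative assumptions, not OpenPype's actual Extract Playblast code; only the `maya.cmds` calls themselves are standard Maya API.

```python
# Hedged sketch only -- not OpenPype's Extract Playblast implementation.
from maya import cmds

# "Camera Options" are plain attributes on the camera shape node.
camera_shape = "perspShape"  # assumed camera shape in the scene
camera_options = {
    "panZoomEnabled": False,   # the "2D Pan/Zoom" toggle above
    "displayGateMask": False,
    "overscan": 1.0,
}
for attr, value in camera_options.items():
    cmds.setAttr("{0}.{1}".format(camera_shape, attr), value)

# Playblast using the documented defaults: quality 95, recorded off screen.
cmds.playblast(
    format="image",            # "Data format"
    compression="jpg",         # "Compression type"
    quality=95,                # "Quality" (0-100)
    offScreen=True,            # "Off Screen"
    widthHeight=(1920, 1080),  # "Width"/"Height"; 0 falls back to the asset resolution
    percent=100,
    viewer=False,
)
```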
+ +![Extract Playblast Settings](assets/maya-admin_extract_playblast_settings_camera_options.png) + ## Custom Menu You can add your custom tools menu into Maya by extending definitions in **Maya -> Scripts Menu Definition**. ![Custom menu definition](assets/maya-admin_scriptsmenu.png) diff --git a/website/docs/assets/integrate_kitsu_note_settings.png b/website/docs/assets/integrate_kitsu_note_settings.png index 127e79ab80..9c59f0bf5c 100644 Binary files a/website/docs/assets/integrate_kitsu_note_settings.png and b/website/docs/assets/integrate_kitsu_note_settings.png differ diff --git a/website/docs/assets/maya-admin_extract_playblast_settings.png b/website/docs/assets/maya-admin_extract_playblast_settings.png new file mode 100644 index 0000000000..9312e30321 Binary files /dev/null and b/website/docs/assets/maya-admin_extract_playblast_settings.png differ diff --git a/website/docs/assets/maya-admin_extract_playblast_settings_camera_options.png b/website/docs/assets/maya-admin_extract_playblast_settings_camera_options.png new file mode 100644 index 0000000000..1a5561328e Binary files /dev/null and b/website/docs/assets/maya-admin_extract_playblast_settings_camera_options.png differ diff --git a/website/docs/assets/maya-admin_extract_playblast_settings_viewport_options.png b/website/docs/assets/maya-admin_extract_playblast_settings_viewport_options.png new file mode 100644 index 0000000000..5b801775d3 Binary files /dev/null and b/website/docs/assets/maya-admin_extract_playblast_settings_viewport_options.png differ diff --git a/website/docs/module_kitsu.md b/website/docs/module_kitsu.md index 23898fba2e..05cff87fcc 100644 --- a/website/docs/module_kitsu.md +++ b/website/docs/module_kitsu.md @@ -43,10 +43,11 @@ Task status can be automatically set during publish thanks to `Integrate Kitsu N `Admin -> Studio Settings -> Project Settings -> Kitsu -> Integrate Kitsu Note`. -There are three settings available: -- `Set status on note` -> turns on and off this integrator. +There are four settings available: +- `Set status on note` -> Turns on and off this integrator. - `Note shortname` -> Which status shortname should be set automatically (Case sensitive). -- `Status conditions` -> Conditions that need to be met for kitsu status to be changed. You can add as many conditions as you like. There are two fields to each conditions: `Condition` (Whether current status should be equal or not equal to the condition status) and `Short name` (Kitsu Shortname of the condition status). +- `Status change conditions - Status conditions` -> Conditions that need to be met for the Kitsu status to be changed. You can add as many conditions as you like. There are two fields for each condition: `Condition` (whether the current status should be equal or not equal to the condition status) and `Short name` (Kitsu shortname of the condition status). +- `Status change conditions - Family requirements` -> With this option you can add requirements on which families must (or must not) be published in order for this integrator to set the task status. There are two fields for each requirement: `Condition` (same as above) and `Family` (name of the family concerned by this requirement). For instance, adding one item set to `Not equal` and `workfile` means the task status changes if a subset from a family other than workfile is published (workfile can still be included), but not if you publish the workfile subset alone; see the sketch below.
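The combined behaviour of status conditions and family requirements can be illustrated with a small sketch in plain Python. The function and field names below are illustrative only and are not the Kitsu module's real internals.

```python
# Illustrative sketch of the Integrate Kitsu Note condition logic;
# not the module's actual implementation.
def status_conditions_met(current_status, conditions):
    """Every status condition must hold for the status to be changed."""
    for cond in conditions:
        matches = current_status == cond["short_name"]
        if matches != (cond["condition"] == "equal"):
            return False
    return True


def family_requirements_met(published_families, requirements):
    """Every family requirement must hold for the status to be changed."""
    for req in requirements:
        if req["condition"] == "equal":
            ok = req["family"] in published_families
        else:  # "not_equal": some family other than this one is published
            ok = any(f != req["family"] for f in published_families)
        if not ok:
            return False
    return True


# The docs' example: one requirement set to `Not equal` + `workfile`.
reqs = [{"condition": "not_equal", "family": "workfile"}]
print(family_requirements_met({"workfile"}, reqs))            # False
print(family_requirements_met({"workfile", "render"}, reqs))  # True
```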
![Integrate Kitsu Note project settings](assets/integrate_kitsu_note_settings.png) diff --git a/website/docs/project_settings/assets/global_tools_custom_staging_dir.png b/website/docs/project_settings/assets/global_tools_custom_staging_dir.png new file mode 100644 index 0000000000..f7e8660a88 Binary files /dev/null and b/website/docs/project_settings/assets/global_tools_custom_staging_dir.png differ diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md index 6c1a269b1f..821585ae21 100644 --- a/website/docs/project_settings/settings_project_global.md +++ b/website/docs/project_settings/settings_project_global.md @@ -82,8 +82,8 @@ All context filters are lists which may contain strings or Regular expressions ( - **`tasks`** - Currently processed task. `["modeling", "animation"]` :::important Filtering -Filters are optional. In case when multiple profiles match current context, profile with higher number of matched filters has higher priority that profile without filters. -(Eg. order of when filter is added doesn't matter, only the precision of matching does.) +Filters are optional. When multiple profiles match the current context, the profile with the higher number of matched filters takes priority over a profile without filters. +(The order of the profiles in the settings doesn't matter, only the precision of matching does.) ::: ## Publish plugins Publish plugins used across all integrations. ### Extract Review Plugin responsible for automatic FFmpeg conversion to a variety of formats. -Extract review is using [profile filtering](#profile-filters) to be able render different outputs for different situations. +Extract review uses [profile filtering](#profile-filters) to render different outputs for different situations. Applicable context filters: **`hosts`** - Host from which publishing was triggered. `["maya", "nuke"]` **Output Definitions** -Profile may generate multiple outputs from a single input. Each output must define unique name and output extension (use the extension without a dot e.g. **mp4**). All other settings of output definition are optional. +A profile may generate multiple outputs from a single input. Each output must define a unique name and output extension (use the extension without a dot, e.g. **mp4**). All other settings of the output definition are optional. ![global_extract_review_output_defs](assets/global_extract_review_output_defs.png) - **`Tags`** - **Output arguments** other FFmpeg output arguments like codec definition. - **`Output width`** and **`Output height`** - - it is possible to rescale output to specified resolution and keep aspect ratio. + - It is possible to rescale the output to a specified resolution and keep the aspect ratio. - If value is set to 0, source resolution will be used. - **`Overscan crop`** ## Tools Settings for OpenPype tools. -## Creator +### Creator Settings related to [Creator tool](artist_tools_creator). -### Subset name profiles +#### Subset name profiles ![global_tools_creator_subset_template](assets/global_tools_creator_subset_template.png) Subset name helps to identify published content. A more specific name helps with organization and avoids mixing of published content. Subset name is defined using one of the templates defined in **Subset name profiles settings**.
The template is filled with context information at the time of creation. @@ -263,10 +263,31 @@ Template may look like `"{family}{Task}{Variant}"`. Some creators may have other keys as their context may require more information or more specific values. Make sure you've read the documentation of the host you're using. -## Workfiles +### Publish + +#### Custom Staging Directory Profiles +With this feature, users can specify a custom data folder path based on presets, which can be used during the creation and publishing stages. + +![global_tools_custom_staging_dir](assets/global_tools_custom_staging_dir.png) + +Staging directories are used as a destination for intermediate files (such as renders) before they are renamed and copied to their proper location during the integration phase. They can be created completely dynamically in the temp folder or, for some DCCs, in the `work` area. +An example is Nuke, where an artist might want to temporarily render pictures into the `work` area to check them before they get published with the "Use existing frames" choice on the write node. + +One of the key advantages of this feature is that it allows users to choose the folder for writing such intermediate files to take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, this feature allows users to keep their intermediate extracted data persistent and use their own infrastructure for regular cleaning. + +In some cases, these DCCs (Nuke, Houdini, Maya) automatically add a rendering path during the creation stage, which is then used in publishing. Creators and extractors of such DCCs need to use these profiles to fill the paths in the DCC's nodes for this functionality to work. + +The custom staging folder uses a path template configured in `project_anatomy/templates/others`, with `transient` being a default example path that can be used. The template requires a `folder` key for it to be usable as a custom staging folder. + +##### Known issues +- Any DCC that uses prefilled paths and stores them inside workfile nodes needs to implement resolving these paths against the configured profiles. +- If the studio uses Site Sync, remote artists need to have access to the configured custom staging folder! +- Each node on the render farm must have access to the configured custom staging folder! + +### Workfiles All settings related to Workfile tool. -### Open last workfile at launch +#### Open last workfile at launch This feature allows you to define a rule for each task/host or toggle the feature globally for all tasks as they are visible in the picture. ![global_tools_workfile_open_last_version](assets/global_tools_workfile_open_last_version.png) diff --git a/website/docs/pype2/admin_presets_plugins.md b/website/docs/pype2/admin_presets_plugins.md index fcdc09439b..44c2a28dec 100644 --- a/website/docs/pype2/admin_presets_plugins.md +++ b/website/docs/pype2/admin_presets_plugins.md @@ -36,7 +36,7 @@ All context filters are lists which may contain strings or Regular expressions ( - **families** - Main family of processed instance. `["plate", "model"]` :::important Filtering -Filters are optional and may not be set. In case when multiple profiles match current context, profile with filters has higher priority that profile without filters. +Filters are optional and may not be set. When multiple profiles match the current context, a profile with filters takes priority over a profile without filters.
::: #### Profile outputs @@ -293,6 +293,7 @@ If source representation has suffix **"h264"** and burnin suffix is **"client"** - It is allowed to use [Anatomy templates](admin_config#anatomy) themselves in burnins if they can be filled with available data. - Additional keys in burnins: + | Burnin key | Description | | --- | --- | | frame_start | First frame number. | @@ -303,6 +304,7 @@ If source representation has suffix **"h264"** and burnin suffix is **"client"** | resolution_height | Resolution height. | | fps | Fps of an output. | | timecode | Timecode by frame start and fps. | + | focalLength | **Only available in Maya**<br/>Camera focal length per frame. Use syntax `{focalLength:.2f}` to limit decimal places. E.g. `35.234985` with `{focalLength:.2f}` would produce `35.23`, whereas `{focalLength:.0f}` would produce `35`. | :::warning `timecode` is a specific key that can be used **only at the end of the content**. (`"BOTTOM_RIGHT": "TC: {timecode}"`)
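The `{focalLength:.2f}` syntax is plain Python format-spec behaviour, so it can be sanity-checked outside Maya; note that `:.2f` rounds to two decimals rather than strictly truncating:

```python
# Plain-Python check of the burnin format specs described above.
focal_length = 35.234985
print("{0:.2f}".format(focal_length))  # -> 35.23
print("{0:.0f}".format(focal_length))  # -> 35
```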