diff --git a/.github/workflows/miletone_release_trigger.yml b/.github/workflows/miletone_release_trigger.yml new file mode 100644 index 0000000000..b5b8aab1dc --- /dev/null +++ b/.github/workflows/miletone_release_trigger.yml @@ -0,0 +1,47 @@ +name: Milestone Release [trigger] + +on: + workflow_dispatch: + inputs: + milestone: + required: true + release-type: + type: choice + description: What release should be created + options: + - release + - pre-release + milestone: + types: closed + + +jobs: + milestone-title: + runs-on: ubuntu-latest + outputs: + milestone: ${{ steps.milestoneTitle.outputs.value }} + steps: + - name: Switch input milestone + uses: haya14busa/action-cond@v1 + id: milestoneTitle + with: + cond: ${{ inputs.milestone == '' }} + if_true: ${{ github.event.milestone.title }} + if_false: ${{ inputs.milestone }} + - name: Print resulted milestone + run: | + echo "${{ steps.milestoneTitle.outputs.value }}" + + call-ci-tools-milestone-release: + needs: milestone-title + uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main + with: + milestone: ${{ needs.milestone-title.outputs.milestone }} + repo-owner: ${{ github.event.repository.owner.login }} + repo-name: ${{ github.event.repository.name }} + version-py-path: "./openpype/version.py" + pyproject-path: "./pyproject.toml" + secrets: + token: ${{ secrets.YNPUT_BOT_TOKEN }} + user_email: ${{ secrets.CI_EMAIL }} + user_name: ${{ secrets.CI_USER }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 0b4c8af2c7..0000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: Stable Release - -on: - release: - types: - - prereleased - -jobs: - create_release: - runs-on: ubuntu-latest - if: github.actor != 'pypebot' - - steps: - - name: 🚛 Checkout Code - uses: actions/checkout@v2 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.9 - - name: Install Python requirements - run: pip install gitpython semver PyGithub - - - name: 💉 Inject new version into files - id: version - run: | - NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/}) - LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release) - - echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT - echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT - echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT - - - name: 💾 Commit and Tag - id: git_commit - if: steps.version.outputs.release_tag != 'skip' - run: | - git config user.email ${{ secrets.CI_EMAIL }} - git config user.name ${{ secrets.CI_USER }} - git add . 
- git commit -m "[Automated] Release" - tag_name="${{ steps.version.outputs.release_tag }}" - git tag -a $tag_name -m "stable release" - - - name: 🔏 Push to protected main branch - if: steps.version.outputs.release_tag != 'skip' - uses: CasperWA/push-protected@v2.10.0 - with: - token: ${{ secrets.YNPUT_BOT_TOKEN }} - branch: main - tags: true - unprotect_reviews: true - - - name: 🚀 Github Release - if: steps.version.outputs.release_tag != 'skip' - uses: ncipollo/release-action@v1 - with: - tag: ${{ steps.version.outputs.release_tag }} - token: ${{ secrets.YNPUT_BOT_TOKEN }} - - - name: ☠ Delete Pre-release - if: steps.version.outputs.release_tag != 'skip' - uses: cb80/delrel@latest - with: - tag: "${{ steps.version.outputs.current_version }}" - - - name: 🔁 Merge main back to develop - if: steps.version.outputs.release_tag != 'skip' - uses: everlytic/branch-merge@1.1.0 - with: - github_token: ${{ secrets.YNPUT_BOT_TOKEN }} - source_ref: 'main' - target_branch: 'develop' - commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' diff --git a/CHANGELOG.md b/CHANGELOG.md index 0da167763b..c7ecbc83bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,1187 @@ # Changelog -## [3.15.0](https://github.com/ynput/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...HEAD) +## [3.15.1](https://github.com/ynput/OpenPype/tree/3.15.1) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.0...3.15.1) + +### **🆕 New features** + + + + +
+Maya: Xgen (3d / maya ) - #4256 + +___ + +#### Brief description + +Initial Xgen implementation. + + + +#### Description + +Implements the Xgen pipeline requested by a client. + + + + +___ + 
+ + + +
+Data exchange cameras for 3d Studio Max (3d / 3dsmax ) - #4376 + +___ + +#### Brief description + +Add the camera family to 3ds Max. + + + +#### Description + +Adds camera extractors (extract abc camera and extract fbx camera) and validators (for camera contents) to 3ds Max. Also adds an extractor for exporting the raw 3ds Max scene (which is also related to the 3ds Max scene family) for the camera family. + + + + +___ + 
+ + +### **🚀 Enhancements** + + + + +
+Adding path validator for non-maya nodes (3d / maya ) - #4271 + +___ + +#### Brief description + +Adds a path validator for filepaths from non-Maya nodes, which are created by plugins such as Renderman, Yeti and abcImport. + + + +#### Description + +As the File Path Editor cannot catch wrong filepaths from non-Maya nodes such as AlembicNodes, it is necessary to have a new validator to ensure the existence of the filepaths from those nodes. + + + + +___ + 
+ + + +
+Deadline: Allow disabling strict error check in Maya submissions (3d / maya / deadline ) - #4420 + +___ + +#### Brief description + +Deadline has strict error checking enabled by default, but some errors are not fatal. + + + +#### Description + +This allows setting a profile, based on Task and Subset values, to temporarily disable Strict Error Checks. Subset and task names should support regular expressions (not wildcard notation, though). + + + + +___ + 
+ + + +
+Houdini: New publisher code tweak (3d / houdini ) - #4374 + +___ + +#### Brief description + +This is cosmetic only - the previous code felt quite unreadable due to the lengthy strings being used. + + + +#### Description + +The code should do roughly the same, just reformatted. + + + + +___ + 
+ + + +
+3dsmax: enhance alembic loader update function (3d / 3dsmax ) - #4387 + +___ + +## Enhancement + + + +This PR adds update/switch ability to the pointcache/alembic loader in 3dsmax and fixes the wrong tool being shown when clicking the "Manage" item in the OpenPype menu, which is now correctly Scene Inventory (it was Subset Manager). + + + +Alembic update still has one caveat - it doesn't cope with a changed number of objects inside the alembic, since loading an alembic in max involves creating all those objects as first-class nodes. So it will keep the objects in the scene and just update the path to the alembic file on them. +___ + 
+ + + +
+Global: supporting `OPENPYPE_TMPDIR` in staging dir maker (editorial / hiero ) - #4398 + +___ + +#### Brief description + +Productions can use OPENPYPE_TMPDIR to configure the temporary staging directory for publishing. + + + +#### Description + +Studios were asking to be able to configure their own shared storages as temporary staging directories. Template formatting is also supported, with optional keys formatting and the following anatomy keys: - root[work | ] - project[name | code] + + + + +___ + 
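+As an illustrative example (the path is an assumption; the keys follow the anatomy formatting listed above):
+
+```
+OPENPYPE_TMPDIR="/mnt/studio_share/tmp/{project[code]}/staging"
+```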
+ + + +
+General: Functions for current context (other ) - #4324 + +___ + +#### Brief description + +Defined more functions to receive current context information and added the methods to the host integration so the host can affect the result. + + + +#### Description + +This is one of the steps to reduce usage of `legacy_io.Session`. This change defines how to receive current context information -> call functions instead of accessing `legacy_io.Session` or `os.environ` directly. Plus, direct access to the session or environments is unfortunately not enough for some DCCs where multiple workfiles can be opened at one time, which can heavily affect the context, while the host integration sometimes can't affect that at all. `HostBase` already had `get_current_context` implemented; that was enhanced by adding the more specific methods `get_current_project_name`, `get_current_asset_name` and `get_current_task_name`. The same functions were added to `~/openpype/pipeline/context_tools.py`. The functions in context tools call the host integration methods (if available), otherwise they use environment variables as the default implementation does. Also added `get_current_host_name` to receive the host name from the registered host if available, or from an environment variable. + + + + +___ + 
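+A minimal usage sketch of the new functions (module path as stated above; error handling omitted):
+
+```python
+from openpype.pipeline.context_tools import (
+    get_current_project_name,
+    get_current_asset_name,
+    get_current_task_name,
+    get_current_host_name,
+)
+
+# Each call asks the registered host integration first and falls back
+# to environment variables (e.g. AVALON_PROJECT) when it can't answer.
+project_name = get_current_project_name()
+asset_name = get_current_asset_name()
+task_name = get_current_task_name()
+host_name = get_current_host_name()
+```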
+ + + +
+Houdini: Do not visualize the hidden OpenPypeContext node (other / houdini ) - #4382 + +___ + +#### Brief description + +Using the new publisher UI would generate a visible 'null' locator at the origin. It's confusing to the user since it's supposed to be 'hidden'. + + + +#### Description + +Before this PR the user would see a locator/null at the origin, which was the 'hidden' `/obj/OpenPypeContext` node. This null would suddenly appear if the user had ever opened the Publisher UI. After this PR it will not show. Nice and tidy. + + + + +___ + 
+ + + +
+Maya + Blender: Pyblish plugins removed unused `version` and `category` attributes (other ) - #4402 + +___ + +#### Brief description + +Once upon a time in a land far far away there lived a few plug-ins who felt like they didn't belong in generic boxes and felt they needed to be versioned well above others. They tried, but with no success. + + + +#### Description + +Even though they now lived in a universe with elaborate `version` and `category` attributes embedded into their tiny little plug-in DNA, this particular deviation has gone greatly unused. There is nothing special about the version, nothing special about the category. It does nothing. + + + + +___ + 
+ + + +
+General: Fix original basename frame issues (other ) - #4452 + +___ + +#### Brief description + +Treat `{originalBasename}` in a different way than standard file processing. When a template uses `{originalBasename}`, the transfers use the filenames as they are, without any changes or handling of frames. + + + +#### Description + +Frame handling is problematic with the original basename because its padding can't be defined to match the padding in the source filenames. It also limits the usage of the functionality to "must have a frame at the end of the filename". This is a proposal for how that could be solved by simply ignoring frame handling and using the filenames as they are on the representation. The first frame is still stored to the representation context but is not used in the formatting part. This way we don't have to care about the padding of frames at all. + + + + +___ + 
+ + + +
+Publisher: Report also crashed creators and convertors (other ) - #4473 + +___ + +#### Brief description + +Added reporting of crashes during creator and convertor discovery (lazy solution). + + + +#### Description + +The report in the Publisher also contains information about files that crashed during creator plugin discovery and convertor plugin discovery. They're not separated into categories and there is no other information about them in the report, but this helps a lot during development. This change does not require changing the format/schema of the report nor the UI logic. + + + + +___ + 
+ + +### **🐛 Bug fixes** + + + + +
+Maya: Fix Validate Attributes plugin (3d / maya ) - #4401 + +___ + +#### Brief description + +The code was broken, so either the plug-in was unused or the breakage had gone unnoticed. + + + +#### Description + +Looking at the commit history of the plug-in itself, it seems this might have been broken for somewhere between two and three years. I think it's been broken for two years, since this commit. Should this plug-in be removed completely? @tokejepsen Is there still a use case where we should have this plug-in? (You created the original one.) + + + + +___ + 
+ + + +
+Maya: Ignore workfile lock in Untitled scene (3d / maya ) - #4414 + +___ + +#### Brief description + +Skip workfile lock check if current scene is 'Untitled'. + + + + +___ + +
+ + + +
+Maya: fps rounding - OP-2549 (3d / maya ) - #4424 + +___ + +#### Brief description + +When FPS is registered in, for example, Ftrack and rounded either down or up (floor/ceil), comparing it to Maya FPS can fail. Example: 23.97 (Ftrack/Mongo) != 23.976023976023978 (Maya) + + + +#### Description + +Since Maya only has a select number of supported framerates, I've taken the approach of converting any fps to a supported framerate in Maya. We validate the input fps to make sure they are supported in Maya in two ways: whole numbers are validated straight against the supported framerates in Maya; decimal numbers are matched to the closest supported framerate in Maya, and if the difference to the closest supported framerate is more than 0.5, we'll throw an error. If Maya ever supports arbitrary framerates, then we might have a problem, but I'm not holding my breath... + + + + +___ + 
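+A minimal usage sketch of the conversion helper this PR adds in `openpype/hosts/maya/api/lib.py` (example values chosen from the supported framerate lists in that function):
+
+```python
+from openpype.hosts.maya.api.lib import convert_to_maya_fps
+
+convert_to_maya_fps(25)     # supported whole number -> 25
+convert_to_maya_fps(23.97)  # decimal -> snapped to 23.976023976023978
+convert_to_maya_fps(7)      # unsupported whole number -> raises ValueError
+```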
+ + + +
+Strict Error Checking Default (3d / maya ) - #4457 + +___ + +#### Brief description + +Provide a default for strict error checking on instances created prior to this PR. + + + + +___ + 
+ + + +
+Create: Enhance instance & context changes (3d / houdini,after effects,3dsmax ) - #4375 + +___ + +#### Brief description + +Changes of instances and context have a complex, hard-to-grasp structure. The structure did not change, but object-based data is used instead of complex dictionaries. + + + +#### Description + +This is a proposal to improve changes data for creators. Implemented `TrackChangesItem`, which handles the changes for us. The item creates changes based on old and new values and can provide information about changed keys or access to the full old or new value. It can give the values on any "sub-dictionary". Used this new approach to fix changes in Houdini and 3ds Max, and also modified one After Effects plugin using changes. + + + + +___ + 
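+A hedged sketch of reading changes through the new item (`_changes.get("subset")` and `new_value` appear in this PR's After Effects plugin; `old_value` is an assumed counterpart):
+
+```python
+subset_change = _changes.get("subset")
+if subset_change:
+    previous = subset_change.old_value  # value before the change (assumed accessor)
+    current = subset_change.new_value   # value after the change
+```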
+ + + +
+Houdini: hotfix condition (3d / houdini ) - #4391 + +___ + +## Hotfix + + + +This fixes a bug introduced in #4374 +___ + 
+ + + +
+Houdini: Houdini shelf tools fixes (3d / houdini ) - #4428 + +___ + +#### Brief description + +Fix Houdini shelf tools. + + + +#### Description + +Use `label` as the mandatory key instead of `name`. Changed how shelves are created. If the script is empty, it is gracefully skipped instead of crashing. + + + + +___ + 
+ + + +
+3dsmax: startup fixes (3d / 3dsmax ) - #4412 + +___ + +#### Brief description + +This fixes various issues that can occur on some 3dsmax versions. + + + +#### Description + +On displays with 4K+ resolution the UI was broken, and some 3dsmax versions couldn't process `PYTHONPATH` correctly. This PR forces `sys.path` and disables `QT_AUTO_SCREEN_SCALE_FACTOR`. + + + + +___ + 
+ + + +
+Fix features for gizmo menu (2d / nuke ) - #4280 + +___ + +#### Brief description + +Fix features of the Gizmo Menu project settings (shortcuts for both the Python type of usage and the file type of usage). + + + + +___ + 
+ + + +
+Photoshop: fix missing legacy io for legacy instances (2d / photoshop,after effects ) - #4467 + +___ + +#### Brief description + +The `legacy_io` import was removed, but its usage stayed. + + + +#### Description + +Usage of `legacy_io` should be eradicated; in creators it should be replaced by `self.create_context.get_current_project_name/asset_name/task_name`. + + + + +___ + 
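+A sketch of the replacement pattern inside a creator (it mirrors the After Effects change further down in this diff):
+
+```python
+# before (removed): legacy_io.Session.get("AVALON_TASK")
+project_name = self.create_context.get_current_project_name()
+asset_name = self.create_context.get_current_asset_name()
+task_name = self.create_context.get_current_task_name()
+```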
+ + + +
+Fix - addSite loader handles hero version (other / sitesync ) - #4359 + +___ + +#### Brief description + +When adding a site to a representation, the presence of a hero version is checked; if found, the hero version is marked to be downloaded too. Replaces https://github.com/ynput/OpenPype/pull/4191 + + + + +___ + 
+ + + +
+Remove OIIO build for macos (other ) - #4381 + +___ + +## Fix + + + +Since we are not able to provide OpenImageIO tools binaries for macos, we should remove the item from the `pyproject.toml`. This PR takes care of it. + + + +It also changes the way the `fetch_thirdparty_libs` script works so that it doesn't crash when a lib cannot be processed; it only issues a warning. + + + + + +Resolves #3858 +___ + 
+ + + +
+General: Attribute definitions fixes (other ) - #4392 + +___ + +#### Brief description + +Fix possible issues with attribute definitions in the publisher if there is an unknown attribute on an instance. + + + +#### Description + +The source of the issue is that attribute definitions from a creator plugin could be "expanded" during `CreatedInstance` initialization, which would affect all other instances using the same list of attributes -> literally the same list object. If the same list object is used in a "BaseClass" for other creators, it would affect all of their instances (because of 1 instance). Other changes had to be implemented to fix the issue and keep behavior the same. An object of `CreatedInstance` can be created without a reference to a creator object. `CreatedInstance` is responsible for giving the UI attribute definitions (technically it is prepared for cases when each instance may have different attribute definitions -> not yet). Attribute definitions have added conditions for the `__eq__` method and now implement the `__ne__` method (which is required for Py 2 compatibility). Renamed `AbtractAttrDef` to `AbstractAttrDef` (fixed typo). + + + + +___ + 
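+A minimal sketch of the Python 2 compatible comparison mentioned above (the compared attribute is illustrative):
+
+```python
+class AbstractAttrDef(object):
+    def __eq__(self, other):
+        return (
+            isinstance(other, self.__class__)
+            and self.key == other.key  # illustrative condition
+        )
+
+    def __ne__(self, other):
+        # Python 2 does not derive `!=` from `__eq__`, so it must be explicit
+        return not self.__eq__(other)
+```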
+ + + +
+Ftrack: Don't force ftrackapp endpoint (other / ftrack ) - #4411 + +___ + +#### Brief description + +Auto-fill of the Ftrack URL no longer breaks custom URLs. Custom URLs couldn't be used because `ftrackapp.com` was added when it was not in the URL. + + + +#### Description + +The code was changed in a way that auto-fill is still supported, but before `ftrackapp` is added, the URL is tried as is. If the connection works with the URL as is, it is used. + + + + +___ + 
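+A hedged sketch of the new resolution order (both helper names are assumptions, not the actual API):
+
+```python
+def resolve_ftrack_url(url):
+    # Try the URL exactly as entered first; only then fall back to
+    # the auto-filled ftrackapp.com variant.
+    for candidate in (url, "{}.ftrackapp.com".format(url)):
+        if check_server_connection(candidate):  # hypothetical helper
+            return candidate
+    return None
+```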
+ + + +
+Fix: DL on MacOS (other ) - #4418 + +___ + +#### Brief description + +This works if the Deadline OpenPype plugin's Installation Directories is set to the level of the app bundle (eg. '/Applications/OpenPype 3.15.0.app') + + + + +___ + 
+ + + +
+Photoshop: make usage of layer name in subset name more controllable (other ) - #4432 + +___ + +#### Brief description + +The layer name was previously used in the subset name only if multiple instances were being created in a single step. This adds an explicit toggle. + + + +#### Description + +Toggling this button allows using the layer name in the created subset name even if a single instance is being created. This more closely follows the After Effects implementation. + + + + +___ + 
+ + + +
+SiteSync: fix dirmap (other ) - #4436 + +___ + +#### Brief description + +Fixed a dirmap issue in Maya and Nuke. + + + +#### Description + +Loads of errors were thrown in the Nuke console about a dictionary value: `AttributeError: 'dict' object has no attribute 'lower'` + + + + +___ + 
+ + + +
+General: Ignore decode error of stdout/stderr in run_subprocess (other ) - #4446 + +___ + +#### Brief description + +Ignore decode errors and replace an invalid character (byte) with an escaped byte character. + + + +#### Description + +Calling `run_subprocess` may cause crashes if the output contains a unicode character that cannot be decoded (for example a Polish name of an encoder handler). + + + + +___ + 
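+A minimal sketch of the decoding behavior described above (the exact call site inside `run_subprocess` may differ):
+
+```python
+# Invalid bytes no longer raise UnicodeDecodeError; they are kept as
+# escaped byte characters, e.g. b"\xb3" decodes to the text "\\xb3".
+text = process_output_bytes.decode("utf-8", errors="backslashreplace")
+```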
+ + + +
+Publisher: Fix reopen bug (other ) - #4463 + +___ + +#### Brief description + +Use the right name of the constant: 'ActiveWindow' -> 'WindowActive'. + + + + +___ + 
+ + + +
+Publisher: Fix compatibility of QAction in Publisher (other ) - #4474 + +___ + +#### Brief description + +Fix `QAction` for older versions of Qt bindings where QAction requires a parent on initialization. + + + +#### Description + +This bug was discovered in Nuke 11. Fixed by creating the QAction only when the QMenu is already available and can be used as its parent. + + + + +___ + 
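+A sketch of the compatible construction (PySide/PyQt style; the binding import and widget names are illustrative, the project may use a different Qt shim):
+
+```python
+from qtpy import QtWidgets
+
+# Older bindings (e.g. the Qt build shipped with Nuke 11) require a
+# parent at initialization, so create the action once the menu exists.
+menu = QtWidgets.QMenu(parent_widget)
+action = QtWidgets.QAction("Label", menu)
+menu.addAction(action)
+```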
+ + +### **🔀 Refactored code** + + + + +
+General: Remove 'openpype.api' (other ) - #4413 + +___ + +#### Brief description + +This PR removes the `openpype/api.py` file, which was causing a lot of troubles and cross-imports. + + + +#### Description + +I wanted to remove the file slowly, function by function, but it always reappears somewhere in the codebase, even though most of the functionality imported from there triggers deprecation warnings. This is a small change which may have a huge impact. There shouldn't be anything in the openpype codebase using `openpype.api` anymore, so the only possible issues are in customized repositories or custom addons. + + + + +___ + 
+ + +### **📃 Documentation** + + + + +
+docs-user-Getting Started adjustments (other ) - #4365 + +___ + +#### Brief description + +Small typo fixes here and there, plus additional info on installing/running OP. + + + + +___ + 
+ + +### **Merged pull requests** + + + + +
+Renderman support for sample and display filters (3d / maya ) - #4003 + +___ + +#### Brief description + +Users can set up both sample and display filters in OpenPype settings if they are using Renderman as the renderer. + + + +#### Description + +You can preset the sample and display filters for Renderman, including the cryptomatte renderpass, in OpenPype settings. Once you select which filters to include in OpenPype settings and then create a render instance for your camera in Maya, it automatically tells the system to generate your selected filters in the render settings. The place to set up the filters: _Maya > Render Settings > Renderman Renderer > Display Filters / Sample Filters_ + + + + +___ + 
+ + + +
+Maya: Create Arnold options on repair. (3d / maya ) - #4448 + +___ + +#### Brief description + +When validating/repairing we previously required users to open render settings to create the Arnold options. This is done through code now. + + + + +___ + +
+ + + +
+Update Asset field of creator Instances in Maya Template Builder (3d / maya ) - #4470 + +___ + +#### Brief description + +When we build a template with Maya Template Builder, it will update the asset field of the sets (creator instances) that are imported from the template. + + + +#### Description + +When building a template, we also want to define the publishable content in advance: create an instance of a model, or look, etc., to speed up the workflow and reduce the number of questions we are asked. After building a work file from a saved template that contains pre-created instances, the template builder should update the asset field to the current asset. + + + + +___ + +
+ + + +
+Blender: fix import workfile all families (3d / blender ) - #4405 + +___ + +#### Brief description + +Having this feature related to workfile available for any family is absurd. + + + + +___ + +
+ + + +
+Nuke: update rendered frames in latest version (2d / nuke ) - #4362 + +___ + +#### Brief description + +Introduced a new field to specify frame(s) to re-render only. + + + +#### Description + +Rendering is expensive; sometimes it is helpful to re-render only changed frames and reuse the existing ones. In the Publisher, artists can fill in which frame(s) should be re-rendered. If there is already a published version of the currently publishing subset, all representation files are collected (currently for the `render` family only), and then, when Nuke is rendering (locally only for now), old published files are copied into a temporary render folder where only the frames explicitly set in the new field get rewritten. That way the review/burnin process can also reuse old files and recreate reviews/burnins. A new version is produced during this process! + + + + +___ + 
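+A hedged sketch of the frame-reuse step described above (`published_files` and `staging_dir` are illustrative names):
+
+```python
+import shutil
+
+rerender_frames = {1001, 1002}  # frames the artist entered in the new field
+for frame, published_path in published_files.items():
+    if frame not in rerender_frames:
+        # reuse the already published frame; Nuke renders only the rest
+        shutil.copy(published_path, staging_dir)
+```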
+ + + +
+Feature: Keep synced hero representations up-to-date. (other ) - #4343 + +___ + +#### Brief description + +Keep previously synchronized sites up-to-date by comparing old and new sites and adding old sites if they are missing in the new ones. Fixes #4331 + + + + +___ + 
+ + + +
+Maya: Fix template builder bug where assets are not put in the right hierarchy (other ) - #4367 + +___ + +#### Brief description + +When building a scene from a template, the assets loaded from the placeholders were not put in the hierarchy. Plus, the assets were loaded twice. + + + + +___ + 
+ + + +
+Bump ua-parser-js from 0.7.31 to 0.7.33 in /website (other ) - #4371 + +___ + +Bumps [ua-parser-js](https://github.com/faisalman/ua-parser-js) from 0.7.31 to 0.7.33. +
+Changelog
+
+Sourced from ua-parser-js's changelog.
+
+Version 0.7.31 / 1.0.2
+
+- Fix OPPO Reno A5 incorrect detection
+- Fix TypeError Bug
+- Use AST to extract regexes and verify them with safe-regex
+
+Version 0.7.32 / 1.0.32
+
+- Add new browser : DuckDuckGo, Huawei Browser, LinkedIn
+- Add new OS : HarmonyOS
+- Add some Huawei models
+- Add Sharp Aquos TV
+- Improve detection Xiaomi Mi CC9
+- Fix Sony Xperia 1 III misidentified as Acer tablet
+- Fix Detect Sony BRAVIA as SmartTV
+- Fix Detect Xiaomi Mi TV as SmartTV
+- Fix Detect Galaxy Tab S8 as tablet
+- Fix WeGame mistakenly identified as WeChat
+- Fix included commas in Safari / Mobile Safari version
+- Increase UA_MAX_LENGTH to 350
+
+Version 0.7.33 / 1.0.33
+
+- Add new browser : Cobalt
+- Identify Macintosh as an Apple device
+- Fix ReDoS vulnerability
+
+Version 0.8
+
+Version 0.8 was created by accident. This version is now deprecated and no longer maintained, please update to version 0.7 / 1.0.
+Commits + +
+
+ + +[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ua-parser-js&package-manager=npm_and_yarn&previous-version=0.7.31&new-version=0.7.33)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) + +Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. + +[//]: # (dependabot-automerge-start) +[//]: # (dependabot-automerge-end) + +--- + +
+Dependabot commands and options +
+ +You can trigger Dependabot actions by commenting on this PR: +- `@dependabot rebase` will rebase this PR +- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it +- `@dependabot merge` will merge this PR after your CI passes on it +- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it +- `@dependabot cancel merge` will cancel a previously requested merge and block automerging +- `@dependabot reopen` will reopen this PR if it is closed +- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually +- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) +- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language +- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language +- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language +- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language + +You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts). + +
+___ + +
+ + + +
+Docs: Question about renaming in Kitsu (other ) - #4384 + +___ + +#### Brief description + +To keep a record of this discussion: https://discord.com/channels/517362899170230292/563751989075378201/1068112668491255818 + + + + +___ + 
+ + + +
+New Publisher: Fix Creator error typo (other ) - #4396 + +___ + +#### Brief description + +Fixes typo in error message. + + + + +___ + +
+ + + +
+Chore: pyproject.toml version because of Poetry (other ) - #4408 + +___ + +#### Brief description + +The automation injects a wrong version format. + + + + +___ + 
+ + + +
+Fix - remove minor part in toml (other ) - #4437 + +___ + +#### Brief description + +It causes an issue in create_env with the new Poetry. + + + + +___ + 
+ + + +
+General: Add project code to anatomy (other ) - #4445 + +___ + +#### Brief description + +Added attribute `project_code` to the `Anatomy` object. + + + +#### Description + +Anatomy already has access to almost all attributes from the project anatomy except the project code. This PR changes that. Technically, `Anatomy` is everything that would be needed to get the fill data of a project. + +``` + +{ + + "project": { + + "name": anatomy.project_name, + + "code": anatomy.project_code + + } + +} + +``` + + +___ + 
+ + + +
+Maya: Arnold Scene Source overhaul - OP-4865 (other / maya ) - #4449 + +___ + +#### Brief description + +General overhaul of the Arnold Scene Source (ASS) workflow. + + + +#### Description + +This originally was about supporting static (non-sequential) ASS file publishing, but digging deeper, the whole workflow needed an update to get ready for further issues. During this overhaul the following changes were made: + +- Generalized the Arnold Standin workflow to a single loader. + +- Support multiple nodes as proxies. + +- Support proxies for the `pointcache` family. + +- Generalized the approach to proxies as resources, so they can be the same file format as the original. + +This workflow should allow further expansion to utilize operators and eventually USD. + + + + +___ + 
+ + + + +## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0) **Deprecated:** diff --git a/README.md b/README.md index 485ae7f4ee..514ffb62c0 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,6 @@ OpenPype [![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846) -this Introduction ------------ diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 10ded8b912..c20b0ec51b 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -6,8 +6,7 @@ from openpype.hosts.aftereffects import api from openpype.pipeline import ( Creator, CreatedInstance, - CreatorError, - legacy_io, + CreatorError ) from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances from openpype.lib import prepare_template_data @@ -127,7 +126,7 @@ class RenderCreator(Creator): subset_change = _changes.get("subset") if subset_change: api.get_stub().rename_item(created_inst.data["members"][0], - subset_change[1]) + subset_change.new_value) def remove_instances(self, instances): for instance in instances: @@ -195,7 +194,7 @@ class RenderCreator(Creator): instance_data.pop("uuid") if not instance_data.get("task"): - instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + instance_data["task"] = self.create_context.get_current_task_name() if not instance_data.get("creator_attributes"): is_old_farm = instance_data["family"] != "renderLocal" diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py index c698af896b..2e7b9d4a7e 100644 --- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -2,8 +2,7 @@ import openpype.hosts.aftereffects.api as api from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, - legacy_io, + CreatedInstance ) from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances @@ -38,10 +37,11 @@ class AEWorkfileCreator(AutoCreator): existing_instance = instance break - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: asset_doc = get_asset_by_name(project_name, asset_name) diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py index d5294d61c2..5082217db0 100644 --- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py +++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py @@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor): # create staging dir path staging_dir = self.staging_dir(instance) + # append staging dir for later cleanup + 
instance.context.data["cleanupFullPaths"].append(staging_dir) + # add default preset type for thumbnail and reviewable video # update them with settings and override in case the same # are found in there @@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor): "Path `{}` is containing more that one clip".format(path) ) return clips[0] - - def staging_dir(self, instance): - """Provide a temporary directory in which to store extracted files - - Upon calling this method the staging directory is stored inside - the instance.data['stagingDir'] - """ - staging_dir = instance.data.get('stagingDir', None) - openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR") - - if not staging_dir: - if openpype_temp_dir and os.path.exists(openpype_temp_dir): - staging_dir = os.path.normpath( - tempfile.mkdtemp( - prefix="pyblish_tmp_", - dir=openpype_temp_dir - ) - ) - else: - staging_dir = os.path.normpath( - tempfile.mkdtemp(prefix="pyblish_tmp_") - ) - instance.data['stagingDir'] = staging_dir - - instance.context.data["cleanupFullPaths"].append(staging_dir) - - return staging_dir diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py index a33e5cf289..88a3f0b49b 100644 --- a/openpype/hosts/fusion/api/lib.py +++ b/openpype/hosts/fusion/api/lib.py @@ -210,7 +210,8 @@ def switch_item(container, if any(not x for x in [asset_name, subset_name, representation_name]): repre_id = container["representation"] representation = get_representation_by_id(project_name, repre_id) - repre_parent_docs = get_representation_parents(representation) + repre_parent_docs = get_representation_parents( + project_name, representation) if repre_parent_docs: version, subset, asset, _ = repre_parent_docs else: diff --git a/openpype/hosts/fusion/hooks/pre_fusion_setup.py b/openpype/hosts/fusion/hooks/pre_fusion_setup.py index d043d54322..323b8b0029 100644 --- a/openpype/hosts/fusion/hooks/pre_fusion_setup.py +++ b/openpype/hosts/fusion/hooks/pre_fusion_setup.py @@ -36,7 +36,7 @@ class FusionPrelaunch(PreLaunchHook): "Make sure the environment in fusion settings has " "'FUSION_PYTHON3_HOME' set correctly and make sure " "Python 3 is installed in the given path." 
- f"\n\nPYTHON36: {fusion_python3_home}" + f"\n\nPYTHON PATH: {fusion_python3_home}" ) self.log.info(f"Setting {py3_var}: '{py3_dir}'...") diff --git a/openpype/hosts/fusion/plugins/publish/collect_instances.py b/openpype/hosts/fusion/plugins/publish/collect_instances.py index fe60b83827..7b0a1b6369 100644 --- a/openpype/hosts/fusion/plugins/publish/collect_instances.py +++ b/openpype/hosts/fusion/plugins/publish/collect_instances.py @@ -80,6 +80,7 @@ class CollectInstances(pyblish.api.ContextPlugin): "outputDir": os.path.dirname(path), "ext": ext, # todo: should be redundant "label": label, + "task": context.data["task"], "frameStart": context.data["frameStart"], "frameEnd": context.data["frameEnd"], "frameStartHandle": context.data["frameStartHandle"], diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py b/openpype/hosts/fusion/plugins/publish/render_local.py index 79e458b40a..53d8eb64e1 100644 --- a/openpype/hosts/fusion/plugins/publish/render_local.py +++ b/openpype/hosts/fusion/plugins/publish/render_local.py @@ -1,6 +1,4 @@ import os -from pprint import pformat - import pyblish.api from openpype.hosts.fusion.api import comp_lock_and_undo_chunk @@ -23,23 +21,53 @@ class Fusionlocal(pyblish.api.InstancePlugin): # This plug-in runs only once and thus assumes all instances # currently will render the same frame range context = instance.context - key = "__hasRun{}".format(self.__class__.__name__) + key = f"__hasRun{self.__class__.__name__}" if context.data.get(key, False): return - else: - context.data[key] = True - current_comp = context.data["currentComp"] + context.data[key] = True + + self.render_once(context) + frame_start = context.data["frameStartHandle"] frame_end = context.data["frameEndHandle"] path = instance.data["path"] output_dir = instance.data["outputDir"] - ext = os.path.splitext(os.path.basename(path))[-1] + basename = os.path.basename(path) + head, ext = os.path.splitext(basename) + files = [ + f"{head}{str(frame).zfill(4)}{ext}" + for frame in range(frame_start, frame_end + 1) + ] + repre = { + 'name': ext[1:], + 'ext': ext[1:], + 'frameStart': f"%0{len(str(frame_end))}d" % frame_start, + 'files': files, + "stagingDir": output_dir, + } + + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) + + # review representation + repre_preview = repre.copy() + repre_preview["name"] = repre_preview["ext"] = "mp4" + repre_preview["tags"] = ["review", "ftrackreview", "delete"] + instance.data["representations"].append(repre_preview) + + def render_once(self, context): + """Render context comp only once, even with more render instances""" + + current_comp = context.data["currentComp"] + frame_start = context.data["frameStartHandle"] + frame_end = context.data["frameEndHandle"] self.log.info("Starting render") - self.log.info("Start frame: {}".format(frame_start)) - self.log.info("End frame: {}".format(frame_end)) + self.log.info(f"Start frame: {frame_start}") + self.log.info(f"End frame: {frame_end}") with comp_lock_and_undo_chunk(current_comp): result = current_comp.Render({ @@ -48,26 +76,5 @@ class Fusionlocal(pyblish.api.InstancePlugin): "Wait": True }) - if "representations" not in instance.data: - instance.data["representations"] = [] - - collected_frames = os.listdir(output_dir) - repre = { - 'name': ext[1:], - 'ext': ext[1:], - 'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start, - 'files': collected_frames, - "stagingDir": output_dir, - } - 
instance.data["representations"].append(repre) - - # review representation - repre_preview = repre.copy() - repre_preview["name"] = repre_preview["ext"] = "mp4" - repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"] - instance.data["representations"].append(repre_preview) - - self.log.debug(f"_ instance.data: {pformat(instance.data)}") - if not result: raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 4b9849c190..686770b64e 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -126,10 +126,6 @@ def check_inventory(): def application_launch(event): """Event that is executed after Harmony is launched.""" - # FIXME: This is breaking server <-> client communication. - # It is now moved so it it manually called. - # ensure_scene_settings() - # check_inventory() # fills OPENPYPE_HARMONY_JS pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js" pype_harmony_js = pype_harmony_path.read_text() @@ -146,6 +142,9 @@ def application_launch(event): harmony.send({"script": script}) inject_avalon_js() + ensure_scene_settings() + check_inventory() + def export_template(backdrops, nodes, filepath): """Export Template to file. diff --git a/openpype/hosts/max/api/lib.py b/openpype/hosts/max/api/lib.py index 9256ca9ac1..4fb750d91b 100644 --- a/openpype/hosts/max/api/lib.py +++ b/openpype/hosts/max/api/lib.py @@ -120,3 +120,51 @@ def get_all_children(parent, node_type=None): return ([x for x in child_list if rt.superClassOf(x) == node_type] if node_type else child_list) + + +def get_current_renderer(): + """get current renderer""" + return rt.renderers.production + + +def get_default_render_folder(project_setting=None): + return (project_setting["max"] + ["RenderSettings"] + ["default_render_image_folder"]) + + +def set_framerange(start_frame, end_frame): + """ + Note: + Frame range can be specified in different types. Possible values are: + * `1` - Single frame. + * `2` - Active time segment ( animationRange ). + * `3` - User specified Range. + * `4` - User specified Frame pickup string (for example `1,3,5-12`). + + Todo: + Current type is hard-coded, there should be a custom setting for this. 
+ """ + rt.rendTimeType = 4 + if start_frame is not None and end_frame is not None: + frame_range = "{0}-{1}".format(start_frame, end_frame) + rt.rendPickupFrames = frame_range + + +def get_multipass_setting(project_setting=None): + return (project_setting["max"] + ["RenderSettings"] + ["multipass"]) + + +def get_max_version(): + """ + Args: + get max version date for deadline + + Returns: + #(25000, 62, 0, 25, 0, 0, 997, 2023, "") + max_info[7] = max version date + """ + max_info = rt.maxversion() + return max_info[7] diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py new file mode 100644 index 0000000000..a74a6a7426 --- /dev/null +++ b/openpype/hosts/max/api/lib_renderproducts.py @@ -0,0 +1,114 @@ +# Render Element Example : For scanline render, VRay +# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC +# arnold +# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html +import os +from pymxs import runtime as rt +from openpype.hosts.max.api.lib import ( + get_current_renderer, + get_default_render_folder +) +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io + + +class RenderProducts(object): + + def __init__(self, project_settings=None): + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + def render_product(self, container): + folder = rt.maxFilePath + file = rt.maxFileName + folder = folder.replace("\\", "/") + setting = self._project_settings + render_folder = get_default_render_folder(setting) + filename, ext = os.path.splitext(file) + + output_file = os.path.join(folder, + render_folder, + filename, + container) + + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + full_render_list = [] + beauty = self.beauty_render_product(output_file, img_fmt) + full_render_list.append(beauty) + + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + + if renderer == "VUE_File_Renderer": + return full_render_list + + if renderer in [ + "ART_Renderer", + "Redshift_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + render_elem_list = self.render_elements_product(output_file, + img_fmt) + if render_elem_list: + full_render_list.extend(iter(render_elem_list)) + return full_render_list + + if renderer == "Arnold": + aov_list = self.arnold_render_product(output_file, + img_fmt) + if aov_list: + full_render_list.extend(iter(aov_list)) + return full_render_list + + def beauty_render_product(self, folder, fmt): + beauty_output = f"{folder}.####.{fmt}" + beauty_output = beauty_output.replace("\\", "/") + return beauty_output + + # TODO: Get the arnold render product + def arnold_render_product(self, folder, fmt): + """Get all the Arnold AOVs""" + aovs = [] + + amw = rt.MaxtoAOps.AOVsManagerWindow() + aov_mgr = rt.renderers.current.AOVManager + # Check if there is any aov group set in AOV manager + aov_group_num = len(aov_mgr.drivers) + if aov_group_num < 1: + return + for i in range(aov_group_num): + # get the specific AOV group + for aov in aov_mgr.drivers[i].aov_list: + render_element = f"{folder}_{aov.name}.####.{fmt}" + render_element = render_element.replace("\\", "/") + aovs.append(render_element) + # close the AOVs manager window + 
amw.close() + + return aovs + + def render_elements_product(self, folder, fmt): + """Get all the render element output files. """ + render_dirname = [] + + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + # get render elements from the renders + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, renderpass = str(renderlayer_name).split(":") + if renderlayer_name.enabled: + render_element = f"{folder}_{renderpass}.####.{fmt}" + render_element = render_element.replace("\\", "/") + render_dirname.append(render_element) + + return render_dirname + + def image_format(self): + return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa diff --git a/openpype/hosts/max/api/lib_rendersettings.py b/openpype/hosts/max/api/lib_rendersettings.py new file mode 100644 index 0000000000..4940265a23 --- /dev/null +++ b/openpype/hosts/max/api/lib_rendersettings.py @@ -0,0 +1,168 @@ +import os +from pymxs import runtime as rt +from openpype.lib import Logger +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io +from openpype.pipeline.context_tools import get_current_project_asset + +from openpype.hosts.max.api.lib import ( + set_framerange, + get_current_renderer, + get_default_render_folder +) + + +class RenderSettings(object): + + log = Logger.get_logger("RenderSettings") + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + def __init__(self, project_settings=None): + """ + Set up the naming convention for the render + elements for the deadline submission + """ + + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + def set_render_camera(self, selection): + for sel in selection: + # to avoid Attribute Error from pymxs wrapper + found = False + if rt.classOf(sel) in rt.Camera.classes: + found = True + rt.viewport.setCamera(sel) + break + if not found: + raise RuntimeError("Camera not found") + + def render_output(self, container): + folder = rt.maxFilePath + # hard-coded, should be customized in the setting + file = rt.maxFileName + folder = folder.replace("\\", "/") + # hard-coded, set the renderoutput path + setting = self._project_settings + render_folder = get_default_render_folder(setting) + filename, ext = os.path.splitext(file) + output_dir = os.path.join(folder, + render_folder, + filename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + # hard-coded, should be customized in the setting + context = get_current_project_asset() + + # get project resolution + width = context["data"].get("resolutionWidth") + height = context["data"].get("resolutionHeight") + # Set Frame Range + frame_start = context["data"].get("frame_start") + frame_end = context["data"].get("frame_end") + set_framerange(frame_start, frame_end) + # get the production render + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output = os.path.join(output_dir, container) + try: + aov_separator = self._aov_chars[( + self._project_settings["maya"] + ["RenderSettings"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "." 
+ output_filename = "{0}..{1}".format(output, img_fmt) + output_filename = output_filename.replace("{aov_separator}", + aov_separator) + rt.rendOutputFilename = output_filename + if renderer == "VUE_File_Renderer": + return + # TODO: Finish the arnold render setup + if renderer == "Arnold": + self.arnold_setup() + + if renderer in [ + "ART_Renderer", + "Redshift_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + self.render_element_layer(output, width, height, img_fmt) + + rt.rendSaveFile = True + + def arnold_setup(self): + # get Arnold RenderView run in the background + # for setting up renderable camera + arv = rt.MAXToAOps.ArnoldRenderView() + render_camera = rt.viewport.GetCamera() + arv.setOption("Camera", str(render_camera)) + + # TODO: add AOVs and extension + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + setup_cmd = ( + f""" + amw = MaxtoAOps.AOVsManagerWindow() + amw.close() + aovmgr = renderers.current.AOVManager + aovmgr.drivers = #() + img_fmt = "{img_fmt}" + if img_fmt == "png" then driver = ArnoldPNGDriver() + if img_fmt == "jpg" then driver = ArnoldJPEGDriver() + if img_fmt == "exr" then driver = ArnoldEXRDriver() + if img_fmt == "tif" then driver = ArnoldTIFFDriver() + if img_fmt == "tiff" then driver = ArnoldTIFFDriver() + append aovmgr.drivers driver + aovmgr.drivers[1].aov_list = #() + """) + + rt.execute(setup_cmd) + arv.close() + + def render_element_layer(self, dir, width, height, ext): + """For Renderers with render elements""" + rt.renderWidth = width + rt.renderHeight = height + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, renderpass = str(renderlayer_name).split(":") + aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext) + render_elem.SetRenderElementFileName(i, aov_name) + + def get_render_output(self, container, output_dir): + output = os.path.join(output_dir, container) + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output_filename = "{0}..{1}".format(output, img_fmt) + return output_filename + + def get_render_element(self): + orig_render_elem = [] + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + render_element = render_elem.GetRenderElementFilename(i) + orig_render_elem.append(render_element) + + return orig_render_elem diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py new file mode 100644 index 0000000000..269fff2e32 --- /dev/null +++ b/openpype/hosts/max/plugins/create/create_render.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating camera.""" +from openpype.hosts.max.api import plugin +from openpype.pipeline import CreatedInstance +from openpype.hosts.max.api.lib_rendersettings import RenderSettings + + +class CreateRender(plugin.MaxCreator): + identifier = "io.openpype.creators.max.render" + label = "Render" + family = "maxrender" + icon = "gear" + + def create(self, subset_name, instance_data, pre_create_data): + from pymxs import runtime as rt + sel_obj = list(rt.selection) + instance = super(CreateRender, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance 
+ container_name = instance.data.get("instance_node") + container = rt.getNodeByName(container_name) + # TODO: Disable "Add to Containers?" Panel + # parent the selected cameras into the container + for obj in sel_obj: + obj.parent = container + # for additional work on the node: + # instance_node = rt.getNodeByName(instance.get("instance_node")) + + # set viewport camera for rendering(mandatory for deadline) + RenderSettings().set_render_camera(sel_obj) + # set output paths for rendering(mandatory for deadline) + RenderSettings().render_output(container_name) diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py new file mode 100644 index 0000000000..7c9e311c2f --- /dev/null +++ b/openpype/hosts/max/plugins/publish/collect_render.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +"""Collect Render""" +import os +import pyblish.api + +from pymxs import runtime as rt +from openpype.pipeline import get_current_asset_name +from openpype.hosts.max.api.lib import get_max_version +from openpype.hosts.max.api.lib_renderproducts import RenderProducts +from openpype.client import get_last_version_by_subset_name + + +class CollectRender(pyblish.api.InstancePlugin): + """Collect Render for Deadline""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect 3dsmax Render Layers" + hosts = ['max'] + families = ["maxrender"] + + def process(self, instance): + context = instance.context + folder = rt.maxFilePath + file = rt.maxFileName + current_file = os.path.join(folder, file) + filepath = current_file.replace("\\", "/") + + context.data['currentFile'] = current_file + asset = get_current_asset_name() + + render_layer_files = RenderProducts().render_product(instance.name) + folder = folder.replace("\\", "/") + + img_format = RenderProducts().image_format() + project_name = context.data["projectName"] + asset_doc = context.data["assetEntity"] + asset_id = asset_doc["_id"] + version_doc = get_last_version_by_subset_name(project_name, + instance.name, + asset_id) + + self.log.debug("version_doc: {0}".format(version_doc)) + version_int = 1 + if version_doc: + version_int += int(version_doc["name"]) + + self.log.debug(f"Setting {version_int} to context.") + context.data["version"] = version_int + + # setup the plugin as 3dsmax for the internal renderer + data = { + "subset": instance.name, + "asset": asset, + "publish": True, + "maxversion": str(get_max_version()), + "imageFormat": img_format, + "family": 'maxrender', + "families": ['maxrender'], + "source": filepath, + "expectedFiles": render_layer_files, + "plugin": "3dsmax", + "frameStart": context.data['frameStart'], + "frameEnd": context.data['frameEnd'], + "version": version_int + } + self.log.info("data: {0}".format(data)) + instance.data.update(data) diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index 4a36406632..018340d86c 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -57,68 +57,6 @@ def edit_shader_definitions(): window.show() -def reset_frame_range(): - """Set frame range to current asset""" - # Set FPS first - fps = {15: 'game', - 24: 'film', - 25: 'pal', - 30: 'ntsc', - 48: 'show', - 50: 'palf', - 60: 'ntscf', - 23.98: '23.976fps', - 23.976: '23.976fps', - 29.97: '29.97fps', - 47.952: '47.952fps', - 47.95: '47.952fps', - 59.94: '59.94fps', - 44100: '44100fps', - 48000: '48000fps' - }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal") - - cmds.currentUnit(time=fps) - - # Set frame 
start/end - project_name = legacy_io.active_project() - asset_name = legacy_io.Session["AVALON_ASSET"] - asset = get_asset_by_name(project_name, asset_name) - - frame_start = asset["data"].get("frameStart") - frame_end = asset["data"].get("frameEnd") - # Backwards compatibility - if frame_start is None or frame_end is None: - frame_start = asset["data"].get("edit_in") - frame_end = asset["data"].get("edit_out") - - if frame_start is None or frame_end is None: - cmds.warning("No edit information found for %s" % asset_name) - return - - handles = asset["data"].get("handles") or 0 - handle_start = asset["data"].get("handleStart") - if handle_start is None: - handle_start = handles - - handle_end = asset["data"].get("handleEnd") - if handle_end is None: - handle_end = handles - - frame_start -= int(handle_start) - frame_end += int(handle_end) - - cmds.playbackOptions(minTime=frame_start) - cmds.playbackOptions(maxTime=frame_end) - cmds.playbackOptions(animationStartTime=frame_start) - cmds.playbackOptions(animationEndTime=frame_end) - cmds.playbackOptions(minTime=frame_start) - cmds.playbackOptions(maxTime=frame_end) - cmds.currentTime(frame_start) - - cmds.setAttr("defaultRenderGlobals.startFrame", frame_start) - cmds.setAttr("defaultRenderGlobals.endFrame", frame_end) - - def _resolution_from_document(doc): if not doc or "data" not in doc: print("Entered document is not valid. \"{}\"".format(str(doc))) diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index 851028d0e5..509168278c 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -34,7 +34,6 @@ from openpype.pipeline import ( registered_host, ) from openpype.pipeline.context_tools import get_current_project_asset -from .commands import reset_frame_range self = sys.modules[__name__] @@ -1970,8 +1969,6 @@ def get_id_from_sibling(node, history_only=True): return first_id - -# Project settings def set_scene_fps(fps, update=True): """Set FPS from project configuration @@ -1984,30 +1981,23 @@ def set_scene_fps(fps, update=True): """ - fps_mapping = {'15': 'game', - '24': 'film', - '25': 'pal', - '30': 'ntsc', - '48': 'show', - '50': 'palf', - '60': 'ntscf', - '23.98': '23.976fps', - '23.976': '23.976fps', - '29.97': '29.97fps', - '47.952': '47.952fps', - '47.95': '47.952fps', - '59.94': '59.94fps', - '44100': '44100fps', - '48000': '48000fps'} + fps_mapping = { + '15': 'game', + '24': 'film', + '25': 'pal', + '30': 'ntsc', + '48': 'show', + '50': 'palf', + '60': 'ntscf', + '23.976023976023978': '23.976fps', + '29.97002997002997': '29.97fps', + '47.952047952047955': '47.952fps', + '59.94005994005994': '59.94fps', + '44100': '44100fps', + '48000': '48000fps' + } - # pull from mapping - # this should convert float string to float and int to int - # so 25.0 is converted to 25, but 23.98 will be still float. 
- dec, ipart = math.modf(fps) - if dec == 0.0: - fps = int(ipart) - - unit = fps_mapping.get(str(fps), None) + unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None) if unit is None: raise ValueError("Unsupported FPS value: `%s`" % fps) @@ -2074,6 +2064,54 @@ def set_scene_resolution(width, height, pixelAspect): cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect) +def reset_frame_range(): + """Set frame range to current asset""" + + fps = convert_to_maya_fps( + float(legacy_io.Session.get("AVALON_FPS", 25)) + ) + set_scene_fps(fps) + + # Set frame start/end + project_name = legacy_io.active_project() + asset_name = legacy_io.Session["AVALON_ASSET"] + asset = get_asset_by_name(project_name, asset_name) + + frame_start = asset["data"].get("frameStart") + frame_end = asset["data"].get("frameEnd") + # Backwards compatibility + if frame_start is None or frame_end is None: + frame_start = asset["data"].get("edit_in") + frame_end = asset["data"].get("edit_out") + + if frame_start is None or frame_end is None: + cmds.warning("No edit information found for %s" % asset_name) + return + + handles = asset["data"].get("handles") or 0 + handle_start = asset["data"].get("handleStart") + if handle_start is None: + handle_start = handles + + handle_end = asset["data"].get("handleEnd") + if handle_end is None: + handle_end = handles + + frame_start -= int(handle_start) + frame_end += int(handle_end) + + cmds.playbackOptions(minTime=frame_start) + cmds.playbackOptions(maxTime=frame_end) + cmds.playbackOptions(animationStartTime=frame_start) + cmds.playbackOptions(animationEndTime=frame_end) + cmds.playbackOptions(minTime=frame_start) + cmds.playbackOptions(maxTime=frame_end) + cmds.currentTime(frame_start) + + cmds.setAttr("defaultRenderGlobals.startFrame", frame_start) + cmds.setAttr("defaultRenderGlobals.endFrame", frame_end) + + def reset_scene_resolution(): """Apply the scene resolution from the project definition @@ -2125,7 +2163,9 @@ def set_context_settings(): asset_data = asset_doc.get("data", {}) # Set project fps - fps = asset_data.get("fps", project_data.get("fps", 25)) + fps = convert_to_maya_fps( + asset_data.get("fps", project_data.get("fps", 25)) + ) legacy_io.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) @@ -2147,15 +2187,12 @@ def validate_fps(): """ - fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"] - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2) + expected_fps = convert_to_maya_fps( + get_current_project_asset(fields=["data.fps"])["data"]["fps"] + ) + current_fps = mel.eval('currentTimeUnitToFPS()') - fps_match = current_fps == fps + fps_match = current_fps == expected_fps if not fps_match and not IS_HEADLESS: from openpype.widgets import popup @@ -2164,14 +2201,19 @@ def validate_fps(): dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) dialog.setWindowTitle("Maya scene does not match project FPS") - dialog.setMessage("Scene %i FPS does not match project %i FPS" % - (current_fps, fps)) + dialog.setMessage( + "Scene {} FPS does not match project {} FPS".format( + current_fps, expected_fps + ) + ) dialog.setButtonText("Fix") # Set new text for button (add optional argument for the popup?) 
toggle = dialog.widgets["toggle"] update = toggle.isChecked() - dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update)) + dialog.on_clicked_state.connect( + lambda: set_scene_fps(expected_fps, update) + ) dialog.show() @@ -3356,6 +3398,88 @@ def get_attribute_input(attr): return connections[0] if connections else None +def convert_to_maya_fps(fps): + """Convert any fps to supported Maya framerates.""" + float_framerates = [ + 23.976023976023978, + # WTF is 29.97 df vs fps? + 29.97002997002997, + 47.952047952047955, + 59.94005994005994 + ] + # 44100 fps evaluates as 41000.0. Why? Omitting for now. + int_framerates = [ + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 15, + 16, + 20, + 24, + 25, + 30, + 40, + 48, + 50, + 60, + 75, + 80, + 90, + 100, + 120, + 125, + 150, + 200, + 240, + 250, + 300, + 375, + 400, + 500, + 600, + 750, + 1200, + 1500, + 2000, + 3000, + 6000, + 48000 + ] + + # If input fps is a whole number we'll return. + if float(fps).is_integer(): + # Validate fps is part of Maya's fps selection. + if int(fps) not in int_framerates: + raise ValueError( + "Framerate \"{}\" is not supported in Maya".format(fps) + ) + return int(fps) + else: + # Differences to supported float frame rates. + differences = [] + for i in float_framerates: + differences.append(abs(i - fps)) + + # Validate difference does not stray too far from supported framerates. + min_difference = min(differences) + min_index = differences.index(min_difference) + supported_framerate = float_framerates[min_index] + if min_difference > 0.1: + raise ValueError( + "Framerate \"{}\" strays too far from any supported framerate" + " in Maya. Closest supported framerate is \"{}\"".format( + fps, supported_framerate + ) + ) + + return supported_framerate + + def write_xgen_file(data, filepath): """Overwrites data in .xgen files. diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index 0eecedd231..60090e9f6d 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -797,6 +797,11 @@ class RenderProductsVray(ARenderProducts): if default_ext in {"exr (multichannel)", "exr (deep)"}: default_ext = "exr" + # Define multipart. 
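+ # A multichannel EXR merges all AOVs into a single file, so the + # resulting render products are flagged as multipart.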
+ multipart = False + if image_format_str == "exr (multichannel)": + multipart = True + products = [] # add beauty as default when not disabled @@ -804,23 +809,28 @@ class RenderProductsVray(ARenderProducts): if not dont_save_rgb: for camera in cameras: products.append( - RenderProduct(productName="", - ext=default_ext, - camera=camera)) + RenderProduct( + productName="", + ext=default_ext, + camera=camera, + multipart=multipart + ) + ) # separate alpha file separate_alpha = self._get_attr("vraySettings.separateAlpha") if separate_alpha: for camera in cameras: products.append( - RenderProduct(productName="Alpha", - ext=default_ext, - camera=camera) + RenderProduct( + productName="Alpha", + ext=default_ext, + camera=camera, + multipart=multipart + ) ) - - if image_format_str == "exr (multichannel)": + if multipart: # AOVs are merged in m-channel file, only main layer is rendered - self.multipart = True return products # handle aovs from references diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py index 6190a49401..2a730100de 100644 --- a/openpype/hosts/maya/api/lib_rendersettings.py +++ b/openpype/hosts/maya/api/lib_rendersettings.py @@ -14,7 +14,7 @@ from openpype.settings import ( from openpype.pipeline import legacy_io from openpype.pipeline import CreatorError from openpype.pipeline.context_tools import get_current_project_asset -from openpype.hosts.maya.api.commands import reset_frame_range +from openpype.hosts.maya.api.lib import reset_frame_range class RenderSettings(object): diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 791475173f..0f48a133a6 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -12,7 +12,6 @@ from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools from openpype.hosts.maya.api import lib, lib_rendersettings from .lib import get_main_window, IS_HEADLESS -from .commands import reset_frame_range from .workfile_template_builder import ( create_placeholder, @@ -113,7 +112,7 @@ def install(): cmds.menuItem( "Reset Frame Range", - command=lambda *args: reset_frame_range() + command=lambda *args: lib.reset_frame_range() ) cmds.menuItem( diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py index 3416c98793..2f550e787a 100644 --- a/openpype/hosts/maya/api/workfile_template_builder.py +++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -2,7 +2,7 @@ import json from maya import cmds -from openpype.pipeline import registered_host +from openpype.pipeline import registered_host, get_current_asset_name from openpype.pipeline.workfile.workfile_template_builder import ( TemplateAlreadyImported, AbstractTemplateBuilder, @@ -41,10 +41,27 @@ class MayaTemplateBuilder(AbstractTemplateBuilder): )) cmds.sets(name=PLACEHOLDER_SET, empty=True) - cmds.file(path, i=True, returnNewNodes=True) + new_nodes = cmds.file(path, i=True, returnNewNodes=True) cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) + imported_sets = cmds.ls(new_nodes, set=True) + if not imported_sets: + return True + + # update imported sets information + asset_name = get_current_asset_name() + for node in imported_sets: + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance": + continue + if not cmds.attributeQuery("asset", node=node, exists=True): + continue + + cmds.setAttr( + 
"{}.asset".format(node), asset_name, type="string") + return True diff --git a/openpype/hosts/maya/plugins/create/create_ass.py b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py similarity index 84% rename from openpype/hosts/maya/plugins/create/create_ass.py rename to openpype/hosts/maya/plugins/create/create_arnold_scene_source.py index 935a068ca5..2afb897e94 100644 --- a/openpype/hosts/maya/plugins/create/create_ass.py +++ b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -6,7 +6,7 @@ from openpype.hosts.maya.api import ( from maya import cmds -class CreateAss(plugin.Creator): +class CreateArnoldSceneSource(plugin.Creator): """Arnold Scene Source""" name = "ass" @@ -29,7 +29,7 @@ class CreateAss(plugin.Creator): maskOperator = False def __init__(self, *args, **kwargs): - super(CreateAss, self).__init__(*args, **kwargs) + super(CreateArnoldSceneSource, self).__init__(*args, **kwargs) # Add animation data self.data.update(lib.collect_animation_data()) @@ -52,7 +52,7 @@ class CreateAss(plugin.Creator): self.data["maskOperator"] = self.maskOperator def process(self): - instance = super(CreateAss, self).process() + instance = super(CreateArnoldSceneSource, self).process() nodes = [] @@ -61,6 +61,6 @@ class CreateAss(plugin.Creator): cmds.sets(nodes, rm=instance) - assContent = cmds.sets(name="content_SET") - assProxy = cmds.sets(name="proxy_SET", empty=True) + assContent = cmds.sets(name=instance + "_content_SET") + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) cmds.sets([assContent, assProxy], forceElement=instance) diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index cdec140ea8..63c0490dc7 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + from openpype.hosts.maya.api import ( lib, plugin @@ -37,3 +39,9 @@ class CreatePointCache(plugin.Creator): # Default to not send to farm. 
self.data["farm"] = False self.data["priority"] = 50 + + def process(self): + instance = super(CreatePointCache, self).process() + + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) + cmds.sets(assProxy, forceElement=instance) diff --git a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py deleted file mode 100644 index 70866a3ba6..0000000000 --- a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py +++ /dev/null @@ -1,132 +0,0 @@ -import os - -from openpype.pipeline import ( - legacy_io, - load, - get_representation_path -) -from openpype.settings import get_project_settings - - -class AlembicStandinLoader(load.LoaderPlugin): - """Load Alembic as Arnold Standin""" - - families = ["animation", "model", "proxyAbc", "pointcache"] - representations = ["abc"] - - label = "Import Alembic as Arnold Standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - - version = context["version"] - version_data = version.get("data", {}) - family = version["data"]["families"] - self.log.info("version_data: {}\n".format(version_data)) - self.log.info("family: {}\n".format(family)) - frameStart = version_data.get("frameStart", None) - - asset = context["asset"]["name"] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings["maya"]["load"]["colors"] - fps = legacy_io.Session["AVALON_FPS"] - c = colors.get(family[0]) - if c is not None: - r = (float(c[0]) / 255) - g = (float(c[1]) / 255) - b = (float(c[2]) / 255) - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - r, g, b) - - transform_name = label + "_ABC" - - standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0] - standin = cmds.listRelatives(standinShape, parent=True, - typ="transform") - standin = cmds.rename(standin, transform_name) - standinShape = cmds.listRelatives(standin, children=True)[0] - - cmds.parent(standin, root) - - # Set the standin filepath - cmds.setAttr(standinShape + ".dso", self.fname, type="string") - cmds.setAttr(standinShape + ".abcFPS", float(fps)) - - if frameStart is None: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - elif "model" in family: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - else: - cmds.setAttr(standinShape + ".useFrameExtension", 1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - fps = legacy_io.Session["AVALON_FPS"] - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - self.log.info("container:{}".format(container)) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.abcFPS.set(float(fps)) - if "modelMain" in container['objectName']: - 
standin.useFrameExtension.set(0) - else: - standin.useFrameExtension.set(1) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py new file mode 100644 index 0000000000..ab69d62ef5 --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py @@ -0,0 +1,218 @@ +import os +import clique + +import maya.cmds as cmds +import mtoa.ui.arnoldmenu + +from openpype.settings import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.maya.api.lib import ( + unique_namespace, get_attribute_input, maintained_selection +) +from openpype.hosts.maya.api.pipeline import containerise + + +def is_sequence(files): + sequence = False + collections, remainder = clique.assemble(files) + if collections: + sequence = True + + return sequence + + +class ArnoldStandinLoader(load.LoaderPlugin): + """Load as Arnold standin""" + + families = ["ass", "animation", "model", "proxyAbc", "pointcache"] + representations = ["ass", "abc"] + + label = "Load as Arnold standin" + order = -5 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, options): + version = context['version'] + version_data = version.get("data", {}) + + self.log.info("version_data: {}\n".format(version_data)) + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + # Root group + label = "{}:{}".format(namespace, name) + root = cmds.group(name=label, empty=True) + + # Set color. 
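+ # Color the root group in the outliner using the "ass" family color + # from the project settings, when one is configured.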
+ settings = get_project_settings(context["project"]["name"]) + color = settings['maya']['load']['colors'].get('ass') + if color is not None: + cmds.setAttr(root + ".useOutlinerColor", True) + cmds.setAttr( + root + ".outlinerColor", color[0], color[1], color[2] + ) + + with maintained_selection(): + # Create transform with shape + transform_name = label + "_standin" + + standin_shape = mtoa.ui.arnoldmenu.createStandIn() + standin = cmds.listRelatives(standin_shape, parent=True)[0] + standin = cmds.rename(standin, transform_name) + standin_shape = cmds.listRelatives(standin, shapes=True)[0] + + cmds.parent(standin, root) + + # Set the standin filepath + path, operator = self._setup_proxy( + standin_shape, self.fname, namespace + ) + cmds.setAttr(standin_shape + ".dso", path, type="string") + sequence = is_sequence(os.listdir(os.path.dirname(self.fname))) + cmds.setAttr(standin_shape + ".useFrameExtension", sequence) + + nodes = [root, standin] + if operator is not None: + nodes.append(operator) + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def get_next_free_multi_index(self, attr_name): + """Find the next unconnected multi index at the input attribute.""" + for index in range(10000000): + connection_info = cmds.connectionInfo( + "{}[{}]".format(attr_name, index), + sourceFromDestination=True + ) + if len(connection_info or []) == 0: + return index + + def _get_proxy_path(self, path): + basename_split = os.path.basename(path).split(".") + proxy_basename = ( + basename_split[0] + "_proxy." + ".".join(basename_split[1:]) + ) + proxy_path = "/".join([os.path.dirname(path), proxy_basename]) + return proxy_basename, proxy_path + + def _setup_proxy(self, shape, path, namespace): + proxy_basename, proxy_path = self._get_proxy_path(path) + + options_node = "defaultArnoldRenderOptions" + merge_operator = get_attribute_input(options_node + ".operator") + if merge_operator is None: + merge_operator = cmds.createNode("aiMerge") + cmds.connectAttr( + merge_operator + ".message", options_node + ".operator" + ) + + merge_operator = merge_operator.split(".")[0] + + string_replace_operator = cmds.createNode( + "aiStringReplace", name=namespace + ":string_replace_operator" + ) + node_type = "alembic" if path.endswith(".abc") else "procedural" + cmds.setAttr( + string_replace_operator + ".selection", + "*.(@node=='{}')".format(node_type), + type="string" + ) + cmds.setAttr( + string_replace_operator + ".match", + proxy_basename, + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path), + type="string" + ) + + cmds.connectAttr( + string_replace_operator + ".out", + "{}.inputs[{}]".format( + merge_operator, + self.get_next_free_multi_index(merge_operator + ".inputs") + ) + ) + + # We setup the string operator no matter whether there is a proxy or + # not. This makes it easier to update since the string operator will + # always be created. Return original path to use for standin. 
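+ # Fall back to the original file path when no proxy file exists on disk.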
+ if not os.path.exists(proxy_path): + return path, string_replace_operator + + return proxy_path, string_replace_operator + + def update(self, container, representation): + # Update the standin + members = cmds.sets(container['objectName'], query=True) + for member in members: + if cmds.nodeType(member) == "aiStringReplace": + string_replace_operator = member + + shapes = cmds.listRelatives(member, shapes=True) + if not shapes: + continue + if cmds.nodeType(shapes[0]) == "aiStandIn": + standin = shapes[0] + + path = get_representation_path(representation) + proxy_basename, proxy_path = self._get_proxy_path(path) + + # Whether there is a proxy or not, we still update the string operator. + # If no proxy exists, the string operator won't replace anything. + cmds.setAttr( + string_replace_operator + ".match", + "resources/" + proxy_basename, + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path), + type="string" + ) + + dso_path = path + if os.path.exists(proxy_path): + dso_path = proxy_path + cmds.setAttr(standin + ".dso", dso_path, type="string") + + sequence = is_sequence(os.listdir(os.path.dirname(path))) + cmds.setAttr(standin + ".useFrameExtension", sequence) + + cmds.setAttr( + container["objectName"] + ".representation", + str(representation["_id"]), + type="string" + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + members = cmds.sets(container['objectName'], query=True) + cmds.lockNode(members, lock=False) + cmds.delete([container['objectName']] + members) + + # Clean up the namespace + try: + cmds.namespace(removeNamespace=container['namespace'], + deleteNamespaceContent=True) + except RuntimeError: + pass diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py deleted file mode 100644 index 5db6fc3dfa..0000000000 --- a/openpype/hosts/maya/plugins/load/load_ass.py +++ /dev/null @@ -1,290 +0,0 @@ -import os -import clique - -from openpype.settings import get_project_settings -from openpype.pipeline import ( - load, - get_representation_path -) -import openpype.hosts.maya.api.plugin -from openpype.hosts.maya.api.plugin import get_reference_node -from openpype.hosts.maya.api.lib import ( - maintained_selection, - unique_namespace -) -from openpype.hosts.maya.api.pipeline import containerise - - -class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """Load Arnold Proxy as reference""" - - families = ["ass"] - representations = ["ass"] - - label = "Reference .ASS standin with Proxy" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, options): - - import maya.cmds as cmds - import pymel.core as pm - - version = context['version'] - version_data = version.get("data", {}) - - self.log.info("version_data: {}\n".format(version_data)) - - frameStart = version_data.get("frameStart", None) - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "ass" - - with maintained_selection(): - - groupName = "{}:{}".format(namespace, name) - path = self.fname - proxyPath_base = os.path.splitext(path)[0] - - if frameStart is not None: - proxyPath_base = os.path.splitext(proxyPath_base)[0] - - publish_folder = os.path.split(path)[0] - files_in_folder = os.listdir(publish_folder) - collections, remainder = clique.assemble(files_in_folder) - - if collections: - hashes = collections[0].padding * '#' - coll =
collections[0].format('{head}[index]{tail}') - filename = coll.replace('[index]', hashes) - - path = os.path.join(publish_folder, filename) - - proxyPath = proxyPath_base + ".ma" - - project_name = context["project"]["name"] - file_url = self.prepare_root_value(proxyPath, - project_name) - - nodes = cmds.file(file_url, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.makeIdentity(groupName, apply=False, rotate=True, - translate=True, scale=True) - - # Set attributes - proxyShape = pm.ls(nodes, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - settings = get_project_settings(project_name) - colors = settings['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) - ) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - from maya import cmds - import pymel.core as pm - - node = container["objectName"] - - representation["context"].pop("frame", None) - path = get_representation_path(representation) - print(path) - # path = self.fname - print(self.fname) - proxyPath = os.path.splitext(path)[0] + ".ma" - print(proxyPath) - - # Get reference node from container members - members = cmds.sets(node, query=True, nodesOnly=True) - reference_node = get_reference_node(members) - - assert os.path.exists(proxyPath), "%s does not exist." % proxyPath - - try: - file_url = self.prepare_root_value(proxyPath, - representation["context"] - ["project"] - ["name"]) - content = cmds.file(file_url, - loadReference=reference_node, - type="mayaAscii", - returnNewNodes=True) - - # Set attributes - proxyShape = pm.ls(content, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - except RuntimeError as exc: - # When changing a reference to a file that has load errors the - # command will raise an error even if the file is still loaded - # correctly (e.g. when raising errors on Arnold attributes) - # When the file is loaded and has content, we consider it's fine. - if not cmds.referenceQuery(reference_node, isLoaded=True): - raise - - content = cmds.referenceQuery(reference_node, - nodes=True, - dagPath=True) - if not content: - raise - - self.log.warning("Ignoring file read error:\n%s", exc) - - # Add new nodes of the reference to the container - cmds.sets(content, forceElement=node) - - # Remove any placeHolderList attribute entries from the set that - # are remaining from nodes being removed from the referenced file. 
- members = cmds.sets(node, query=True) - invalid = [x for x in members if ".placeHolderList" in x] - if invalid: - cmds.sets(invalid, remove=node) - - # Update metadata - cmds.setAttr("{}.representation".format(node), - str(representation["_id"]), - type="string") - - -class AssStandinLoader(load.LoaderPlugin): - """Load .ASS file as standin""" - - families = ["ass"] - representations = ["ass"] - - label = "Load .ASS file as standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - import pymel.core as pm - - version = context['version'] - version_data = version.get("data", {}) - - self.log.info("version_data: {}\n".format(version_data)) - - frameStart = version_data.get("frameStart", None) - - asset = context['asset']['name'] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # cmds.loadPlugin("gpuCache", quiet=True) - - # Root group - label = "{}:{}".format(namespace, name) - root = pm.group(name=label, empty=True) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings['maya']['load']['colors'] - - c = colors.get('ass') - if c is not None: - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - c[0], c[1], c[2]) - - # Create transform with shape - transform_name = label + "_ASS" - # transform = pm.createNode("transform", name=transform_name, - # parent=root) - - standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn()) - standin = standinShape.getParent() - standin.rename(transform_name) - - pm.parent(standin, root) - - # Set the standin filepath - standinShape.dso.set(self.fname) - if frameStart is not None: - standinShape.useFrameExtension.set(1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - - files_in_path = os.listdir(os.path.split(path)[0]) - sequence = 0 - collections, remainder = clique.assemble(files_in_path) - if collections: - sequence = 1 - - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.useFrameExtension.set(sequence) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py similarity index 60% rename from openpype/hosts/maya/plugins/publish/collect_ass.py rename to openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py index b5e05d6665..0415808b7a 100644 --- 
a/openpype/hosts/maya/plugins/publish/collect_ass.py +++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -1,19 +1,18 @@ from maya import cmds -from openpype.pipeline.publish import KnownPublishError import pyblish.api -class CollectAssData(pyblish.api.InstancePlugin): - """Collect Ass data.""" +class CollectArnoldSceneSource(pyblish.api.InstancePlugin): + """Collect Arnold Scene Source data.""" # Offset to be after renderable camera collection. order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Ass' + label = "Collect Arnold Scene Source" families = ["ass"] def process(self, instance): - objsets = instance.data['setMembers'] + objsets = instance.data["setMembers"] for objset in objsets: objset = str(objset) @@ -21,15 +20,12 @@ class CollectAssData(pyblish.api.InstancePlugin): if members is None: self.log.warning("Skipped empty instance: \"%s\" " % objset) continue - if "content_SET" in objset: - instance.data['setMembers'] = members - self.log.debug('content members: {}'.format(members)) - elif objset.startswith("proxy_SET"): - if len(members) != 1: - msg = "You have multiple proxy meshes, please only use one" - raise KnownPublishError(msg) - instance.data['proxy'] = members - self.log.debug('proxy members: {}'.format(members)) + if objset.endswith("content_SET"): + instance.data["setMembers"] = cmds.ls(members, long=True) + self.log.debug("content members: {}".format(members)) + elif objset.endswith("proxy_SET"): + instance.data["proxy"] = cmds.ls(members, long=True) + self.log.debug("proxy members: {}".format(members)) # Use camera in object set if present else default to render globals # camera. diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/openpype/hosts/maya/plugins/publish/collect_pointcache.py index a841341f72..332992ca92 100644 --- a/openpype/hosts/maya/plugins/publish/collect_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/collect_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + import pyblish.api @@ -12,3 +14,31 @@ class CollectPointcache(pyblish.api.InstancePlugin): def process(self, instance): if instance.data.get("farm"): instance.data["families"].append("publish.farm") + + proxy_set = None + for node in instance.data["setMembers"]: + if cmds.nodeType(node) != "objectSet": + continue + members = cmds.sets(node, query=True) + if members is None: + self.log.warning("Skipped empty objectset: \"%s\" " % node) + continue + if node.endswith("proxy_SET"): + proxy_set = node + instance.data["proxy"] = [] + instance.data["proxyRoots"] = [] + for member in members: + instance.data["proxy"].extend(cmds.ls(member, long=True)) + instance.data["proxyRoots"].extend( + cmds.ls(member, long=True) + ) + instance.data["proxy"].extend( + cmds.listRelatives(member, shapes=True, fullPath=True) + ) + self.log.debug( + "proxy members: {}".format(instance.data["proxy"]) + ) + + if proxy_set: + instance.remove(proxy_set) + instance.data["setMembers"].remove(proxy_set) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index fc297ef612..f2b5262187 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -42,7 +42,6 @@ Provides: import re import os import platform -import json from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -320,7 +319,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "renderSetupIncludeLights" ), "strict_error_checking": 
render_instance.data.get( - "strict_error_checking") + "strict_error_checking", True + ) } # Collect Deadline url if Deadline module is enabled diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py new file mode 100644 index 0000000000..924ac58c40 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -0,0 +1,160 @@ +import os + +from maya import cmds +import arnold + +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import ( + maintained_selection, attribute_values, delete_after +) + + +class ExtractArnoldSceneSource(publish.Extractor): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source" + hosts = ["maya"] + families = ["ass"] + asciiAss = False + + def process(self, instance): + staging_dir = self.staging_dir(instance) + filename = "{}.ass".format(instance.name) + file_path = os.path.join(staging_dir, filename) + + # Mask + mask = arnold.AI_NODE_ALL + + node_types = { + "options": arnold.AI_NODE_OPTIONS, + "camera": arnold.AI_NODE_CAMERA, + "light": arnold.AI_NODE_LIGHT, + "shape": arnold.AI_NODE_SHAPE, + "shader": arnold.AI_NODE_SHADER, + "override": arnold.AI_NODE_OVERRIDE, + "driver": arnold.AI_NODE_DRIVER, + "filter": arnold.AI_NODE_FILTER, + "color_manager": arnold.AI_NODE_COLOR_MANAGER, + "operator": arnold.AI_NODE_OPERATOR + } + + for key in node_types.keys(): + if instance.data.get("mask" + key.title()): + mask = mask ^ node_types[key] + + # Motion blur + attribute_data = { + "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( + "motionBlur", True + ), + "defaultArnoldRenderOptions.motion_steps": instance.data.get( + "motionBlurKeys", 2 + ), + "defaultArnoldRenderOptions.motion_frames": instance.data.get( + "motionBlurLength", 0.5 + ) + } + + # Write out .ass file + kwargs = { + "filename": file_path, + "startFrame": instance.data.get("frameStartHandle", 1), + "endFrame": instance.data.get("frameEndHandle", 1), + "frameStep": instance.data.get("step", 1), + "selected": True, + "asciiAss": self.asciiAss, + "shadowLinks": True, + "lightLinks": True, + "boundingBox": True, + "expandProcedurals": instance.data.get("expandProcedurals", False), + "camera": instance.data["camera"], + "mask": mask + } + + filenames = self._extract( + instance.data["setMembers"], attribute_data, kwargs + ) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "ass", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"] + } + + instance.data["representations"].append(representation) + + self.log.info( + "Extracted instance {} to: {}".format(instance.name, staging_dir) + ) + + # Extract proxy. 
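+ # Proxy nodes are optional; when collected they are written to a + # separate "_proxy.ass" file and published as a "proxy" representation.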
+ if not instance.data.get("proxy", []): + return + + kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + filenames = self._extract( + instance.data["proxy"], attribute_data, kwargs + ) + + representation = { + "name": "proxy", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"], + "outputName": "proxy" + } + + instance.data["representations"].append(representation) + + def _extract(self, nodes, attribute_data, kwargs): + self.log.info("Writing: " + kwargs["filename"]) + filenames = [] + # Duplicating nodes so they are direct children of the world. This + # makes the hierarchy of any exported ass file the same. + with delete_after() as delete_bin: + duplicate_nodes = [] + for node in nodes: + duplicate_transform = cmds.duplicate(node)[0] + + # Discard the children. + shapes = cmds.listRelatives(duplicate_transform, shapes=True) + children = cmds.listRelatives( + duplicate_transform, children=True + ) + cmds.delete(set(children) - set(shapes)) + + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + cmds.rename(duplicate_transform, node.split("|")[-1]) + duplicate_transform = "|" + node.split("|")[-1] + + duplicate_nodes.append(duplicate_transform) + delete_bin.append(duplicate_transform) + + with attribute_values(attribute_data): + with maintained_selection(): + self.log.info( + "Writing: {}".format(duplicate_nodes) + ) + cmds.select(duplicate_nodes, noExpand=True) + + self.log.info( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.info("Exported: {}".format(filenames)) + + return filenames diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py deleted file mode 100644 index 049f256a7a..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_ass.py +++ /dev/null @@ -1,106 +0,0 @@ -import os - -from maya import cmds -import arnold - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection, attribute_values - - -class ExtractAssStandin(publish.Extractor): - """Extract the content of the instance to a ass file""" - - label = "Arnold Scene Source (.ass)" - hosts = ["maya"] - families = ["ass"] - asciiAss = False - - def process(self, instance): - staging_dir = self.staging_dir(instance) - filename = "{}.ass".format(instance.name) - filenames = [] - file_path = os.path.join(staging_dir, filename) - - # Mask - mask = arnold.AI_NODE_ALL - - node_types = { - "options": arnold.AI_NODE_OPTIONS, - "camera": arnold.AI_NODE_CAMERA, - "light": arnold.AI_NODE_LIGHT, - "shape": arnold.AI_NODE_SHAPE, - "shader": arnold.AI_NODE_SHADER, - "override": arnold.AI_NODE_OVERRIDE, - "driver": arnold.AI_NODE_DRIVER, - "filter": arnold.AI_NODE_FILTER, - "color_manager": arnold.AI_NODE_COLOR_MANAGER, - "operator": arnold.AI_NODE_OPERATOR - } - - for key in node_types.keys(): - if instance.data.get("mask" + key.title()): - mask = mask ^ node_types[key] - - # Motion blur - values = { - "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( - "motionBlur", True - ), - "defaultArnoldRenderOptions.motion_steps": instance.data.get( - "motionBlurKeys", 2 - ), - "defaultArnoldRenderOptions.motion_frames": instance.data.get( - "motionBlurLength", 0.5 - ) - } - - # Write out .ass file - kwargs = { - "filename": file_path, - 
"startFrame": instance.data.get("frameStartHandle", 1), - "endFrame": instance.data.get("frameEndHandle", 1), - "frameStep": instance.data.get("step", 1), - "selected": True, - "asciiAss": self.asciiAss, - "shadowLinks": True, - "lightLinks": True, - "boundingBox": True, - "expandProcedurals": instance.data.get("expandProcedurals", False), - "camera": instance.data["camera"], - "mask": mask - } - - self.log.info("Writing: '%s'" % file_path) - with attribute_values(values): - with maintained_selection(): - self.log.info( - "Writing: {}".format(instance.data["setMembers"]) - ) - cmds.select(instance.data["setMembers"], noExpand=True) - - self.log.info( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.info("Exported: {}".format(filenames)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ass', - 'ext': 'ass', - 'files': filenames if len(filenames) > 1 else filenames[0], - "stagingDir": staging_dir, - 'frameStart': kwargs["startFrame"] - } - - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/openpype/hosts/maya/plugins/publish/extract_assproxy.py b/openpype/hosts/maya/plugins/publish/extract_assproxy.py deleted file mode 100644 index 4937a28a9e..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_assproxy.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import contextlib - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection - - -class ExtractAssProxy(publish.Extractor): - """Extract proxy model as Maya Ascii to use as arnold standin - - - """ - - order = publish.Extractor.order + 0.2 - label = "Ass Proxy (Maya ASCII)" - hosts = ["maya"] - families = ["ass"] - - def process(self, instance): - - @contextlib.contextmanager - def unparent(root): - """Temporarily unparent `root`""" - parent = cmds.listRelatives(root, parent=True) - if parent: - cmds.parent(root, world=True) - yield - self.log.info("{} - {}".format(root, parent)) - cmds.parent(root, parent) - else: - yield - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) - path = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.info("Performing extraction..") - - # Get only the shape contents we need in such a way that we avoid - # taking along intermediateObjects - proxy = instance.data.get('proxy', None) - - if not proxy: - self.log.info("no proxy mesh") - return - - members = cmds.ls(proxy, - dag=True, - transforms=True, - noIntermediate=True) - self.log.info(members) - - with maintained_selection(): - with unparent(members[0]): - cmds.select(members, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii", - exportSelected=True, - preserveReferences=False, - channels=False, - constraints=False, - expressions=False, - constructionHistory=False) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ma', - 'ext': 'ma', - 'files': filename, - "stagingDir": stagingdir - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py 
b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 1f9f9db99a..1966ad7b66 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -1,4 +1,5 @@ import os +import json import clique import capture @@ -44,10 +45,6 @@ class ExtractPlayblast(publish.Extractor): # get cameras camera = instance.data['review_camera'] - override_viewport_options = ( - self.capture_preset['Viewport Options'] - ['override_viewport_options'] - ) preset = lib.load_capture_preset(data=self.capture_preset) # Grab capture presets from the project settings capture_presets = self.capture_preset @@ -119,6 +116,27 @@ class ExtractPlayblast(publish.Extractor): pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + # Need to explicitly enable some viewport changes so the viewport is + # refreshed ahead of playblasting. + panel = cmds.getPanel(withFocus=True) + keys = [ + "useDefaultMaterial", + "wireframeOnShaded", + "xray", + "jointXray", + "backfaceCulling" + ] + viewport_defaults = {} + for key in keys: + viewport_defaults[key] = cmds.modelEditor( + panel, query=True, **{key: True} + ) + if preset["viewport_options"][key]: + cmds.modelEditor(panel, edit=True, **{key: True}) + + override_viewport_options = ( + capture_presets['Viewport Options']['override_viewport_options'] + ) with lib.maintained_time(): filename = preset.get("filename", "%TEMP%") @@ -127,18 +145,26 @@ class ExtractPlayblast(publish.Extractor): # playblast and viewer preset['viewer'] = False - self.log.info('using viewport preset: {}'.format(preset)) - # Update preset with current panel setting # if override_viewport_options is turned off - if not override_viewport_options: - panel = cmds.getPanel(withFocus=True) + panel = cmds.getPanel(withFocus=True) or "" + if not override_viewport_options and "modelPanel" in panel: panel_preset = capture.parse_active_view() + panel_preset.pop("camera") preset.update(panel_preset) cmds.setFocus(panel) + self.log.info( + "Using preset:\n{}".format( + json.dumps(preset, sort_keys=True, indent=4) + ) + ) + path = capture.capture(log=self.log, **preset) + # Restoring viewport options. 
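+ # The defaults were captured before the playblast-specific overrides + # were applied above.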
+ cmds.modelEditor(panel, edit=True, **viewport_defaults) + cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) self.log.debug("playblast path {}".format(path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index 7ed73fd5b0..0eb65e4226 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -1,4 +1,5 @@ import os +import copy from maya import cmds @@ -9,6 +10,7 @@ from openpype.hosts.maya.api.lib import ( maintained_selection, iter_visible_nodes_in_range ) +from openpype.lib import StringTemplate class ExtractAlembic(publish.Extractor): @@ -23,9 +25,7 @@ class ExtractAlembic(publish.Extractor): label = "Extract Pointcache (Alembic)" hosts = ["maya"] - families = ["pointcache", - "model", - "vrayproxy"] + families = ["pointcache", "model", "vrayproxy"] targets = ["local", "remote"] def process(self, instance): @@ -87,6 +87,7 @@ class ExtractAlembic(publish.Extractor): end=end)) suspend = not instance.data.get("refresh", False) + self.log.info(nodes) with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(nodes, noExpand=True) @@ -101,9 +102,9 @@ class ExtractAlembic(publish.Extractor): instance.data["representations"] = [] representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, + "name": "abc", + "ext": "abc", + "files": filename, "stagingDir": dirname } instance.data["representations"].append(representation) @@ -112,6 +113,48 @@ class ExtractAlembic(publish.Extractor): self.log.info("Extracted {} to {}".format(instance, dirname)) + # Extract proxy. + if not instance.data.get("proxy"): + return + + path = path.replace(".abc", "_proxy.abc") + if not instance.data.get("includeParentHierarchy", True): + # Set the root nodes if we don't want to include parents + # The roots are to be considered the ones that are the actual + # direct members of the set + options["root"] = instance.data["proxyRoots"] + + with suspended_refresh(suspend=suspend): + with maintained_selection(): + cmds.select(instance.data["proxy"]) + extract_alembic( + file=path, + startFrame=start, + endFrame=end, + **options + ) + + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({"ext": "abc"}) + templates = instance.context.data["anatomy"].templates["publish"] + published_filename_without_extension = StringTemplate( + templates["file"] + ).format(template_data).replace(".abc", "_proxy") + transfers = [] + destination = os.path.join( + instance.data["resourcesDir"], + filename.replace( + filename.split(".")[0], + published_filename_without_extension + ) + ) + transfers.append((path, destination)) + + for source, destination in transfers: + self.log.debug("Transfer: {} > {}".format(source, destination)) + + instance.data["transfers"] = transfers + def get_members_and_roots(self, instance): return instance[:], instance.data.get("setMembers") diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 1edafeb926..1d94bd58c5 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -134,8 +134,8 @@ class ExtractThumbnail(publish.Extractor): # Update preset with current panel setting # if override_viewport_options is turned off - if not override_viewport_options: - panel = cmds.getPanel(withFocus=True) + panel = 
cmds.getPanel(withFocus=True) or "" + if not override_viewport_options and "modelPanel" in panel: panel_preset = capture.parse_active_view() preset.update(panel_preset) cmds.setFocus(panel) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py new file mode 100644 index 0000000000..3b0ffd52d7 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -0,0 +1,106 @@ +import maya.cmds as cmds + +import pyblish.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishValidationError +) + + +class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source. + + We require at least one root node/parent for the meshes, so the nodes + can be duplicated while preserving their names. + + When using proxies, the content and proxy nodes need to share the same + names and must not be parented to the world. In practice this requires + at least two groups: one containing the content nodes and another + containing the proxy nodes. + """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass"] + label = "Validate Arnold Scene Source" + + def _get_nodes_data(self, nodes): + ungrouped_nodes = [] + nodes_by_name = {} + parents = [] + for node in nodes: + node_split = node.split("|") + if len(node_split) == 2: + ungrouped_nodes.append(node) + + parent = "|".join(node_split[:-1]) + if parent: + parents.append(parent) + + nodes_by_name[node_split[-1]] = node + for shape in cmds.listRelatives(node, shapes=True): + nodes_by_name[shape.split("|")[-1]] = shape + + return ungrouped_nodes, nodes_by_name, parents + + def process(self, instance): + ungrouped_nodes = [] + + nodes, content_nodes_by_name, content_parents = self._get_nodes_data( + instance.data["setMembers"] + ) + ungrouped_nodes.extend(nodes) + + nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_data( + instance.data.get("proxy", []) + ) + ungrouped_nodes.extend(nodes) + + # Validate against nodes directly parented to the world. + if ungrouped_nodes: + raise PublishValidationError( + "Found nodes parented to the world: {}\n" + "All nodes need to be grouped.".format(ungrouped_nodes) + ) + + # Proxy validation. + if not instance.data.get("proxy", []): + return + + # Validate that the number of content and proxy nodes is the same. + if len(instance.data["setMembers"]) != len(instance.data["proxy"]): + raise PublishValidationError( + "The number of content nodes ({}) and proxy nodes ({}) must " + "be the same.".format( + len(instance.data["setMembers"]), + len(instance.data["proxy"]) + ) + ) + + # Validate against content and proxy nodes sharing the same parent. + if list(set(content_parents) & set(proxy_parents)): + raise PublishValidationError( + "Content and proxy nodes cannot share the same parent." + ) + + # Validate that content and proxy nodes share the same names.
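+ # Compare the sorted short names of both sides and report any nodes + # that only appear in one of the two sets.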
+ sorted_content_names = sorted(content_nodes_by_name.keys()) + sorted_proxy_names = sorted(proxy_nodes_by_name.keys()) + odd_content_names = list( + set(sorted_content_names) - set(sorted_proxy_names) + ) + odd_content_nodes = [ + content_nodes_by_name[x] for x in odd_content_names + ] + odd_proxy_names = list( + set(sorted_proxy_names) - set(sorted_content_names) + ) + odd_proxy_nodes = [ + proxy_nodes_by_name[x] for x in odd_proxy_names + ] + if not sorted_content_names == sorted_proxy_names: + raise PublishValidationError( + "Content and proxy nodes need to share the same names.\n" + "Content nodes not matching: {}\n" + "Proxy nodes not matching: {}".format( + odd_content_nodes, odd_proxy_nodes + ) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index e6fabb1712..357dde692c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -4,7 +4,6 @@ import pyblish.api import openpype.hosts.maya.api.lib as mayalib from openpype.pipeline.context_tools import get_current_project_asset -from math import ceil from openpype.pipeline.publish import ( RepairContextAction, ValidateSceneOrder, @@ -33,18 +32,11 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): linearunits = context.data.get('linearUnits') angularunits = context.data.get('angularUnits') - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - # NOTE: this must be revisited yet again as it seems that Ftrack is - # now flooring the value? 
- fps = mayalib.float_round(context.data.get('fps'), 2, ceil) + fps = context.data.get('fps') # TODO repace query with using 'context.data["assetEntity"]' asset_doc = get_current_project_asset() - asset_fps = asset_doc["data"]["fps"] + asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"]) self.log.info('Units (linear): {0}'.format(linearunits)) self.log.info('Units (angular): {0}'.format(angularunits)) diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index cdea82cb05..3d82d6b6f0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -193,7 +193,7 @@ class ImageCreator(Creator): instance_data.pop("uuid") if not instance_data.get("task"): - instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + instance_data["task"] = self.create_context.get_current_task_name() if not instance_data.get("variant"): instance_data["variant"] = '' diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py index 8ee9a0d832..f5d56adcbc 100644 --- a/openpype/hosts/photoshop/plugins/create/workfile_creator.py +++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py @@ -2,8 +2,7 @@ import openpype.hosts.photoshop.api as api from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, - legacy_io + CreatedInstance ) from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances @@ -38,10 +37,11 @@ class PSWorkfileCreator(AutoCreator): existing_instance = instance break - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index 6ac3e6324c..e94e64e04a 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc): self.add_methods( (route_name, self.workfiles_tool), (route_name, self.loader_tool), - (route_name, self.creator_tool), - (route_name, self.subset_manager_tool), (route_name, self.publish_tool), (route_name, self.scene_inventory_tool), (route_name, self.library_loader_tool), @@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc): self._execute_in_main_thread(item) return - async def creator_tool(self): - log.info("Triggering Creator tool") - item = MainThreadItem(self.tools_helper.show_creator) - await self._async_execute_in_main_thread(item, wait=False) - - async def subset_manager_tool(self): - log.info("Triggering Subset Manager tool") - item = MainThreadItem(self.tools_helper.show_subset_manager) - # Do not wait for result of callback - self._execute_in_main_thread(item, wait=False) - return - async def publish_tool(self): log.info("Triggering Publish tool") - item = MainThreadItem(self.tools_helper.show_publish) + item = MainThreadItem(self.tools_helper.show_publisher_tool) 
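+ # The publisher tool replaces the separate creator and subset manager + # tools whose routes were removed above.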
self._execute_in_main_thread(item) return @@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator): "callback": "loader_tool", "label": "Load", "help": "Open loader tool" - }, { - "callback": "creator_tool", - "label": "Create", - "help": "Open creator tool" }, { "callback": "scene_inventory_tool", "label": "Scene inventory", @@ -875,10 +857,6 @@ class QtCommunicator(BaseCommunicator): "callback": "library_loader_tool", "label": "Library", "help": "Open library loader tool" - }, { - "callback": "subset_manager_tool", - "label": "Subset Manager", - "help": "Open subset manager tool" }, { "callback": "experimental_tools", "label": "Experimental tools", diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 5e64773b8e..312a211d49 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -202,8 +202,9 @@ def get_groups_data(communicator=None): # Variable containing full path to output file "output_path = \"{}\"".format(output_filepath), "empty = 0", - # Loop over 100 groups - "FOR idx = 1 TO 100", + # Loop over 26 groups which is ATM maximum possible (in 11.7) + # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880 + "FOR idx = 1 TO 26", # Receive information about groups "tv_layercolor \"getcolor\" 0 idx", "PARSE result clip_id group_index c_red c_green c_blue group_name", diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 249326791b..575e6aa755 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -8,7 +8,7 @@ import requests import pyblish.api from openpype.client import get_project, get_asset_by_name -from openpype.host import HostBase, IWorkfileHost, ILoadHost +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR from openpype.settings import get_current_project_settings from openpype.lib import register_event_callback @@ -18,6 +18,7 @@ from openpype.pipeline import ( register_creator_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.context_tools import get_global_context from .lib import ( execute_george, @@ -29,6 +30,7 @@ log = logging.getLogger(__name__) METADATA_SECTION = "avalon" SECTION_NAME_CONTEXT = "context" +SECTION_NAME_CREATE_CONTEXT = "create_context" SECTION_NAME_INSTANCES = "instances" SECTION_NAME_CONTAINERS = "containers" # Maximum length of metadata chunk string @@ -58,7 +60,7 @@ instances=2 """ -class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): +class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): name = "tvpaint" def install(self): @@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): registered_callbacks = ( pyblish.api.registered_callbacks().get("instanceToggled") or [] ) - if self.on_instance_toggle not in registered_callbacks: - pyblish.api.register_callback( - "instanceToggled", self.on_instance_toggle - ) register_event_callback("application.launched", self.initial_launch) register_event_callback("application.exit", self.application_exit) + def get_current_project_name(self): + """ + Returns: + Union[str, None]: Current project name. + """ + + return self.get_current_context().get("project_name") + + def get_current_asset_name(self): + """ + Returns: + Union[str, None]: Current asset name. + """ + + return self.get_current_context().get("asset_name") + + def get_current_task_name(self): + """ + Returns: + Union[str, None]: Current task name. 
+ """ + + return self.get_current_context().get("task_name") + + def get_current_context(self): + context = get_current_workfile_context() + if not context: + return get_global_context() + + if "project_name" in context: + return context + # This is legacy way how context was stored + return { + "project_name": context.get("project"), + "asset_name": context.get("asset"), + "task_name": context.get("task") + } + + # --- Create --- + def get_context_data(self): + return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {}) + + def update_context_data(self, data, changes): + return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data) + + def list_instances(self): + """List all created instances from current workfile.""" + return list_instances() + + def write_instances(self, data): + return write_instances(data) + + # --- Workfile --- def open_workfile(self, filepath): george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( filepath.replace("\\", "/") @@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): def save_workfile(self, filepath=None): if not filepath: filepath = self.get_current_workfile() - context = { - "project": legacy_io.Session["AVALON_PROJECT"], - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] - } + context = get_global_context() save_current_workfile_context(context) # Execute george script to save workfile. @@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): def get_workfile_extensions(self): return [".tvpp"] + # --- Load --- def get_containers(self): return get_containers() @@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): return log.info("Setting up project...") - set_context_settings() - - def remove_instance(self, instance): - """Remove instance from current workfile metadata. - - Implementation for Subset manager tool. - """ - - current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES) - instance_id = instance.get("uuid") - found_idx = None - if instance_id: - for idx, _inst in enumerate(current_instances): - if _inst["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: + global_context = get_global_context() + project_name = global_context.get("project_name") + asset_name = global_context.get("aset_name") + if not project_name or not asset_name: return - current_instances.pop(found_idx) - write_instances(current_instances) + + asset_doc = get_asset_by_name(project_name, asset_name) + + set_context_settings(project_name, asset_doc) def application_exit(self): """Logic related to TimerManager. 
@@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) requests.post(rest_api_url) - def on_instance_toggle(self, instance, old_value, new_value): - """Update instance data in workfile on publish toggle.""" - # Review may not have real instance in wokrfile metadata - if not instance.data.get("uuid"): - return - - instance_id = instance.data["uuid"] - found_idx = None - current_instances = list_instances() - for idx, workfile_instance in enumerate(current_instances): - if workfile_instance["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: - return - - if "active" in current_instances[found_idx]: - current_instances[found_idx]["active"] = new_value - self.write_instances(current_instances) - - def list_instances(self): - """List all created instances from current workfile.""" - return list_instances() - - def write_instances(self, data): - return write_instances(data) - def containerise( name, namespace, members, context, loader, current_containers=None @@ -462,40 +470,17 @@ def get_containers(): return output -def set_context_settings(asset_doc=None): +def set_context_settings(project_name, asset_doc): """Set workfile settings by asset document data. Change fps, resolution and frame start/end. """ - project_name = legacy_io.active_project() - if asset_doc is None: - asset_name = legacy_io.Session["AVALON_ASSET"] - # Use current session asset if not passed - asset_doc = get_asset_by_name(project_name, asset_name) - - project_doc = get_project(project_name) - - framerate = asset_doc["data"].get("fps") - if framerate is None: - framerate = project_doc["data"].get("fps") - - if framerate is not None: - execute_george( - "tv_framerate {} \"timestretch\"".format(framerate) - ) - else: - print("Framerate was not found!") - width_key = "resolutionWidth" height_key = "resolutionHeight" width = asset_doc["data"].get(width_key) height = asset_doc["data"].get(height_key) - if width is None or height is None: - width = project_doc["data"].get(width_key) - height = project_doc["data"].get(height_key) - if width is None or height is None: print("Resolution was not found!") else: @@ -503,6 +488,15 @@ def set_context_settings(asset_doc=None): "tv_resizepage {} {} 0".format(width, height) ) + framerate = asset_doc["data"].get("fps") + + if framerate is not None: + execute_george( + "tv_framerate {} \"timestretch\"".format(framerate) + ) + else: + print("Framerate was not found!") + frame_start = asset_doc["data"].get("frameStart") frame_end = asset_doc["data"].get("frameEnd") diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index da456e7067..96b99199f2 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -1,80 +1,142 @@ import re -import uuid -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, - registered_host, +from openpype.pipeline import LoaderPlugin +from openpype.pipeline.create import ( + CreatedInstance, + get_subset_name, + AutoCreator, + Creator, ) +from openpype.pipeline.create.creator_plugins import cache_and_get_instances from .lib import get_layers_data -from .pipeline import get_current_workfile_context -class Creator(LegacyCreator): - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - # Add unified identifier created with `uuid` module - self.data["uuid"] = str(uuid.uuid4()) +SHARED_DATA_KEY = "openpype.tvpaint.instances" - @classmethod - def 
get_dynamic_data(cls, *args, **kwargs): - dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs) - # Change asset and name by current workfile context - workfile_context = get_current_workfile_context() - asset_name = workfile_context.get("asset") - task_name = workfile_context.get("task") - if "asset" not in dynamic_data and asset_name: - dynamic_data["asset"] = asset_name +class TVPaintCreatorCommon: + @property + def subset_template_family_filter(self): + return self.family - if "task" not in dynamic_data and task_name: - dynamic_data["task"] = task_name - return dynamic_data - - @staticmethod - def are_instances_same(instance_1, instance_2): - """Compare instances but skip keys with unique values. - - During compare are skipped keys that will be 100% sure - different on new instance, like "id". - - Returns: - bool: True if instances are same. - """ - if ( - not isinstance(instance_1, dict) - or not isinstance(instance_2, dict) - ): - return instance_1 == instance_2 - - checked_keys = set() - checked_keys.add("id") - for key, value in instance_1.items(): - if key not in checked_keys: - if key not in instance_2: - return False - if value != instance_2[key]: - return False - checked_keys.add(key) - - for key in instance_2.keys(): - if key not in checked_keys: - return False - return True - - def write_instances(self, data): - self.log.debug( - "Storing instance data to workfile. {}".format(str(data)) + def _cache_and_get_instances(self): + return cache_and_get_instances( + self, SHARED_DATA_KEY, self.host.list_instances ) - host = registered_host() - return host.write_instances(data) - def process(self): - host = registered_host() - data = host.list_instances() - data.append(self.data) - self.write_instances(data) + def _collect_create_instances(self): + instances_by_identifier = self._cache_and_get_instances() + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def _update_create_instances(self, update_list): + if not update_list: + return + + cur_instances = self.host.list_instances() + cur_instances_by_id = {} + for instance_data in cur_instances: + instance_id = instance_data.get("instance_id") + if instance_id: + cur_instances_by_id[instance_id] = instance_data + + for instance, changes in update_list: + instance_data = changes.new_value + cur_instance_data = cur_instances_by_id.get(instance.id) + if cur_instance_data is None: + cur_instances.append(instance_data) + continue + for key in set(cur_instance_data) - set(instance_data): + cur_instance_data.pop(key) + cur_instance_data.update(instance_data) + self.host.write_instances(cur_instances) + + def _custom_get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + dynamic_data = self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance + ) + + return get_subset_name( + self.family, + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data, + project_settings=self.project_settings, + family_filter=self.subset_template_family_filter + ) + + +class TVPaintCreator(Creator, TVPaintCreatorCommon): + def collect_instances(self): + self._collect_create_instances() + + def update_instances(self, update_list): + self._update_create_instances(update_list) + + def remove_instances(self, instances): + ids_to_remove = { + instance.id + for instance in instances + } + cur_instances = 
self.host.list_instances()
+        changed = False
+        new_instances = []
+        for instance_data in cur_instances:
+            if instance_data.get("instance_id") in ids_to_remove:
+                changed = True
+            else:
+                new_instances.append(instance_data)
+
+        if changed:
+            self.host.write_instances(new_instances)
+
+        for instance in instances:
+            self._remove_instance_from_context(instance)
+
+    def get_dynamic_data(self, *args, **kwargs):
+        # Change asset and task name by current workfile context
+        create_context = self.create_context
+        asset_name = create_context.get_current_asset_name()
+        task_name = create_context.get_current_task_name()
+        output = {}
+        if asset_name:
+            output["asset"] = asset_name
+        if task_name:
+            output["task"] = task_name
+        return output
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
+
+    def _store_new_instance(self, new_instance):
+        instances_data = self.host.list_instances()
+        instances_data.append(new_instance.data_to_store())
+        self.host.write_instances(instances_data)
+        self._add_instance_to_context(new_instance)
+
+
+class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
+    def collect_instances(self):
+        self._collect_create_instances()
+
+    def update_instances(self, update_list):
+        self._update_create_instances(update_list)
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
 
 
 class Loader(LoaderPlugin):
diff --git a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..538c6e4c5e
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
@@ -0,0 +1,150 @@
+import collections
+
+from openpype.pipeline.create.creator_plugins import (
+    SubsetConvertorPlugin,
+    cache_and_get_instances,
+)
+from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
+from openpype.hosts.tvpaint.api.lib import get_groups_data
+
+
+class TVPaintLegacyConverted(SubsetConvertorPlugin):
+    """Conversion of legacy instances in scene to new creators.
+
+    This convertor handles only instances created by core creators.
+
+    All instances that would be created by auto-creators are removed,
+    because auto-created instances already exist by the time the legacy
+    ones are found.
+    """
+
+    identifier = "tvpaint.legacy.converter"
+
+    def find_instances(self):
+        instances_by_identifier = cache_and_get_instances(
+            self, SHARED_DATA_KEY, self.host.list_instances
+        )
+        if instances_by_identifier[None]:
+            self.add_convertor_item("Convert legacy instances")
+
+    def convert(self):
+        current_instances = self.host.list_instances()
+        to_convert = collections.defaultdict(list)
+        converted = False
+        for instance in current_instances:
+            if instance.get("creator_identifier") is not None:
+                continue
+            converted = True
+
+            family = instance.get("family")
+            if family in (
+                "renderLayer",
+                "renderPass",
+                "renderScene",
+                "review",
+                "workfile",
+            ):
+                to_convert[family].append(instance)
+            else:
+                instance["keep"] = False
+
+        # Skip if nothing was changed
+        if not converted:
+            self.remove_convertor_item()
+            return
+
+        self._convert_render_layers(
+            to_convert["renderLayer"], current_instances)
+        self._convert_render_passes(
+            to_convert["renderPass"], current_instances)
+        self._convert_render_scenes(
+            to_convert["renderScene"], current_instances)
+        self._convert_workfiles(
+            to_convert["workfile"], current_instances)
+        self._convert_reviews(
+            to_convert["review"], current_instances)
+
+        new_instances = [
+            instance
+            for instance in current_instances
+            if instance.get("keep") is not False
+        ]
+        self.host.write_instances(new_instances)
+        # remove legacy item if all is fine
+        self.remove_convertor_item()
+
+    def _convert_render_layers(self, render_layers, current_instances):
+        if not render_layers:
+            return
+
+        # Look for possible existing render layers in scene
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        groups_by_id = {
+            group["group_id"]: group
+            for group in get_groups_data()
+        }
+        for render_layer in render_layers:
+            group_id = render_layer.pop("group_id")
+            # Just remove legacy instance if group is already occupied
+            if group_id in render_layers_by_group_id:
+                render_layer["keep"] = False
+                continue
+            # Add identifier
+            render_layer["creator_identifier"] = "render.layer"
+            # Change 'uuid' to 'instance_id'
+            render_layer["instance_id"] = render_layer.pop("uuid")
+            # Fill creator attributes
+            render_layer["creator_attributes"] = {
+                "group_id": group_id
+            }
+            render_layer["family"] = "render"
+            group = groups_by_id[group_id]
+            # Use group name for variant
+            render_layer["variant"] = group["name"]
+
+    def _convert_render_passes(self, render_passes, current_instances):
+        if not render_passes:
+            return
+
+        # Render passes need their render layers to be available, so look
+        # for render layers first
+        # - '_convert_render_layers' must be called before this method
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        for render_pass in render_passes:
+            group_id = render_pass.pop("group_id")
+            render_layer = render_layers_by_group_id.get(group_id)
+            if not render_layer:
+                render_pass["keep"] = False
+                continue
+
+            render_pass["creator_identifier"] = "render.pass"
+            render_pass["instance_id"] = render_pass.pop("uuid")
+            render_pass["family"] = "render"
+
+            render_pass["creator_attributes"] = {
+                "render_layer_instance_id": render_layer["instance_id"]
+            }
+            render_pass["variant"] = render_pass.pop("pass")
+            render_pass.pop("renderlayer")
+
+    # Rest of instances are just marked for deletion
+    def _convert_render_scenes(self, render_scenes, current_instances):
+        for render_scene in render_scenes:
+            render_scene["keep"] = False
+
+    def _convert_workfiles(self, workfiles, current_instances):
+        for workfile in workfiles:
+            workfile["keep"] = False
+
+    def _convert_reviews(self, reviews, current_instances):
+        for review in reviews:
+            review["keep"] = False
diff --git a/openpype/hosts/tvpaint/plugins/create/create_render.py b/openpype/hosts/tvpaint/plugins/create/create_render.py
new file mode 100644
index 0000000000..6a857676a5
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/create/create_render.py
@@ -0,0 +1,739 @@
+"""Render Layer and Passes creators.
+
+A Render Layer is the main part and is represented by a color group in
+TVPaint. All TVPaint layers marked with that group color are part of the
+render layer. To be more specific about parts of the layer it is possible
+to create subsets of the layer which are named passes. A render pass
+consists of layers in the same color group as the render layer but defines
+a more specific part.
+
+For example a render layer could be 'Bob', consisting of 5 TVPaint layers.
+- Bob has a 'head' made of 2 TVPaint layers -> Render pass 'head'
+- Bob has a 'body' made of 1 TVPaint layer -> Render pass 'body'
+- Bob has an 'arm' made of 1 TVPaint layer -> Render pass 'arm'
+- The last layer does not belong to any render pass
+
+Bob will be rendered as his 'beauty' (all visible layers in the group).
+His head will be rendered too, but without any other parts; the same goes
+for body and arm.
+
+What is this good for? Compositing gets more control over how the renders
+are used and can transform each render pass without having to modify and
+re-render them in TVPaint.
+
+The workflow may hit issues when blending modes other than the default
+'color' blend mode are used. In that case it is not recommended to use this
+workflow at all, as other blend modes may affect all layers in the clip,
+which cannot be reproduced per pass.
+
+There is a special case for simple publishing of the whole scene, called
+'render.scene'. It uses all visible layers and renders them as one
+sequence.
+
+Todos:
+    Add option to extract marked layers and passes as json output format for
+    AfterEffects.
+"""
+
+import collections
+
+from openpype.client import get_asset_by_name
+from openpype.lib import (
+    prepare_template_data,
+    EnumDef,
+    TextDef,
+    BoolDef,
+)
+from openpype.pipeline.create import (
+    CreatedInstance,
+    CreatorError,
+)
+from openpype.hosts.tvpaint.api.plugin import (
+    TVPaintCreator,
+    TVPaintAutoCreator,
+)
+from openpype.hosts.tvpaint.api.lib import (
+    get_layers_data,
+    get_groups_data,
+    execute_george_through_file,
+)
+
+RENDER_LAYER_DETAILED_DESCRIPTIONS = (
+    """Render Layer is "a group of TVPaint layers"
+
+Be aware that a Render Layer is not a TVPaint layer.
+
+All TVPaint layers in the scene with the color group id are rendered in the
+beauty pass. To create sub passes use the Render Pass creator, which depends
+on the existence of a render layer instance.
+
+The group can represent an asset (tree) or a different part of the scene
+that consists of one or more TVPaint layers and can be used as a single item
+during compositing (for example).
+
+In some cases it may be necessary to have sub parts of the layer. For
+example 'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as
+Render Passes.
+"""
+)
+
+
+RENDER_PASS_DETAILED_DESCRIPTIONS = (
+    """Render Pass is a sub part of a Render Layer.
+
+A Render Pass can consist of one or more TVPaint layers. Render Passes must
+belong to a Render Layer. Marked TVPaint layers will change their group
+color to match the group color of the Render Layer.
+"""
+)
+
+
+class CreateRenderlayer(TVPaintCreator):
+    """Mark layer group as Render layer instance.
+
+    All TVPaint layers in the scene with the color group id are rendered in
+    the beauty pass. To create sub passes use the Render Pass creator, which
+    depends on the existence of a render layer instance.
+    """
+
+    label = "Render Layer"
+    family = "render"
+    subset_template_family_filter = "renderLayer"
+    identifier = "render.layer"
+    icon = "fa5.images"
+
+    # George script to change color group
+    rename_script_template = (
+        "tv_layercolor \"setcolor\""
+        " {clip_id} {group_id} {r} {g} {b} \"{name}\""
+    )
+    # Order to be executed before Render Pass creator
+    order = 90
+    description = "Mark TVPaint color group as one Render Layer."
+    detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS
+
+    # Settings
+    # - Default render pass name for beauty
+    default_pass_name = "beauty"
+    # - Mark instance for review by default
+    mark_for_review = True
+
+    def apply_settings(self, project_settings, system_settings):
+        plugin_settings = (
+            project_settings["tvpaint"]["create"]["create_render_layer"]
+        )
+        self.default_variant = plugin_settings["default_variant"]
+        self.default_variants = plugin_settings["default_variants"]
+        self.default_pass_name = plugin_settings["default_pass_name"]
+        self.mark_for_review = plugin_settings["mark_for_review"]
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        dynamic_data = super().get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name, instance
+        )
+        dynamic_data["renderpass"] = self.default_pass_name
+        dynamic_data["renderlayer"] = variant
+        return dynamic_data
+
+    def _get_selected_group_ids(self):
+        return {
+            layer["group_id"]
+            for layer in get_layers_data()
+            if layer["selected"]
+        }
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        self.log.debug("Query data from workfile.")
+
+        group_name = instance_data["variant"]
+        group_id = pre_create_data.get("group_id")
+        # This creator should run only on one group
+        if group_id is None or group_id == -1:
+            selected_groups = self._get_selected_group_ids()
+            selected_groups.discard(0)
+            if len(selected_groups) > 1:
+                raise CreatorError("You have selected more than one group")
+
+            if len(selected_groups) == 0:
+                raise CreatorError("You don't have any group selected")
+            group_id = tuple(selected_groups)[0]
+
+        self.log.debug("Querying groups data from workfile.")
+        groups_data = get_groups_data()
+        group_item = None
+        for group_data in groups_data:
+            if group_data["group_id"] == group_id:
+                group_item = group_data
+
+        for instance in self.create_context.instances:
+            if (
+                instance.creator_identifier == self.identifier
+                and instance["creator_attributes"]["group_id"] == group_id
+            ):
+                raise CreatorError((
+                    f"Group \"{group_item.get('name')}\" is already used"
+                    f" by another render layer \"{instance['subset']}\""
+                ))
+
+        self.log.debug(f"Selected group id is \"{group_id}\".")
+        if "creator_attributes" not in instance_data:
+            instance_data["creator_attributes"] = {}
+        creator_attributes = instance_data["creator_attributes"]
+        mark_for_review = pre_create_data.get("mark_for_review")
+        if mark_for_review is None:
+            mark_for_review = self.mark_for_review
+        creator_attributes["group_id"] = group_id
+        creator_attributes["mark_for_review"] = 
mark_for_review + + self.log.info(f"Subset name is {subset_name}") + new_instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self + ) + self._store_new_instance(new_instance) + + if not group_id or group_item["name"] == group_name: + return new_instance + + self.log.debug("Changing name of the group.") + # Rename TVPaint group (keep color same) + # - groups can't contain spaces + rename_script = self.rename_script_template.format( + clip_id=group_item["clip_id"], + group_id=group_item["group_id"], + r=group_item["red"], + g=group_item["green"], + b=group_item["blue"], + name=group_name + ) + execute_george_through_file(rename_script) + + self.log.info(( + f"Name of group with index {group_id}" + f" was changed to \"{group_name}\"." + )) + return new_instance + + def _get_groups_enum(self): + groups_enum = [] + empty_groups = [] + for group in get_groups_data(): + group_name = group["name"] + item = { + "label": group_name, + "value": group["group_id"] + } + # TVPaint have defined how many color groups is available, but + # the count is not consistent across versions. It is not possible + # to know how many groups there is. + # + if group_name and group_name != "0": + if empty_groups: + groups_enum.extend(empty_groups) + empty_groups = [] + groups_enum.append(item) + else: + empty_groups.append(item) + return groups_enum + + def get_pre_create_attr_defs(self): + groups_enum = self._get_groups_enum() + groups_enum.insert(0, {"label": "", "value": -1}) + + return [ + EnumDef( + "group_id", + label="Group", + items=groups_enum + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + + def get_instance_attr_defs(self): + groups_enum = self._get_groups_enum() + return [ + EnumDef( + "group_id", + label="Group", + items=groups_enum + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + + def update_instances(self, update_list): + self._update_color_groups() + self._update_renderpass_groups() + + super().update_instances(update_list) + + def _update_color_groups(self): + render_layer_instances = [] + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + render_layer_instances.append(instance) + + if not render_layer_instances: + return + + groups_by_id = { + group["group_id"]: group + for group in get_groups_data() + } + grg_script_lines = [] + for instance in render_layer_instances: + group_id = instance["creator_attributes"]["group_id"] + variant = instance["variant"] + group = groups_by_id[group_id] + if group["name"] == variant: + continue + + grg_script_lines.append(self.rename_script_template.format( + clip_id=group["clip_id"], + group_id=group["group_id"], + r=group["red"], + g=group["green"], + b=group["blue"], + name=variant + )) + + if grg_script_lines: + execute_george_through_file("\n".join(grg_script_lines)) + + def _update_renderpass_groups(self): + render_layer_instances = {} + render_pass_instances = collections.defaultdict(list) + + for instance in self.create_context.instances: + if instance.creator_identifier == CreateRenderPass.identifier: + render_layer_id = ( + instance["creator_attributes"]["render_layer_instance_id"] + ) + render_pass_instances[render_layer_id].append(instance) + elif instance.creator_identifier == self.identifier: + render_layer_instances[instance.id] = instance + + if not render_pass_instances or not render_layer_instances: + return + + layers_data = get_layers_data() + layers_by_name = 
collections.defaultdict(list) + for layer in layers_data: + layers_by_name[layer["name"]].append(layer) + + george_lines = [] + for render_layer_id, instances in render_pass_instances.items(): + render_layer_inst = render_layer_instances.get(render_layer_id) + if render_layer_inst is None: + continue + group_id = render_layer_inst["creator_attributes"]["group_id"] + layer_names = set() + for instance in instances: + layer_names |= set(instance["layer_names"]) + + for layer_name in layer_names: + george_lines.extend( + f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" + for layer in layers_by_name[layer_name] + if layer["group_id"] != group_id + ) + if george_lines: + execute_george_through_file("\n".join(george_lines)) + + +class CreateRenderPass(TVPaintCreator): + family = "render" + subset_template_family_filter = "renderPass" + identifier = "render.pass" + label = "Render Pass" + icon = "fa5.image" + description = "Mark selected TVPaint layers as pass of Render Layer." + detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS + + order = CreateRenderlayer.order + 10 + + # Settings + mark_for_review = True + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_render_pass"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.mark_for_review = plugin_settings["mark_for_review"] + + def collect_instances(self): + instances_by_identifier = self._cache_and_get_instances() + render_layers = { + instance_data["instance_id"]: { + "variant": instance_data["variant"], + "template_data": prepare_template_data({ + "renderlayer": instance_data["variant"] + }) + } + for instance_data in ( + instances_by_identifier[CreateRenderlayer.identifier] + ) + } + + for instance_data in instances_by_identifier[self.identifier]: + render_layer_instance_id = ( + instance_data + .get("creator_attributes", {}) + .get("render_layer_instance_id") + ) + render_layer_info = render_layers.get(render_layer_instance_id) + self.update_instance_labels( + instance_data, + render_layer_info["variant"], + render_layer_info["template_data"] + ) + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def get_dynamic_data( + self, variant, task_name, asset_doc, project_name, host_name, instance + ): + dynamic_data = super().get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance + ) + dynamic_data["renderpass"] = variant + dynamic_data["renderlayer"] = "{renderlayer}" + return dynamic_data + + def update_instance_labels( + self, instance, render_layer_variant, render_layer_data=None + ): + old_label = instance.get("label") + old_group = instance.get("group") + new_label = None + new_group = None + if render_layer_variant is not None: + if render_layer_data is None: + render_layer_data = prepare_template_data({ + "renderlayer": render_layer_variant + }) + try: + new_label = instance["subset"].format(**render_layer_data) + except (KeyError, ValueError): + pass + + new_group = f"{self.get_group_label()} ({render_layer_variant})" + + instance["label"] = new_label + instance["group"] = new_group + return old_group != new_group or old_label != new_label + + def create(self, subset_name, instance_data, pre_create_data): + render_layer_instance_id = pre_create_data.get( + "render_layer_instance_id" + ) + if not render_layer_instance_id: + raise CreatorError("Missing RenderLayer instance") + 
+ render_layer_instance = self.create_context.instances_by_id.get( + render_layer_instance_id + ) + if render_layer_instance is None: + raise CreatorError(( + "RenderLayer instance was not found" + f" by id \"{render_layer_instance_id}\"" + )) + + group_id = render_layer_instance["creator_attributes"]["group_id"] + self.log.debug("Query data from workfile.") + layers_data = get_layers_data() + + self.log.debug("Checking selection.") + # Get all selected layers and their group ids + marked_layer_names = pre_create_data.get("layer_names") + if marked_layer_names is not None: + layers_by_name = {layer["name"]: layer for layer in layers_data} + marked_layers = [] + for layer_name in marked_layer_names: + layer = layers_by_name.get(layer_name) + if layer is None: + raise CreatorError( + f"Layer with name \"{layer_name}\" was not found") + marked_layers.append(layer) + + else: + marked_layers = [ + layer + for layer in layers_data + if layer["selected"] + ] + + # Raise if nothing is selected + if not marked_layers: + raise CreatorError( + "Nothing is selected. Please select layers.") + + marked_layer_names = {layer["name"] for layer in marked_layers} + + marked_layer_names = set(marked_layer_names) + + instances_to_remove = [] + for instance in self.create_context.instances: + if instance.creator_identifier != self.identifier: + continue + cur_layer_names = set(instance["layer_names"]) + if not cur_layer_names.intersection(marked_layer_names): + continue + new_layer_names = cur_layer_names - marked_layer_names + if new_layer_names: + instance["layer_names"] = list(new_layer_names) + else: + instances_to_remove.append(instance) + + render_layer = render_layer_instance["variant"] + subset_name_fill_data = {"renderlayer": render_layer} + + # Format dynamic keys in subset name + label = subset_name + try: + label = label.format( + **prepare_template_data(subset_name_fill_data) + ) + except (KeyError, ValueError): + pass + + self.log.info(f"New subset name is \"{label}\".") + instance_data["label"] = label + instance_data["group"] = f"{self.get_group_label()} ({render_layer})" + instance_data["layer_names"] = list(marked_layer_names) + if "creator_attributes" not in instance_data: + instance_data["creator_attributes"] = {} + + creator_attributes = instance_data["creator_attributes"] + mark_for_review = pre_create_data.get("mark_for_review") + if mark_for_review is None: + mark_for_review = self.mark_for_review + creator_attributes["mark_for_review"] = mark_for_review + creator_attributes["render_layer_instance_id"] = ( + render_layer_instance_id + ) + + new_instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self + ) + instances_data = self._remove_and_filter_instances( + instances_to_remove + ) + instances_data.append(new_instance.data_to_store()) + + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + self._change_layers_group(marked_layers, group_id) + + return new_instance + + def _change_layers_group(self, layers, group_id): + filtered_layers = [ + layer + for layer in layers + if layer["group_id"] != group_id + ] + if filtered_layers: + self.log.info(( + "Changing group of " + f"{','.join([l['name'] for l in filtered_layers])}" + f" to {group_id}" + )) + george_lines = [ + f"tv_layercolor \"set\" {layer['layer_id']} {group_id}" + for layer in filtered_layers + ] + execute_george_through_file("\n".join(george_lines)) + + def _remove_and_filter_instances(self, instances_to_remove): + instances_data = self.host.list_instances() + if 
not instances_to_remove: + return instances_data + + removed_ids = set() + for instance in instances_to_remove: + removed_ids.add(instance.id) + self._remove_instance_from_context(instance) + + return [ + instance_data + for instance_data in instances_data + if instance_data.get("instance_id") not in removed_ids + ] + + def get_pre_create_attr_defs(self): + render_layers = [ + { + "value": instance.id, + "label": instance.label + } + for instance in self.create_context.instances + if instance.creator_identifier == CreateRenderlayer.identifier + ] + if not render_layers: + render_layers.append({"value": None, "label": "N/A"}) + + return [ + EnumDef( + "render_layer_instance_id", + label="Render Layer", + items=render_layers + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + + def get_instance_attr_defs(self): + return self.get_pre_create_attr_defs() + + +class TVPaintSceneRenderCreator(TVPaintAutoCreator): + family = "render" + subset_template_family_filter = "renderScene" + identifier = "render.scene" + label = "Scene Render" + icon = "fa.file-image-o" + + # Settings + default_pass_name = "beauty" + mark_for_review = True + active_on_create = False + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_render_scene"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.mark_for_review = plugin_settings["mark_for_review"] + self.active_on_create = plugin_settings["active_on_create"] + self.default_pass_name = plugin_settings["default_pass_name"] + + def get_dynamic_data(self, variant, *args, **kwargs): + dynamic_data = super().get_dynamic_data(variant, *args, **kwargs) + dynamic_data["renderpass"] = "{renderpass}" + dynamic_data["renderlayer"] = variant + return dynamic_data + + def _create_new_instance(self): + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant, + "creator_attributes": { + "render_pass_name": self.default_pass_name, + "mark_for_review": True + }, + "label": self._get_label( + subset_name, + self.default_pass_name + ) + } + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + return new_instance + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + if existing_instance is None: + return self._create_new_instance() + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if ( + existing_instance["asset"] != asset_name + or 
existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name + + existing_instance["label"] = self._get_label( + existing_instance["subset"], + existing_instance["creator_attributes"]["render_pass_name"] + ) + + def _get_label(self, subset_name, render_pass_name): + try: + subset_name = subset_name.format(**prepare_template_data({ + "renderpass": render_pass_name + })) + except (KeyError, ValueError): + pass + + return subset_name + + def get_instance_attr_defs(self): + return [ + TextDef( + "render_pass_name", + label="Pass Name", + default=self.default_pass_name, + tooltip=( + "Value is calculated during publishing and UI will update" + " label after refresh." + ) + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py deleted file mode 100644 index 009b69c4f1..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ /dev/null @@ -1,231 +0,0 @@ -from openpype.lib import prepare_template_data -from openpype.pipeline import CreatorError -from openpype.hosts.tvpaint.api import ( - plugin, - CommunicationWrapper -) -from openpype.hosts.tvpaint.api.lib import ( - get_layers_data, - get_groups_data, - execute_george_through_file, -) -from openpype.hosts.tvpaint.api.pipeline import list_instances - - -class CreateRenderlayer(plugin.Creator): - """Mark layer group as one instance.""" - name = "render_layer" - label = "RenderLayer" - family = "renderLayer" - icon = "cube" - defaults = ["Main"] - - rename_group = True - render_pass = "beauty" - - rename_script_template = ( - "tv_layercolor \"setcolor\"" - " {clip_id} {group_id} {r} {g} {b} \"{name}\"" - ) - - dynamic_subset_keys = [ - "renderpass", "renderlayer", "render_pass", "render_layer", "group" - ] - - @classmethod - def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name - ): - dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name - ) - # Use render pass name from creator's plugin - dynamic_data["renderpass"] = cls.render_pass - # Add variant to render layer - dynamic_data["renderlayer"] = variant - # Change family for subset name fill - dynamic_data["family"] = "render" - - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_pass"] = dynamic_data["renderpass"] - dynamic_data["render_layer"] = dynamic_data["renderlayer"] - - return dynamic_data - - @classmethod - def get_default_variant(cls): - """Default value for variant in Creator tool. - - Method checks if TVPaint implementation is running and tries to find - selected layers from TVPaint. If only one is selected it's name is - returned. - - Returns: - str: Default variant name for Creator tool. 
- """ - # Validate that communication is initialized - if CommunicationWrapper.communicator: - # Get currently selected layers - layers_data = get_layers_data() - - selected_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - # Return layer name if only one is selected - if len(selected_layers) == 1: - return selected_layers[0]["name"] - - # Use defaults - if cls.defaults: - return cls.defaults[0] - return None - - def process(self): - self.log.debug("Query data from workfile.") - instances = list_instances() - layers_data = get_layers_data() - - self.log.debug("Checking for selection groups.") - # Collect group ids from selection - group_ids = set() - for layer in layers_data: - if layer["selected"]: - group_ids.add(layer["group_id"]) - - # Raise if there is no selection - if not group_ids: - raise CreatorError("Nothing is selected.") - - # This creator should run only on one group - if len(group_ids) > 1: - raise CreatorError("More than one group is in selection.") - - group_id = tuple(group_ids)[0] - # If group id is `0` it is `default` group which is invalid - if group_id == 0: - raise CreatorError( - "Selection is not in group. Can't mark selection as Beauty." - ) - - self.log.debug(f"Selected group id is \"{group_id}\".") - self.data["group_id"] = group_id - - group_data = get_groups_data() - group_name = None - for group in group_data: - if group["group_id"] == group_id: - group_name = group["name"] - break - - if group_name is None: - raise AssertionError( - "Couldn't find group by id \"{}\"".format(group_id) - ) - - subset_name_fill_data = { - "group": group_name - } - - family = self.family = self.data["family"] - - # Fill dynamic key 'group' - subset_name = self.data["subset"].format( - **prepare_template_data(subset_name_fill_data) - ) - self.data["subset"] = subset_name - - # Check for instances of same group - existing_instance = None - existing_instance_idx = None - # Check if subset name is not already taken - same_subset_instance = None - same_subset_instance_idx = None - for idx, instance in enumerate(instances): - if instance["family"] == family: - if instance["group_id"] == group_id: - existing_instance = instance - existing_instance_idx = idx - elif instance["subset"] == subset_name: - same_subset_instance = instance - same_subset_instance_idx = idx - - if ( - same_subset_instance_idx is not None - and existing_instance_idx is not None - ): - break - - if same_subset_instance_idx is not None: - if self._ask_user_subset_override(same_subset_instance): - instances.pop(same_subset_instance_idx) - else: - return - - if existing_instance is not None: - self.log.info( - f"Beauty instance for group id {group_id} already exists" - ", overriding" - ) - instances[existing_instance_idx] = self.data - else: - instances.append(self.data) - - self.write_instances(instances) - - if not self.rename_group: - self.log.info("Group rename function is turned off. 
Skipping") - return - - self.log.debug("Querying groups data from workfile.") - groups_data = get_groups_data() - - self.log.debug("Changing name of the group.") - selected_group = None - for group_data in groups_data: - if group_data["group_id"] == group_id: - selected_group = group_data - - # Rename TVPaint group (keep color same) - # - groups can't contain spaces - new_group_name = self.data["variant"].replace(" ", "_") - rename_script = self.rename_script_template.format( - clip_id=selected_group["clip_id"], - group_id=selected_group["group_id"], - r=selected_group["red"], - g=selected_group["green"], - b=selected_group["blue"], - name=new_group_name - ) - execute_george_through_file(rename_script) - - self.log.info( - f"Name of group with index {group_id}" - f" was changed to \"{new_group_name}\"." - ) - - def _ask_user_subset_override(self, instance): - from qtpy import QtCore - from qtpy.QtWidgets import QMessageBox - - title = "Subset \"{}\" already exist".format(instance["subset"]) - text = ( - "Instance with subset name \"{}\" already exists." - "\n\nDo you want to override existing?" - ).format(instance["subset"]) - - dialog = QMessageBox() - dialog.setWindowFlags( - dialog.windowFlags() - | QtCore.Qt.WindowStaysOnTopHint - ) - dialog.setWindowTitle(title) - dialog.setText(text) - dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No) - dialog.setDefaultButton(QMessageBox.Yes) - dialog.exec_() - if dialog.result() == QMessageBox.Yes: - return True - return False diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py deleted file mode 100644 index a44cb29f20..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ /dev/null @@ -1,167 +0,0 @@ -from openpype.pipeline import CreatorError -from openpype.lib import prepare_template_data -from openpype.hosts.tvpaint.api import ( - plugin, - CommunicationWrapper -) -from openpype.hosts.tvpaint.api.lib import get_layers_data -from openpype.hosts.tvpaint.api.pipeline import list_instances - - -class CreateRenderPass(plugin.Creator): - """Render pass is combination of one or more layers from same group. - - Requirement to create Render Pass is to have already created beauty - instance. Beauty instance is used as base for subset name. - """ - name = "render_pass" - label = "RenderPass" - family = "renderPass" - icon = "cube" - defaults = ["Main"] - - dynamic_subset_keys = [ - "renderpass", "renderlayer", "render_pass", "render_layer" - ] - - @classmethod - def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name - ): - dynamic_data = super(CreateRenderPass, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name - ) - dynamic_data["renderpass"] = variant - dynamic_data["family"] = "render" - - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_pass"] = dynamic_data["renderpass"] - - return dynamic_data - - @classmethod - def get_default_variant(cls): - """Default value for variant in Creator tool. - - Method checks if TVPaint implementation is running and tries to find - selected layers from TVPaint. If only one is selected it's name is - returned. - - Returns: - str: Default variant name for Creator tool. 
- """ - # Validate that communication is initialized - if CommunicationWrapper.communicator: - # Get currently selected layers - layers_data = get_layers_data() - - selected_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - # Return layer name if only one is selected - if len(selected_layers) == 1: - return selected_layers[0]["name"] - - # Use defaults - if cls.defaults: - return cls.defaults[0] - return None - - def process(self): - self.log.debug("Query data from workfile.") - instances = list_instances() - layers_data = get_layers_data() - - self.log.debug("Checking selection.") - # Get all selected layers and their group ids - group_ids = set() - selected_layers = [] - for layer in layers_data: - if layer["selected"]: - selected_layers.append(layer) - group_ids.add(layer["group_id"]) - - # Raise if nothing is selected - if not selected_layers: - raise CreatorError("Nothing is selected.") - - # Raise if layers from multiple groups are selected - if len(group_ids) != 1: - raise CreatorError("More than one group is in selection.") - - group_id = tuple(group_ids)[0] - self.log.debug(f"Selected group id is \"{group_id}\".") - - # Find beauty instance for selected layers - beauty_instance = None - for instance in instances: - if ( - instance["family"] == "renderLayer" - and instance["group_id"] == group_id - ): - beauty_instance = instance - break - - # Beauty is required for this creator so raise if was not found - if beauty_instance is None: - raise CreatorError("Beauty pass does not exist yet.") - - subset_name = self.data["subset"] - - subset_name_fill_data = {} - - # Backwards compatibility - # - beauty may be created with older creator where variant was not - # stored - if "variant" not in beauty_instance: - render_layer = beauty_instance["name"] - else: - render_layer = beauty_instance["variant"] - - subset_name_fill_data["renderlayer"] = render_layer - subset_name_fill_data["render_layer"] = render_layer - - # Format dynamic keys in subset name - new_subset_name = subset_name.format( - **prepare_template_data(subset_name_fill_data) - ) - self.data["subset"] = new_subset_name - self.log.info(f"New subset name is \"{new_subset_name}\".") - - family = self.data["family"] - variant = self.data["variant"] - - self.data["group_id"] = group_id - self.data["pass"] = variant - self.data["renderlayer"] = render_layer - - # Collect selected layer ids to be stored into instance - layer_names = [layer["name"] for layer in selected_layers] - self.data["layer_names"] = layer_names - - # Check if same instance already exists - existing_instance = None - existing_instance_idx = None - for idx, instance in enumerate(instances): - if ( - instance["family"] == family - and instance["group_id"] == group_id - and instance["pass"] == variant - ): - existing_instance = instance - existing_instance_idx = idx - break - - if existing_instance is not None: - self.log.info( - f"Render pass instance for group id {group_id}" - f" and name \"{variant}\" already exists, overriding." 
- ) - instances[existing_instance_idx] = self.data - else: - instances.append(self.data) - - self.write_instances(instances) diff --git a/openpype/hosts/tvpaint/plugins/create/create_review.py b/openpype/hosts/tvpaint/plugins/create/create_review.py new file mode 100644 index 0000000000..886dae7c39 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/create/create_review.py @@ -0,0 +1,76 @@ +from openpype.client import get_asset_by_name +from openpype.pipeline import CreatedInstance +from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintReviewCreator(TVPaintAutoCreator): + family = "review" + identifier = "scene.review" + label = "Review" + icon = "ei.video" + + # Settings + active_on_create = True + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_review"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.active_on_create = plugin_settings["active_on_create"] + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + } + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/create/create_workfile.py b/openpype/hosts/tvpaint/plugins/create/create_workfile.py new file mode 100644 index 0000000000..41347576d5 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/create/create_workfile.py @@ -0,0 +1,70 @@ +from openpype.client import get_asset_by_name +from openpype.pipeline import CreatedInstance +from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintWorkfileCreator(TVPaintAutoCreator): + family = "workfile" + identifier = "workfile" + label = "Workfile" + icon = "fa.file-o" + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_workfile"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == 
self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + } + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index d5b79758ad..5eb702a1da 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -1,37 +1,34 @@ import pyblish.api -class CollectOutputFrameRange(pyblish.api.ContextPlugin): +class CollectOutputFrameRange(pyblish.api.InstancePlugin): """Collect frame start/end from context. When instances are collected context does not contain `frameStart` and `frameEnd` keys yet. They are collected in global plugin `CollectContextEntities`. 
""" + label = "Collect output frame range" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.4999 hosts = ["tvpaint"] + families = ["review", "render"] - def process(self, context): - for instance in context: - frame_start = instance.data.get("frameStart") - frame_end = instance.data.get("frameEnd") - if frame_start is not None and frame_end is not None: - self.log.debug( - "Instance {} already has set frames {}-{}".format( - str(instance), frame_start, frame_end - ) - ) - return + def process(self, instance): + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + return - frame_start = context.data.get("frameStart") - frame_end = context.data.get("frameEnd") + context = instance.context - instance.data["frameStart"] = frame_start - instance.data["frameEnd"] = frame_end - - self.log.info( - "Set frames {}-{} on instance {} ".format( - frame_start, frame_end, str(instance) - ) + frame_start = asset_doc["data"]["frameStart"] + frame_end = frame_start + ( + context.data["sceneMarkOut"] - context.data["sceneMarkIn"] + ) + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = frame_end + self.log.info( + "Set frames {}-{} on instance {} ".format( + frame_start, frame_end, instance.data["subset"] ) + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py deleted file mode 100644 index ae1326a5bd..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ /dev/null @@ -1,280 +0,0 @@ -import json -import copy -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.pipeline import legacy_io -from openpype.pipeline.create import get_subset_name - - -class CollectInstances(pyblish.api.ContextPlugin): - label = "Collect Instances" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["tvpaint"] - - def process(self, context): - workfile_instances = context.data["workfileInstances"] - - self.log.debug("Collected ({}) instances:\n{}".format( - len(workfile_instances), - json.dumps(workfile_instances, indent=4) - )) - - filtered_instance_data = [] - # Backwards compatibility for workfiles that already have review - # instance in metadata. - review_instance_exist = False - for instance_data in workfile_instances: - family = instance_data["family"] - if family == "review": - review_instance_exist = True - - elif family not in ("renderPass", "renderLayer"): - self.log.info("Unknown family \"{}\". 
Skipping {}".format( - family, json.dumps(instance_data, indent=4) - )) - continue - - filtered_instance_data.append(instance_data) - - # Fake review instance if review was not found in metadata families - if not review_instance_exist: - filtered_instance_data.append( - self._create_review_instance_data(context) - ) - - for instance_data in filtered_instance_data: - instance_data["fps"] = context.data["sceneFps"] - - # Conversion from older instances - # - change 'render_layer' to 'renderlayer' - render_layer = instance_data.get("instance_data") - if not render_layer: - # Render Layer has only variant - if instance_data["family"] == "renderLayer": - render_layer = instance_data.get("variant") - - # Backwards compatibility for renderPasses - elif "render_layer" in instance_data: - render_layer = instance_data["render_layer"] - - if render_layer: - instance_data["renderlayer"] = render_layer - - # Store workfile instance data to instance data - instance_data["originData"] = copy.deepcopy(instance_data) - # Global instance data modifications - # Fill families - family = instance_data["family"] - families = [family] - if family != "review": - families.append("review") - # Add `review` family for thumbnail integration - instance_data["families"] = families - - # Instance name - subset_name = instance_data["subset"] - name = instance_data.get("name", subset_name) - instance_data["name"] = name - instance_data["label"] = "{} [{}-{}]".format( - name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ) - - active = instance_data.get("active", True) - instance_data["active"] = active - instance_data["publish"] = active - # Add representations key - instance_data["representations"] = [] - - # Different instance creation based on family - instance = None - if family == "review": - # Change subset name of review instance - - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
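For reference, the legacy collector being deleted here resolves review subset names through `openpype.pipeline.create.get_subset_name`, whose call order is visible in the removed code. A minimal sketch of that call pattern, with placeholder context values (project, asset and task names below are illustrative only):

```python
from openpype.client import get_asset_by_name
from openpype.pipeline.create import get_subset_name

# Placeholder context values - a real call takes these from the
# pyblish context, as the collector above does.
project_name = "demo_project"
task_name = "compositing"
asset_doc = get_asset_by_name(project_name, "sh010")

# Review instances use an empty variant, so the resulting name is
# driven purely by family, task and the project's name template.
subset_name = get_subset_name(
    "review",
    "",
    task_name,
    asset_doc,
    project_name,
    "tvpaint",
)
```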
- asset_name = context.data["workfile_context"]["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = context.data["hostName"] - # Use empty variant value - variant = "" - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = get_subset_name( - family, - variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - instance_data["subset"] = new_subset_name - - instance = context.create_instance(**instance_data) - - instance.data["layers"] = copy.deepcopy( - context.data["layersData"] - ) - - elif family == "renderLayer": - instance = self.create_render_layer_instance( - context, instance_data - ) - elif family == "renderPass": - instance = self.create_render_pass_instance( - context, instance_data - ) - - if instance is None: - continue - - any_visible = False - for layer in instance.data["layers"]: - if layer["visible"]: - any_visible = True - break - - instance.data["publish"] = any_visible - - self.log.debug("Created instance: {}\n{}".format( - instance, json.dumps(instance.data, indent=4) - )) - - def _create_review_instance_data(self, context): - """Fake review instance data.""" - - return { - "family": "review", - "asset": context.data["asset"], - # Dummy subset name - "subset": "reviewMain" - } - - def create_render_layer_instance(self, context, instance_data): - name = instance_data["name"] - # Change label - subset_name = instance_data["subset"] - - # Backwards compatibility - # - subset names were not stored as final subset names during creation - if "variant" not in instance_data: - instance_data["label"] = "{}_Beauty".format(name) - - # Change subset name - # Final family of an instance will be `render` - new_family = "render" - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = "{}{}_{}_Beauty".format( - new_family, task_name.capitalize(), name - ) - instance_data["subset"] = new_subset_name - self.log.debug("Changed subset name \"{}\"->\"{}\"".format( - subset_name, new_subset_name - )) - - # Get all layers for the layer - layers_data = context.data["layersData"] - group_id = instance_data["group_id"] - group_layers = [] - for layer in layers_data: - if layer["group_id"] == group_id: - group_layers.append(layer) - - if not group_layers: - # Should be handled here? - self.log.warning(( - f"Group with id {group_id} does not contain any layers." - f" Instance \"{name}\" not created." - )) - return None - - instance_data["layers"] = group_layers - - return context.create_instance(**instance_data) - - def create_render_pass_instance(self, context, instance_data): - pass_name = instance_data["pass"] - self.log.info( - "Creating render pass instance. 
\"{}\"".format(pass_name) - ) - # Change label - render_layer = instance_data["renderlayer"] - - # Backwards compatibility - # - subset names were not stored as final subset names during creation - if "variant" not in instance_data: - instance_data["label"] = "{}_{}".format(render_layer, pass_name) - # Change subset name - # Final family of an instance will be `render` - new_family = "render" - old_subset_name = instance_data["subset"] - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = "{}{}_{}_{}".format( - new_family, task_name.capitalize(), render_layer, pass_name - ) - instance_data["subset"] = new_subset_name - self.log.debug("Changed subset name \"{}\"->\"{}\"".format( - old_subset_name, new_subset_name - )) - - layers_data = context.data["layersData"] - layers_by_name = { - layer["name"]: layer - for layer in layers_data - } - - if "layer_names" in instance_data: - layer_names = instance_data["layer_names"] - else: - # Backwards compatibility - # - not 100% working as it was found out that layer ids can't be - # used as unified identifier across multiple workstations - layers_by_id = { - layer["layer_id"]: layer - for layer in layers_data - } - layer_ids = instance_data["layer_ids"] - layer_names = [] - for layer_id in layer_ids: - layer = layers_by_id.get(layer_id) - if layer: - layer_names.append(layer["name"]) - - if not layer_names: - raise ValueError(( - "Metadata contain old way of storing layers information." - " It is not possible to identify layers to publish with" - " these data. Please remove Render Pass instances with" - " Subset manager and use Creator tool to recreate them." - )) - - render_pass_layers = [] - for layer_name in layer_names: - layer = layers_by_name.get(layer_name) - # NOTE This is kind of validation before validators? - if not layer: - self.log.warning( - f"Layer with name {layer_name} was not found." - ) - continue - - render_pass_layers.append(layer) - - if not render_pass_layers: - name = instance_data["name"] - self.log.warning( - f"None of the layers from the RenderPass \"{name}\"" - " exist anymore. Instance not created." 
- ) - return None - - instance_data["layers"] = render_pass_layers - return context.create_instance(**instance_data) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..ba89deac5d --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py @@ -0,0 +1,109 @@ +import copy +import pyblish.api +from openpype.lib import prepare_template_data + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + label = "Collect Render Instances" + order = pyblish.api.CollectorOrder - 0.4 + hosts = ["tvpaint"] + families = ["render", "review"] + + def process(self, instance): + context = instance.context + creator_identifier = instance.data["creator_identifier"] + if creator_identifier == "render.layer": + self._collect_data_for_render_layer(instance) + + elif creator_identifier == "render.pass": + self._collect_data_for_render_pass(instance) + + elif creator_identifier == "render.scene": + self._collect_data_for_render_scene(instance) + + else: + if creator_identifier == "scene.review": + self._collect_data_for_review(instance) + return + + subset_name = instance.data["subset"] + instance.data["name"] = subset_name + instance.data["label"] = "{} [{}-{}]".format( + subset_name, + context.data["sceneMarkIn"] + 1, + context.data["sceneMarkOut"] + 1 + ) + + def _collect_data_for_render_layer(self, instance): + instance.data["families"].append("renderLayer") + creator_attributes = instance.data["creator_attributes"] + group_id = creator_attributes["group_id"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + layers_data = instance.context.data["layersData"] + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["group_id"] == group_id + ] + + def _collect_data_for_render_pass(self, instance): + instance.data["families"].append("renderPass") + + layer_names = set(instance.data["layer_names"]) + layers_data = instance.context.data["layersData"] + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["name"] in layer_names + ] + + render_layer_data = None + render_layer_id = creator_attributes["render_layer_instance_id"] + for in_data in instance.context.data["workfileInstances"]: + if ( + in_data["creator_identifier"] == "render.layer" + and in_data["instance_id"] == render_layer_id + ): + render_layer_data = in_data + break + + instance.data["renderLayerData"] = copy.deepcopy(render_layer_data) + # Invalid state + if render_layer_data is None: + return + render_layer_name = render_layer_data["variant"] + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderlayer": render_layer_name}) + ) + + def _collect_data_for_render_scene(self, instance): + instance.data["families"].append("renderScene") + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) + + render_pass_name = ( + instance.data["creator_attributes"]["render_pass_name"] + ) + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + 
**prepare_template_data({"renderpass": render_pass_name}) + ) + + def _collect_data_for_review(self, instance): + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py deleted file mode 100644 index 92a2815ba0..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py +++ /dev/null @@ -1,114 +0,0 @@ -import json -import copy -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.pipeline.create import get_subset_name - - -class CollectRenderScene(pyblish.api.ContextPlugin): - """Collect instance which renders whole scene in PNG. - - Creates instance with family 'renderScene' which will have all layers - to render which will be composite into one result. The instance is not - collected from scene. - - Scene will be rendered with all visible layers similar way like review is. - - Instance is disabled if there are any created instances of 'renderLayer' - or 'renderPass'. That is because it is expected that this instance is - used as lazy publish of TVPaint file. - - Subset name is created similar way like 'renderLayer' family. It can use - `renderPass` and `renderLayer` keys which can be set using settings and - `variant` is filled using `renderPass` value. - """ - label = "Collect Render Scene" - order = pyblish.api.CollectorOrder - 0.39 - hosts = ["tvpaint"] - - # Value of 'render_pass' in subset name template - render_pass = "beauty" - - # Settings attributes - enabled = False - # Value of 'render_layer' and 'variant' in subset name template - render_layer = "Main" - - def process(self, context): - # Check if there are created instances of renderPass and renderLayer - # - that will define if renderScene instance is enabled after - # collection - any_created_instance = False - for instance in context: - family = instance.data["family"] - if family in ("renderPass", "renderLayer"): - any_created_instance = True - break - - # Global instance data modifications - # Fill families - family = "renderScene" - # Add `review` family for thumbnail integration - families = [family, "review"] - - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
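The new `CollectRenderInstances` plugin above finalizes subset names by formatting `{renderlayer}` and `{renderpass}` placeholders through `openpype.lib.prepare_template_data`. A minimal sketch of that formatting step, with an assumed template string (real templates come from project settings):

```python
from openpype.lib import prepare_template_data

# Assumed subset name stored on the instance at creation time; the
# placeholders are formatted only once the Render Layer is known.
subset_name = "renderMain_{renderlayer}_{renderpass}"

fill_data = prepare_template_data({
    "renderlayer": "L010",
    "renderpass": "beauty",
})
print(subset_name.format(**fill_data))
# renderMain_L010_beauty
```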
- workfile_context = context.data["workfile_context"] - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - asset_name = workfile_context["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = context.data["hostName"] - # Variant is using render pass name - variant = self.render_layer - dynamic_data = { - "renderlayer": self.render_layer, - "renderpass": self.render_pass, - } - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_layer"] = dynamic_data["renderlayer"] - dynamic_data["render_pass"] = dynamic_data["renderpass"] - - task_name = workfile_context["task"] - subset_name = get_subset_name( - "render", - variant, - task_name, - asset_doc, - project_name, - host_name, - dynamic_data=dynamic_data, - project_settings=context.data["project_settings"] - ) - - instance_data = { - "family": family, - "families": families, - "fps": context.data["sceneFps"], - "subset": subset_name, - "name": subset_name, - "label": "{} [{}-{}]".format( - subset_name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ), - "active": not any_created_instance, - "publish": not any_created_instance, - "representations": [], - "layers": copy.deepcopy(context.data["layersData"]), - "asset": asset_name, - "task": task_name, - # Add render layer to instance data - "renderlayer": self.render_layer - } - - instance = context.create_instance(**instance_data) - - self.log.debug("Created instance: {}\n{}".format( - instance, json.dumps(instance.data, indent=4) - )) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 8c7c8c3899..a3449663f8 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -2,17 +2,15 @@ import os import json import pyblish.api -from openpype.client import get_asset_by_name -from openpype.pipeline import legacy_io -from openpype.pipeline.create import get_subset_name - -class CollectWorkfile(pyblish.api.ContextPlugin): +class CollectWorkfile(pyblish.api.InstancePlugin): label = "Collect Workfile" order = pyblish.api.CollectorOrder - 0.4 hosts = ["tvpaint"] + families = ["workfile"] - def process(self, context): + def process(self, instance): + context = instance.context current_file = context.data["currentFile"] self.log.info( @@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): dirpath, filename = os.path.split(current_file) basename, ext = os.path.splitext(filename) - instance = context.create_instance(name=basename) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - - # Get subset name of workfile instance - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
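A recurring pattern in this changeset is converting context-wide collectors into per-instance ones, as with `CollectWorkfile` above: the plugin switches from `pyblish.api.ContextPlugin` to `pyblish.api.InstancePlugin` and declares the `families` it applies to. A stripped-down sketch of the resulting plugin shape (the body is illustrative):

```python
import pyblish.api


class CollectSomethingPerInstance(pyblish.api.InstancePlugin):
    """Runs once per matching instance instead of once per context."""

    label = "Collect Something"
    order = pyblish.api.CollectorOrder - 0.4
    hosts = ["tvpaint"]
    # Only instances with one of these families are processed.
    families = ["workfile"]

    def process(self, instance):
        # The shared publishing context stays reachable from the instance.
        context = instance.context
        self.log.info(
            "Processing {} from {}".format(
                instance.data["subset"], context.data["currentFile"]
            )
        )
```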
- family = "workfile" - asset_name = context.data["workfile_context"]["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = os.environ["AVALON_APP"] - # Use empty variant value - variant = "" - task_name = legacy_io.Session["AVALON_TASK"] - subset_name = get_subset_name( - family, - variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - - # Create Workfile instance - instance.data.update({ - "subset": subset_name, - "asset": context.data["asset"], - "label": subset_name, - "publish": True, - "family": "workfile", - "families": ["workfile"], - "representations": [{ - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": filename, - "stagingDir": dirpath - }] + instance.data["representations"].append({ + "name": ext.lstrip("."), + "ext": ext.lstrip("."), + "files": filename, + "stagingDir": dirpath }) + self.log.info("Collected workfile instance: {}".format( json.dumps(instance.data, indent=4) )) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index 8fe71a4a46..95a5cd77bd 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect and store current context to have reference current_context = { - "project": legacy_io.Session["AVALON_PROJECT"], - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] + "project_name": context.data["projectName"], + "asset_name": context.data["asset"], + "task_name": context.data["task"] } context.data["previous_context"] = current_context self.log.debug("Current context is: {}".format(current_context)) @@ -76,25 +76,31 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): self.log.info("Collecting workfile context") workfile_context = get_current_workfile_context() + if "project" in workfile_context: + workfile_context = { + "project_name": workfile_context.get("project"), + "asset_name": workfile_context.get("asset"), + "task_name": workfile_context.get("task"), + } # Store workfile context to pyblish context context.data["workfile_context"] = workfile_context if workfile_context: # Change current context with context from workfile key_map = ( - ("AVALON_ASSET", "asset"), - ("AVALON_TASK", "task") + ("AVALON_ASSET", "asset_name"), + ("AVALON_TASK", "task_name") ) for env_key, key in key_map: legacy_io.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] self.log.info("Context changed to: {}".format(workfile_context)) - asset_name = workfile_context["asset"] - task_name = workfile_context["task"] + asset_name = workfile_context["asset_name"] + task_name = workfile_context["task_name"] else: - asset_name = current_context["asset"] - task_name = current_context["task"] + asset_name = current_context["asset_name"] + task_name = current_context["task_name"] # Handle older workfiles or workfiles without metadata self.log.warning(( "Workfile does not contain information about context." 
@@ -103,6 +109,7 @@
 
         # Store context asset name
         context.data["asset"] = asset_name
+        context.data["task"] = task_name
         self.log.info(
             "Context is set to Asset: \"{}\" and Task: \"{}\"".format(
                 asset_name, task_name
diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
index 78074f720c..f2856c72a9 100644
--- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
+++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
@@ -6,6 +6,7 @@ from PIL import Image
 
 import pyblish.api
 
+from openpype.pipeline.publish import KnownPublishError
 from openpype.hosts.tvpaint.api.lib import (
     execute_george,
     execute_george_through_file,
@@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import (
 class ExtractSequence(pyblish.api.Extractor):
     label = "Extract Sequence"
     hosts = ["tvpaint"]
-    families = ["review", "renderPass", "renderLayer", "renderScene"]
-    families_to_review = ["review"]
+    families = ["review", "render"]
 
     # Modifiable with settings
     review_bg = [255, 255, 255, 255]
@@ -136,7 +136,7 @@ class ExtractSequence(pyblish.api.Extractor):
 
         # Fill tags and new families from project settings
         tags = []
-        if family_lowered in self.families_to_review:
+        if family_lowered == "review":
             tags.append("review")
 
         # Sequence of one frame
@@ -162,10 +162,6 @@
 
         instance.data["representations"].append(new_repre)
 
-        if family_lowered in ("renderpass", "renderlayer", "renderscene"):
-            # Change family to render
-            instance.data["family"] = "render"
-
         if not thumbnail_fullpath:
             return
 
@@ -259,7 +255,7 @@
             output_filepaths_by_frame_idx[frame_idx] = filepath
 
             if not os.path.exists(filepath):
-                raise AssertionError(
+                raise KnownPublishError(
                     "Output was not rendered. File was not found {}".format(
                         filepath
                     )
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml
new file mode 100644
index 0000000000..a95387356f
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml
@@ -0,0 +1,18 @@
+<root>
+<error id="main">
+<title>Overused Color group</title>
+<description>
+## One Color group is used by multiple Render Layers
+
+A single color group used by multiple Render Layers would cause clashes of the rendered TVPaint layers: the same layers would end up in the output files of every Render Layer that shares the group.
+
+### Conflicting groups
+
+{groups_information}
+
+### How to repair?
+
+Refresh, go to the 'Publish' tab, go through the Render Layers and change their color groups so they do not clash with each other. If you have reached the limit of TVPaint color groups, there is unfortunately no way to work around this issue.
+</description>
+</error>
+</root>
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
index 9f61bdbcd0..722d76b4d2 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py
@@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin):
         duplicated_layer_names = []
         for layer_name in layer_names:
             layers = layers_by_name.get(layer_name)
+            # It is not the job of this validator to handle missing layers
+            if layers is None:
+                continue
             if len(layers) > 1:
                 duplicated_layer_names.append(layer_name)
 
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
index d3a04cc69f..6a496a2e49 100644
--- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py
@@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin):
 
     label = "Validate Layers Visibility"
     order = pyblish.api.ValidatorOrder
-    families = ["review", "renderPass", "renderLayer", "renderScene"]
+    families = ["review", "render"]
 
     def process(self, instance):
+        layers = instance.data["layers"]
+        # Instance may have empty layers
+        # - it is not the job of this validator to check that
+        if not layers:
+            return
         layer_names = set()
-        for layer in instance.data["layers"]:
+        for layer in layers:
             layer_names.add(layer["name"])
             if layer["visible"]:
                 return
diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py
new file mode 100644
index 0000000000..bb0a9a4ffe
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py
@@ -0,0 +1,74 @@
+import collections
+import pyblish.api
+from openpype.pipeline import PublishXmlValidationError
+
+
+class ValidateRenderLayerGroups(pyblish.api.ContextPlugin):
+    """Validate group ids of renderLayer subsets.
+
+    Validate that no two Render Layers use the same group.
+ """ + + label = "Validate Render Layers Group" + order = pyblish.api.ValidatorOrder + 0.1 + + def process(self, context): + # Prepare layers + render_layers_by_group_id = collections.defaultdict(list) + for instance in context: + families = instance.data.get("families") + if not families or "renderLayer" not in families: + continue + + group_id = instance.data["creator_attributes"]["group_id"] + render_layers_by_group_id[group_id].append(instance) + + duplicated_instances = [] + for group_id, instances in render_layers_by_group_id.items(): + if len(instances) > 1: + duplicated_instances.append((group_id, instances)) + + if not duplicated_instances: + return + + # Exception message preparations + groups_data = context.data["groupsData"] + groups_by_id = { + group["group_id"]: group + for group in groups_data + } + + per_group_msgs = [] + groups_information_lines = [] + for group_id, instances in duplicated_instances: + group = groups_by_id[group_id] + group_label = "Group \"{}\" ({})".format( + group["name"], + group["group_id"], + ) + line_join_subset_names = "\n".join([ + f" - {instance['subset']}" + for instance in instances + ]) + joined_subset_names = ", ".join([ + f"\"{instance['subset']}\"" + for instance in instances + ]) + per_group_msgs.append( + "{} < {} >".format(group_label, joined_subset_names) + ) + groups_information_lines.append( + "{}\n{}".format(group_label, line_join_subset_names) + ) + + # Raise an error + raise PublishXmlValidationError( + self, + ( + "More than one Render Layer is using the same TVPaint" + " group color. {}" + ).format(" | ".join(per_group_msgs)), + formatting_data={ + "groups_information": "\n".join(groups_information_lines) + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py index 0fbfca6c56..2a3173c698 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py @@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): ), "expected_group": correct_group["name"], "layer_names": ", ".join(invalid_layer_names) - } ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py index d235215ac9..4473e4b1b7 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -42,7 +42,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin): "expected_width": expected_data["resolutionWidth"], "expected_height": expected_data["resolutionHeight"], "current_width": scene_data["resolutionWidth"], - "current_height": scene_data["resolutionWidth"], + "current_height": scene_data["resolutionHeight"], "expected_pixel_ratio": expected_data["pixelAspect"], "current_pixel_ratio": scene_data["pixelAspect"] } diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py index d66ae50c60..b38231e208 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py @@ -1,5 +1,9 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError, registered_host +from openpype.pipeline import ( + PublishXmlValidationError, + PublishValidationError, + registered_host, +) class 
ValidateWorkfileMetadataRepair(pyblish.api.Action): @@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): actions = [ValidateWorkfileMetadataRepair] - required_keys = {"project", "asset", "task"} + required_keys = {"project_name", "asset_name", "task_name"} def process(self, context): workfile_context = context.data["workfile_context"] if not workfile_context: - raise AssertionError( - "Current workfile is missing whole metadata about context." + raise PublishValidationError( + "Current workfile is missing whole metadata about context.", + "Missing context", + ( + "Current workfile is missing metadata about task." + " To fix this issue save the file using Workfiles tool." + ) ) missing_keys = [] diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py index 0f25f2f7be..2ed5afa11c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py @@ -1,4 +1,3 @@ -import os import pyblish.api from openpype.pipeline import PublishXmlValidationError @@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): def process(self, context): workfile_context = context.data.get("workfile_context") # If workfile context is missing than project is matching to - # `AVALON_PROJECT` value for 100% + # global project if not workfile_context: self.log.info( "Workfile context (\"workfile_context\") is not filled." ) return - workfile_project_name = workfile_context["project"] - env_project_name = os.environ["AVALON_PROJECT"] + workfile_project_name = workfile_context["project_name"] + env_project_name = context.data["projectName"] if workfile_project_name == env_project_name: self.log.info(( "Both workfile project and environment project are same. 
{}" diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index b5fb955a84..9eb7724a60 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -82,9 +82,6 @@ from .mongo import ( validate_mongo_connection, OpenPypeMongoConnection ) -from .anatomy import ( - Anatomy -) from .dateutils import ( get_datetime_data, @@ -119,36 +116,19 @@ from .transcoding import ( ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, create_project, - is_latest, - any_outdated, - get_asset, - get_linked_assets, - get_latest_version, - get_system_general_anatomy_data, get_workfile_template_key, get_workfile_template_key_from_context, - get_workdir_data, - get_workdir, - get_workdir_with_workdir_data, get_last_workfile_with_version, get_last_workfile, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_doc, - BuildWorkfile, get_creator_by_name, get_custom_workfile_template, - change_timer_to_current_context, - get_custom_workfile_template_by_context, get_custom_workfile_template_by_string_context, get_custom_workfile_template @@ -186,8 +166,6 @@ from .plugin_tools import ( get_subset_name, get_subset_name_with_asset_doc, prepare_template_data, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, source_hash, ) @@ -278,34 +256,17 @@ __all__ = [ "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", - "PROJECT_NAME_ALLOWED_SYMBOLS", - "PROJECT_NAME_REGEX", "create_project", - "is_latest", - "any_outdated", - "get_asset", - "get_linked_assets", - "get_latest_version", - "get_system_general_anatomy_data", "get_workfile_template_key", "get_workfile_template_key_from_context", - "get_workdir_data", - "get_workdir", - "get_workdir_with_workdir_data", "get_last_workfile_with_version", "get_last_workfile", - "create_workfile_doc", - "save_workfile_data_to_doc", - "get_workfile_doc", - "BuildWorkfile", "get_creator_by_name", - "change_timer_to_current_context", - "get_custom_workfile_template_by_context", "get_custom_workfile_template_by_string_context", "get_custom_workfile_template", @@ -338,8 +299,6 @@ __all__ = [ "TaskNotSetError", "get_subset_name", "get_subset_name_with_asset_doc", - "filter_pyblish_plugins", - "set_plugin_attributes_from_settings", "source_hash", "format_file_size", @@ -358,8 +317,6 @@ __all__ = [ "terminal", - "Anatomy", - "get_datetime_data", "get_formatted_current_time", diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py deleted file mode 100644 index 6d339f058f..0000000000 --- a/openpype/lib/anatomy.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code related to project Anatomy was moved -to 'openpype.pipeline.anatomy' please change your imports as soon as -possible. File will be probably removed in OpenPype 3.14.* -""" - -import warnings -import functools - - -class AnatomyDeprecatedWarning(DeprecationWarning): - pass - - -def anatomy_deprecated(func): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.simplefilter("always", AnatomyDeprecatedWarning) - warnings.warn( - ( - "Deprecated import of 'Anatomy'." - " Class was moved to 'openpype.pipeline.anatomy'." - " Please change your imports of Anatomy in codebase." 
- ), - category=AnatomyDeprecatedWarning - ) - return func(*args, **kwargs) - return new_func - - -@anatomy_deprecated -def Anatomy(*args, **kwargs): - from openpype.pipeline.anatomy import Anatomy - return Anatomy(*args, **kwargs) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 12f4a5198b..a9ae27cb79 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -1,6 +1,5 @@ """Should be used only inside of hosts.""" -import os -import copy + import platform import logging import functools @@ -10,17 +9,12 @@ import six from openpype.client import ( get_project, - get_assets, get_asset_by_name, - get_last_version_by_subset_name, - get_workfile_info, ) from openpype.client.operations import ( CURRENT_ASSET_DOC_SCHEMA, CURRENT_PROJECT_SCHEMA, CURRENT_PROJECT_CONFIG_SCHEMA, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, ) from .profiles_filtering import filter_profiles from .path_templates import StringTemplate @@ -128,70 +122,6 @@ def with_pipeline_io(func): return wrapped -@deprecated("openpype.pipeline.context_tools.is_representation_from_latest") -def is_latest(representation): - """Return whether the representation is from latest version - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import is_representation_from_latest - - return is_representation_from_latest(representation) - - -@deprecated("openpype.pipeline.load.any_outdated_containers") -def any_outdated(): - """Return whether the current scene has any outdated content. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.load import any_outdated_containers - - return any_outdated_containers() - - -@deprecated("openpype.pipeline.context_tools.get_current_project_asset") -def get_asset(asset_name=None): - """ Returning asset document from database by its name. - - Doesn't count with duplicities on asset names! - - Args: - asset_name (str) - - Returns: - (MongoDB document) - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_current_project_asset - - return get_current_project_asset(asset_name=asset_name) - - -@deprecated("openpype.pipeline.template_data.get_general_template_data") -def get_system_general_anatomy_data(system_settings=None): - """ - Deprecated: - Function will be removed after release version 3.15.* - """ - from openpype.pipeline.template_data import get_general_template_data - - return get_general_template_data(system_settings) - - @deprecated("openpype.client.get_linked_asset_ids") def get_linked_asset_ids(asset_doc): """Return linked asset ids for `asset_doc` from DB @@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc): return get_linked_asset_ids(project_name, asset_doc=asset_doc) -@deprecated("openpype.client.get_linked_assets") -def get_linked_assets(asset_doc): - """Return linked assets for `asset_doc` from DB - - Args: - asset_doc (dict): Asset document from DB - - Returns: - (list) Asset documents of input links for passed asset doc. 
- - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline import legacy_io - from openpype.client import get_linked_assets - - project_name = legacy_io.active_project() - - return get_linked_assets(project_name, asset_doc=asset_doc) - - -@deprecated("openpype.client.get_last_version_by_subset_name") -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. - - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not project_name: - if not dbcon: - from openpype.pipeline import legacy_io - - log.debug("Using `legacy_io` for query.") - dbcon = legacy_io - # Make sure is installed - dbcon.install() - - project_name = dbcon.active_project() - - return get_last_version_by_subset_name( - project_name, subset_name, asset_name=asset_name - ) - - @deprecated( "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( @@ -361,142 +231,6 @@ def get_workfile_template_key( ) -@deprecated("openpype.pipeline.template_data.get_template_data") -def get_workdir_data(project_doc, asset_doc, task_name, host_name): - """Prepare data for workdir template filling from entered information. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. - - Returns: - dict: Data prepared for filling workdir template. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.template_data import get_template_data - - return get_template_data( - project_doc, asset_doc, task_name, host_name - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir_with_workdir_data( - workdir_data, anatomy=None, project_name=None, template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - It is possible to pass only project's name instead of project's anatomy but - one of them **must** be entered. It is preferred to enter anatomy if is - available as initialization of a new Anatomy object may be time consuming. - - Args: - workdir_data (dict): Data to fill workdir template. - anatomy (Anatomy): Anatomy object for specific project. Optional if - `project_name` is entered. - project_name (str): Project's name. Optional if `anatomy` is entered - otherwise Anatomy object is created with using the project name. - template_key (str): Key of work templates in anatomy templates. If not - passed `get_workfile_template_key_from_context` is used to get it. - dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key' - and 'project_name' are not passed. - - Returns: - TemplateResult: Workdir path. 
- - Raises: - ValueError: When both `anatomy` and `project_name` are set to None. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not anatomy and not project_name: - raise ValueError(( - "Missing required arguments one of `project_name` or `anatomy`" - " must be entered." - )) - - if not project_name: - project_name = anatomy.project_name - - from openpype.pipeline.workfile import get_workdir_with_workdir_data - - return get_workdir_with_workdir_data( - workdir_data, project_name, anatomy, template_key - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy=None, - template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. In `Session` - is stored under `AVALON_APP` key. - anatomy (Anatomy): Optional argument. Anatomy object is created using - project name from `project_doc`. It is preferred to pass this - argument as initialization of a new Anatomy object may be time - consuming. - template_key (str): Key of work templates in anatomy templates. Default - value is defined in `get_workdir_with_workdir_data`. - - Returns: - TemplateResult: Workdir path. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.workfile import get_workdir - # Output is TemplateResult object which contain useful data - return get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy, - template_key - ) - - -@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") -def template_data_from_session(session=None): - """ Return dictionary with template from session keys. - - Args: - session (dict, Optional): The Session to use. If not provided use the - currently active global Session. - - Returns: - dict: All available data from session. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_template_data_from_session - - return get_template_data_from_session(session) - - @deprecated("openpype.pipeline.context_tools.compute_session_changes") def compute_session_changes( session, task=None, asset=None, app=None, template_key=None @@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return change_current_context(asset, task, template_key) -@deprecated("openpype.client.get_workfile_info") -def get_workfile_doc(asset_id, task_name, filename, dbcon=None): - """Return workfile document for entered context. - - Do not use this method to get more than one document. In that cases use - custom query as this will return documents from database one by one. - - Args: - asset_id (ObjectId): Mongo ID of an asset under which workfile belongs. - task_name (str): Name of task under which the workfile belongs. - filename (str): Name of a workfile. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. - - Returns: - dict: Workfile document or None. 
- - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - project_name = dbcon.active_project() - return get_workfile_info(project_name, asset_id, task_name, filename) - - -@deprecated -def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): - """Creates or replace workfile document in mongo. - - Do not use this method to update data. This method will remove all - additional data from existing document. - - Args: - asset_doc (dict): Document of asset under which workfile belongs. - task_name (str): Name of task for which is workfile related to. - filename (str): Filename of workfile. - workdir (str): Path to directory where `filename` is located. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. - """ - - from openpype.pipeline import Anatomy - from openpype.pipeline.template_data import get_template_data - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Filter of workfile document - doc_filter = { - "type": "workfile", - "parent": asset_doc["_id"], - "task_name": task_name, - "filename": filename - } - # Document data are copy of filter - doc_data = copy.deepcopy(doc_filter) - - # Prepare project for workdir data - project_name = dbcon.active_project() - project_doc = get_project(project_name) - workdir_data = get_template_data( - project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] - ) - # Prepare anatomy - anatomy = Anatomy(project_name) - # Get workdir path (result is anatomy.TemplateResult) - template_workdir = get_workdir_with_workdir_data( - workdir_data, anatomy - ) - template_workdir_path = str(template_workdir).replace("\\", "/") - - # Replace slashses in workdir path where workfile is located - mod_workdir = workdir.replace("\\", "/") - - # Replace workdir from templates with rootless workdir - rootles_workdir = mod_workdir.replace( - template_workdir_path, - template_workdir.rootless.replace("\\", "/") - ) - - doc_data["schema"] = "pype:workfile-1.0" - doc_data["files"] = ["/".join([rootles_workdir, filename])] - doc_data["data"] = {} - - dbcon.replace_one( - doc_filter, - doc_data, - upsert=True - ) - - -@deprecated -def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): - if not workfile_doc: - # TODO add log message - return - - if not data: - return - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Convert data to mongo modification keys/values - # - this is naive implementation which does not expect nested - # dictionaries - set_data = {} - for key, value in data.items(): - new_key = "data.{}".format(key) - set_data[new_key] = value - - # Update workfile document with data - dbcon.update_one( - {"_id": workfile_doc["_id"]}, - {"$set": set_data} - ) - - @deprecated("openpype.pipeline.workfile.BuildWorkfile") def BuildWorkfile(): """Build workfile class was moved to workfile pipeline. @@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False): return get_legacy_creator_by_name(creator_name, case_sensitive) -@deprecated -def change_timer_to_current_context(): - """Called after context change to change timers. - - Deprecated: - This method is specific for TimersManager module so please use the - functionality from there. 
Function will be removed after release - version 3.15.* - """ - - from openpype.pipeline import legacy_io - - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - if not webserver_url: - log.warning("Couldn't find webserver url") - return - - rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) - try: - import requests - except Exception: - log.warning("Couldn't start timer") - return - data = { - "project_name": legacy_io.Session["AVALON_PROJECT"], - "asset_name": legacy_io.Session["AVALON_ASSET"], - "task_name": legacy_io.Session["AVALON_TASK"] - } - - requests.post(rest_api_url, json=data) - - def _get_task_context_data_for_anatomy( project_doc, asset_doc, task_name, anatomy=None ): @@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy( dict: With Anatomy context data. """ + from openpype.pipeline.template_data import get_general_template_data + if anatomy is None: from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) @@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy( } } - system_general_data = get_system_general_anatomy_data() + system_general_data = get_general_template_data() data.update(system_general_data) return data diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1e157dfbfd..10fd3940b8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -8,7 +8,6 @@ import warnings import functools from openpype.client import get_asset_by_id -from openpype.settings import get_project_settings log = logging.getLogger(__name__) @@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc( is not passed. dynamic_data (dict): Dynamic data specific for a creator which creates instance. - dbcon (AvalonMongoDB): Mongo connection to be able query asset document - if 'asset_doc' is not passed. """ from openpype.pipeline.create import get_subset_name @@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs): return fill_data -@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") -def filter_pyblish_plugins(plugins): - """Filter pyblish plugins by presets. - - This servers as plugin filter / modifier for pyblish. It will load plugin - definitions from presets and filter those needed to be excluded. - - Args: - plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` - `discover()` method. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.publish.lib import filter_pyblish_plugins - - filter_pyblish_plugins(plugins) - - -@deprecated -def set_plugin_attributes_from_settings( - plugins, superclass, host_name=None, project_name=None -): - """Change attribute values on Avalon plugins by project settings. - - This function should be used only in host context. Modify - behavior of plugins. - - Args: - plugins (list): Plugins discovered by origin avalon discover method. - superclass (object): Superclass of plugin type (e.g. Cretor, Loader). - host_name (str): Name of host for which plugins are loaded and from. - Value from environment `AVALON_APP` is used if not entered. - project_name (str): Name of project for which settings will be loaded. - Value from environment `AVALON_PROJECT` is used if not entered. 
- - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Function is not used anymore - from openpype.pipeline import LegacyCreator, LoaderPlugin - - # determine host application to use for finding presets - if host_name is None: - host_name = os.environ.get("AVALON_APP") - - if project_name is None: - project_name = os.environ.get("AVALON_PROJECT") - - # map plugin superclass to preset json. Currently supported is load and - # create (LoaderPlugin and LegacyCreator) - plugin_type = None - if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): - plugin_type = "load" - elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): - plugin_type = "create" - - if not host_name or not project_name or plugin_type is None: - msg = "Skipped attributes override from settings." - if not host_name: - msg += " Host name is not defined." - - if not project_name: - msg += " Project name is not defined." - - if plugin_type is None: - msg += " Plugin type is unsupported for class {}.".format( - superclass.__name__ - ) - - print(msg) - return - - print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type)) - - project_settings = get_project_settings(project_name) - plugin_type_settings = ( - project_settings - .get(host_name, {}) - .get(plugin_type, {}) - ) - global_type_settings = ( - project_settings - .get("global", {}) - .get(plugin_type, {}) - ) - if not global_type_settings and not plugin_type_settings: - return - - for plugin in plugins: - plugin_name = plugin.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - # Look for plugin settings in global settings - elif plugin_name in global_type_settings: - plugin_settings = global_type_settings[plugin_name] - - if not plugin_settings: - continue - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - setattr(plugin, "active", False) - print(" - is disabled by preset") - else: - setattr(plugin, option, value) - print(" - setting `{}`: `{}`".format(option, value)) - - def source_hash(filepath, *args): """Generate simple identifier for a source file. This is used to identify whether a source file has previously been diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index f26047bb9d..83dd5b49e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -12,6 +12,7 @@ from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo from openpype.tests.lib import is_in_tests +from openpype.lib import is_running_from_build @attr.s @@ -87,9 +88,13 @@ class AfterEffectsSubmitDeadline( "AVALON_APP_NAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS", - "OPENPYPE_VERSION", "IS_TEST" ] + + # Add OpenPype version if we are running from build. 
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if self._instance.context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
index 425883393f..84fca11d9d 100644
--- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
@@ -14,6 +14,7 @@ from openpype.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
 from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build
 
 
 class _ZipFile(ZipFile):
@@ -279,10 +280,14 @@ class HarmonySubmitDeadline(
             "AVALON_TASK",
             "AVALON_APP_NAME",
             "OPENPYPE_DEV",
-            "OPENPYPE_LOG_NO_COLORS",
-            "OPENPYPE_VERSION",
+            "OPENPYPE_LOG_NO_COLORS",
             "IS_TEST"
         ]
+
+        # Add OpenPype version if we are running from build.
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if self._instance.context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py
index 6a62f83cae..68aa653804 100644
--- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py
+++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py
@@ -9,6 +9,7 @@ import pyblish.api
 
 from openpype.pipeline import legacy_io
 from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build
 
 
 class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
@@ -133,9 +134,13 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin):
             # Submit along the current Avalon tool setup that we launched
             # this application with so the Render Slave can build its own
             # similar environment using it, e.g. "houdini17.5;pluginx2.3"
-            "AVALON_TOOLS",
-            "OPENPYPE_VERSION"
+            "AVALON_TOOLS"
         ]
+
+        # Add OpenPype version if we are running from build.
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
index 2b17b644b8..73ab689c9a 100644
--- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py
@@ -10,6 +10,7 @@ import pyblish.api
 
 from openpype.pipeline import legacy_io
 from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build
 
 
 class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
@@ -105,9 +106,13 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
             # Submit along the current Avalon tool setup that we launched
             # this application with so the Render Slave can build its own
             # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
-            "AVALON_TOOLS",
-            "OPENPYPE_VERSION"
+            "AVALON_TOOLS"
         ]
+
+        # Add OpenPype version if we are running from build.
+ if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py new file mode 100644 index 0000000000..417a03de74 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py @@ -0,0 +1,218 @@ +import os +import getpass +import copy + +import attr +from openpype.pipeline import legacy_io +from openpype.settings import get_project_settings +from openpype.hosts.max.api.lib import ( + get_current_renderer, + get_multipass_setting +) +from openpype.hosts.max.api.lib_rendersettings import RenderSettings +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo + + +@attr.s +class MaxPluginInfo(object): + SceneFile = attr.ib(default=None) # Input + Version = attr.ib(default=None) # Mandatory for Deadline + SaveFile = attr.ib(default=True) + IgnoreInputs = attr.ib(default=True) + + +class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): + + label = "Submit Render to Deadline" + hosts = ["max"] + families = ["maxrender"] + targets = ["local"] + + use_published = True + priority = 50 + tile_priority = 50 + chunk_size = 1 + jobInfo = {} + pluginInfo = {} + group = None + deadline_pool = None + deadline_pool_secondary = None + framePerTask = 1 + + def get_job_info(self): + job_info = DeadlineJobInfo(Plugin="3dsmax") + + # todo: test whether this works for existing production cases + # where custom jobInfo was stored in the project settings + job_info.update(self.jobInfo) + + instance = self._instance + context = instance.context + + # Always use the original work file name for the Job name even when + # rendering is done from the published Work File. The original work + # file name is clearer because it can also have subversion strings, + # etc. which are stripped for the published file. 
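+        # E.g. a work file "sh010_anim_v012_fixNotes.max" (illustrative
+        # name) publishes as "sh010_anim_v012.max"; the job keeps the
+        # verbose work file name.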
+ src_filepath = context.data["currentFile"] + src_filename = os.path.basename(src_filepath) + + job_info.Name = "%s - %s" % (src_filename, instance.name) + job_info.BatchName = src_filename + job_info.Plugin = instance.data["plugin"] + job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) + + # Deadline requires integers in frame range + frames = "{start}-{end}".format( + start=int(instance.data["frameStart"]), + end=int(instance.data["frameEnd"]) + ) + job_info.Frames = frames + + job_info.Pool = instance.data.get("primaryPool") + job_info.SecondaryPool = instance.data.get("secondaryPool") + job_info.ChunkSize = instance.data.get("chunkSize", 1) + job_info.Comment = context.data.get("comment") + job_info.Priority = instance.data.get("priority", self.priority) + job_info.FramesPerTask = instance.data.get("framesPerTask", 1) + + if self.group: + job_info.Group = self.group + + # Add options from RenderGlobals + render_globals = instance.data.get("renderGlobals", {}) + job_info.update(render_globals) + + keys = [ + "FTRACK_API_KEY", + "FTRACK_API_USER", + "FTRACK_SERVER", + "OPENPYPE_SG_USER", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "AVALON_APP_NAME", + "OPENPYPE_DEV", + "OPENPYPE_VERSION", + "IS_TEST" + ] + # Add mongo url if it's enabled + if self._instance.context.data.get("deadlinePassMongoUrl"): + keys.append("OPENPYPE_MONGO") + + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + for key in keys: + value = environment.get(key) + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" + job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + + # Add list of expected files to job + # --------------------------------- + exp = instance.data.get("expectedFiles") + for filepath in exp: + job_info.OutputDirectory += os.path.dirname(filepath) + job_info.OutputFilename += os.path.basename(filepath) + + return job_info + + def get_plugin_info(self): + instance = self._instance + + plugin_info = MaxPluginInfo( + SceneFile=self.scene_path, + Version=instance.data["maxversion"], + SaveFile=True, + IgnoreInputs=True + ) + + plugin_payload = attr.asdict(plugin_info) + + # Patching with pluginInfo from settings + for key, value in self.pluginInfo.items(): + plugin_payload[key] = value + + return plugin_payload + + def process_submission(self): + + instance = self._instance + filepath = self.scene_path + + expected_files = instance.data["expectedFiles"] + if not expected_files: + raise RuntimeError("No Render Elements found!") + output_dir = os.path.dirname(expected_files[0]) + instance.data["outputDir"] = output_dir + instance.data["toBeRenderedOn"] = "deadline" + + filename = os.path.basename(filepath) + + payload_data = { + "filename": filename, + "dirname": output_dir + } + + self.log.debug("Submitting 3dsMax render..") + payload = self._use_published_name(payload_data) + job_info, plugin_info = payload + self.submit(self.assemble_payload(job_info, plugin_info)) + + def _use_published_name(self, data): + instance = self._instance + job_info = copy.deepcopy(self.job_info) + plugin_info = copy.deepcopy(self.plugin_info) + plugin_data = {} + project_setting = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + multipass = get_multipass_setting(project_setting) + if multipass: + plugin_data["DisableMultipass"] = 0 + else: + plugin_data["DisableMultipass"] = 1 + 
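+        # Rebase the beauty output (and the render element paths below)
+        # from the work scene name onto the published scene name.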
+        expected_files = instance.data.get("expectedFiles")
+        if not expected_files:
+            raise RuntimeError("No render elements found")
+        old_output_dir = os.path.dirname(expected_files[0])
+        output_beauty = RenderSettings().get_render_output(instance.name,
+                                                           old_output_dir)
+        filepath = self.from_published_scene()
+
+        def _clean_name(path):
+            return os.path.splitext(os.path.basename(path))[0]
+
+        new_scene = _clean_name(filepath)
+        orig_scene = _clean_name(instance.context.data["currentFile"])
+
+        output_beauty = output_beauty.replace(orig_scene, new_scene)
+        output_beauty = output_beauty.replace("\\", "/")
+        plugin_data["RenderOutput"] = output_beauty
+
+        renderer_class = get_current_renderer()
+        renderer = str(renderer_class).split(":")[0]
+        if renderer in [
+            "ART_Renderer",
+            "Redshift_Renderer",
+            "V_Ray_6_Hotfix_3",
+            "V_Ray_GPU_6_Hotfix_3",
+            "Default_Scanline_Renderer",
+            "Quicksilver_Hardware_Renderer",
+        ]:
+            render_elem_list = RenderSettings().get_render_element()
+            for i, element in enumerate(render_elem_list):
+                element = element.replace(orig_scene, new_scene)
+                plugin_data["RenderElementOutputFilename%d" % i] = element  # noqa
+
+        self.log.debug("plugin data:{}".format(plugin_data))
+        plugin_info.update(plugin_data)
+
+        return job_info, plugin_info
diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
index ed37ff1897..22b5c02296 100644
--- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
@@ -38,6 +38,7 @@ from openpype.hosts.maya.api.lib import get_attr_in_layer
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
 from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build


 def _validate_deadline_bool_value(instance, attribute, value):
@@ -165,10 +166,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
             "AVALON_ASSET",
             "AVALON_TASK",
             "AVALON_APP_NAME",
-            "OPENPYPE_DEV",
-            "OPENPYPE_VERSION",
+            "OPENPYPE_DEV",
             "IS_TEST"
         ]
+
+        # Add OpenPype version if we are running from build.
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if self._instance.context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py
index bab6591c7f..25f859554f 100644
--- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py
@@ -7,6 +7,7 @@ from maya import cmds
 from openpype.pipeline import legacy_io, PublishXmlValidationError
 from openpype.settings import get_project_settings
 from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build

 import pyblish.api

@@ -104,9 +105,13 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin):
         keys = [
             "FTRACK_API_USER",
             "FTRACK_API_KEY",
-            "FTRACK_SERVER",
-            "OPENPYPE_VERSION"
+            "FTRACK_SERVER"
         ]
+
+        # Add OpenPype version if we are running from build.
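Right after this guard the submitter builds its environment by merging the picked variables with `legacy_io.Session`, and the session values win on key collisions. A toy illustration of that `dict(a, **b)` precedence (made-up values, not the real session):

```python
# Stand-ins for the os.environ picks and the legacy_io.Session mapping.
env_picks = {"AVALON_PROJECT": "from_environ", "FTRACK_SERVER": "https://example"}
session = {"AVALON_PROJECT": "from_session", "AVALON_TASK": "compositing"}

# dict(a, **b) copies `a` first, then keys from `b` overwrite on collision,
# mirroring the submitters' dict({...}, **legacy_io.Session) construction.
environment = dict(env_picks, **session)

assert environment["AVALON_PROJECT"] == "from_session"
assert environment["AVALON_TASK"] == "compositing"
assert environment["FTRACK_SERVER"] == "https://example"
```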
+ if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index d1948d8d50..cca2a4d896 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -10,6 +10,7 @@ import pyblish.api import nuke from openpype.pipeline import legacy_io from openpype.tests.lib import is_in_tests +from openpype.lib import is_running_from_build class NukeSubmitDeadline(pyblish.api.InstancePlugin): @@ -265,9 +266,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE", - "OPENPYPE_VERSION" + "FOUNDRY_LICENSE" ] + + # Add OpenPype version if we are running from build. + if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index 7e39a644a2..c7a559466c 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -20,6 +20,7 @@ from openpype.pipeline import ( ) from openpype.tests.lib import is_in_tests from openpype.pipeline.farm.patterning import match_aov_pattern +from openpype.lib import is_running_from_build def get_resources(project_name, version, extension=None): @@ -117,15 +118,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): deadline_plugin = "OpenPype" targets = ["local"] - hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"] + hosts = ["fusion", "max", "maya", "nuke", + "celaction", "aftereffects", "harmony"] families = ["render.farm", "prerender.farm", - "renderlayer", "imagesequence", "vrayscene"] + "renderlayer", "imagesequence", "maxrender", "vrayscene"] aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE - "celaction": [r".*"]} + "celaction": [r".*"], + "max": [r".*"]} environ_job_filter = [ "OPENPYPE_METADATA_FILE" @@ -136,10 +139,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_SERVER", "AVALON_APP_NAME", - "OPENPYPE_USERNAME", - "OPENPYPE_VERSION" + "OPENPYPE_USERNAME" ] + # Add OpenPype version if we are running from build. 
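+    # Note: this append runs once at import time (class body), not per
+    # submitted instance.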
+ if is_running_from_build(): + environ_keys.append("OPENPYPE_VERSION") + # custom deadline attributes deadline_department = "" deadline_pool = "" @@ -292,8 +298,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Group": self.deadline_group, "Pool": instance.data.get("primaryPool"), "SecondaryPool": instance.data.get("secondaryPool"), - - "OutputDirectory0": output_dir + # ensure the outputdirectory with correct slashes + "OutputDirectory0": output_dir.replace("\\", "/") }, "PluginInfo": { "Version": self.plugin_pype_version, @@ -514,6 +520,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # toggle preview on if multipart is on if instance_data.get("multipartExr"): + self.log.debug("Adding preview tag because its multipartExr") preview = True self.log.debug("preview:{}".format(preview)) new_instance = deepcopy(instance_data) @@ -593,6 +600,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if instance["useSequenceForReview"]: # toggle preview on if multipart is on if instance.get("multipartExr", False): + self.log.debug( + "Adding preview tag because its multipartExr" + ) preview = True else: render_file_name = list(collection)[0] @@ -700,8 +710,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if preview: if "ftrack" not in families: if os.environ.get("FTRACK_SERVER"): + self.log.debug( + "Adding \"ftrack\" to families because of preview tag." + ) families.append("ftrack") if "review" not in families: + self.log.debug( + "Adding \"review\" to families because of preview tag." + ) families.append("review") instance["families"] = families @@ -960,6 +976,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ''' render_job = None + submission_type = "" if instance.data.get("toBeRenderedOn") == "deadline": render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index b0560ce1e8..e4fc64269a 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -201,19 +201,21 @@ def get_openpype_versions(dir_list): print(">>> Getting OpenPype executable ...") openpype_versions = [] - install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if install_dir: - print("--- Looking for OpenPype at: {}".format(install_dir)) - sub_dirs = [ - f.path for f in os.scandir(install_dir) - if f.is_dir() - ] - for subdir in sub_dirs: - version = get_openpype_version_from_path(subdir) - if not version: - continue - print(" - found: {} - {}".format(version, subdir)) - openpype_versions.append((version, subdir)) + # special case of multiple install dirs + for dir_list in dir_list.split(","): + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + print("--- Looking for OpenPype at: {}".format(install_dir)) + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = get_openpype_version_from_path(subdir) + if not version: + continue + print(" - found: {} - {}".format(version, subdir)) + openpype_versions.append((version, subdir)) return openpype_versions diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index ab4a3d5e9b..6e1b973fb9 100644 --- 
a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -107,20 +107,23 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): "Scanning for compatible requested " f"version {requested_version}")) dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") + # clean '\ ' for MacOS pasting if platform.system().lower() == "darwin": dir_list = dir_list.replace("\\ ", " ") - install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if install_dir: - sub_dirs = [ - f.path for f in os.scandir(install_dir) - if f.is_dir() - ] - for subdir in sub_dirs: - version = self.get_openpype_version_from_path(subdir) - if not version: - continue - openpype_versions.append((version, subdir)) + + for dir_list in dir_list.split(","): + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = self.get_openpype_version_from_path(subdir) + if not version: + continue + openpype_versions.append((version, subdir)) exe_list = self.GetConfigEntry("OpenPypeExecutable") # clean '\ ' for MacOS pasting diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index 2d06e2ab02..75f43cb22f 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,6 +3,7 @@ import json import copy import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path from openpype.lib.openpype_version import get_openpype_version from openpype.lib.transcoding import ( get_ffprobe_streams, @@ -55,6 +56,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "reference": "reference" } keep_first_subset_name_for_review = True + upload_reviewable_with_origin_name = False asset_versions_status_profiles = [] additional_metadata_keys = [] @@ -153,7 +155,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): if not review_representations or has_movie_review: for repre in thumbnail_representations: - repre_path = self._get_repre_path(instance, repre, False) + repre_path = get_publish_repre_path(instance, repre, False) if not repre_path: self.log.warning( "Published path is not set and source was removed." @@ -210,7 +212,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "from {}".format(repre)) continue - repre_path = self._get_repre_path(instance, repre, False) + repre_path = get_publish_repre_path(instance, repre, False) if not repre_path: self.log.warning( "Published path is not set and source was removed." 
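The next hunk uploads the reviewable a second time under its original file name (guarded by the new `upload_reviewable_with_origin_name` setting) by deep-copying the already prepared component and renaming it. A minimal sketch of that copy-and-rename step (toy component dict and an illustrative path, not the exact ftrack payload):

```python
import copy
import os

# Toy review component, loosely shaped like the integrator's payload.
repre_path = "/tmp/sh010_compositing_v017.mp4"
review_item = {
    "component_data": {"name": "ftrackreview-mp4"},
    "component_path": repre_path,
}
component_list = [review_item]

# Duplicate the component and register it under the source file's name.
origin_name_component = copy.deepcopy(review_item)
filename = os.path.basename(repre_path)
origin_name_component["component_data"]["name"] = os.path.splitext(filename)[0]
component_list.append(origin_name_component)

assert component_list[1]["component_data"]["name"] == "sh010_compositing_v017"
```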
@@ -293,6 +295,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ) # Add item to component list component_list.append(review_item) + if self.upload_reviewable_with_origin_name: + origin_name_component = copy.deepcopy(review_item) + filename = os.path.basename(repre_path) + origin_name_component["component_data"]["name"] = ( + os.path.splitext(filename)[0] + ) + component_list.append(origin_name_component) # Duplicate thumbnail component for all not first reviews if first_thumbnail_component is not None: @@ -324,7 +333,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add others representations as component for repre in other_representations: - published_path = self._get_repre_path(instance, repre, True) + published_path = get_publish_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it @@ -364,51 +373,6 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): def _collect_additional_metadata(self, streams): pass - def _get_repre_path(self, instance, repre, only_published): - """Get representation path that can be used for integration. - - When 'only_published' is set to true the validation of path is not - relevant. In that case we just need what is set in 'published_path' - as "reference". The reference is not used to get or upload the file but - for reference where the file was published. - - Args: - instance (pyblish.Instance): Processed instance object. Used - for source of staging dir if representation does not have - filled it. - repre (dict): Representation on instance which could be and - could not be integrated with main integrator. - only_published (bool): Care only about published paths and - ignore if filepath is not existing anymore. - - Returns: - str: Path to representation file. - None: Path is not filled or does not exists. 
- """ - - published_path = repre.get("published_path") - if published_path: - published_path = os.path.normpath(published_path) - if os.path.exists(published_path): - return published_path - - if only_published: - return published_path - - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files - - staging_dir = repre.get("stagingDir") - if not staging_dir: - staging_dir = instance.data["stagingDir"] - src_path = os.path.normpath(os.path.join(staging_dir, filename)) - if os.path.exists(src_path): - return src_path - return None - def _get_asset_version_status_name(self, instance): if not self.asset_versions_status_profiles: return None diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py index cfd2d10fd9..fc15d5515f 100644 --- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py @@ -1,6 +1,8 @@ import os import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path + class IntegrateShotgridPublish(pyblish.api.InstancePlugin): """ @@ -22,7 +24,9 @@ class IntegrateShotgridPublish(pyblish.api.InstancePlugin): for representation in instance.data.get("representations", []): - local_path = representation.get("published_path") + local_path = get_publish_repre_path( + instance, representation, False + ) code = os.path.basename(local_path) if representation.get("tags", []): diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py index a1b7140e22..adfdca718c 100644 --- a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -1,6 +1,7 @@ -import os import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path + class IntegrateShotgridVersion(pyblish.api.InstancePlugin): """Integrate Shotgrid Version""" @@ -41,8 +42,9 @@ class IntegrateShotgridVersion(pyblish.api.InstancePlugin): data_to_update["sg_status_list"] = status for representation in instance.data.get("representations", []): - local_path = representation.get("published_path") - code = os.path.basename(local_path) + local_path = get_publish_repre_path( + instance, representation, False + ) if "shotgridreview" in representation.get("tags", []): diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 612031efac..4e2557ccc7 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -8,6 +8,7 @@ from abc import ABCMeta, abstractmethod import time from openpype.client import OpenPypeMongoConnection +from openpype.pipeline.publish import get_publish_repre_path from openpype.lib.plugin_tools import prepare_template_data @@ -167,9 +168,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): thumbnail_path = None for repre in instance.data.get("representations", []): if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []): - repre_thumbnail_path = ( - repre.get("published_path") or - os.path.join(repre["stagingDir"], repre["files"]) + repre_thumbnail_path = get_publish_repre_path( + instance, repre, False ) if os.path.exists(repre_thumbnail_path): 
thumbnail_path = repre_thumbnail_path @@ -184,9 +184,8 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): if (repre.get("review") or "review" in tags or "burnin" in tags): - repre_review_path = ( - repre.get("published_path") or - os.path.join(repre["stagingDir"], repre["files"]) + repre_review_path = get_publish_repre_path( + instance, repre, False ) if os.path.exists(repre_review_path): review_path = repre_review_path diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index bce304ab55..7672c49eb3 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -8,7 +8,10 @@ import inspect from uuid import uuid4 from contextlib import contextmanager -from openpype.client import get_assets +import pyblish.logic +import pyblish.api + +from openpype.client import get_assets, get_asset_by_name from openpype.settings import ( get_system_settings, get_project_settings @@ -17,13 +20,11 @@ from openpype.lib.attribute_definitions import ( UnknownDef, serialize_attr_defs, deserialize_attr_defs, + get_default_values, ) from openpype.host import IPublishHost from openpype.pipeline import legacy_io -from openpype.pipeline.mongodb import ( - AvalonMongoDB, - session_data_from_environment, -) +from openpype.pipeline.plugin_discover import DiscoverResult from .creator_plugins import ( Creator, @@ -1338,8 +1339,6 @@ class CreateContext: Args: host(ModuleType): Host implementation which handles implementation and global metadata. - dbcon(AvalonMongoDB): Connection to mongo with context (at least - project). headless(bool): Context is created out of UI (Current not used). reset(bool): Reset context on initialization. discover_publish_plugins(bool): Discover publish plugins during reset @@ -1347,16 +1346,8 @@ class CreateContext: """ def __init__( - self, host, dbcon=None, headless=False, reset=True, - discover_publish_plugins=True + self, host, headless=False, reset=True, discover_publish_plugins=True ): - # Create conncetion if is not passed - if dbcon is None: - session = session_data_from_environment(True) - dbcon = AvalonMongoDB(session) - dbcon.install() - - self.dbcon = dbcon self.host = host # Prepare attribute for logger (Created on demand in `log` property) @@ -1380,6 +1371,10 @@ class CreateContext: " Missing methods: {}" ).format(joined_methods)) + self._current_project_name = None + self._current_asset_name = None + self._current_task_name = None + self._host_is_valid = host_is_valid # Currently unused variable self.headless = headless @@ -1387,6 +1382,8 @@ class CreateContext: # Instances by their ID self._instances_by_id = {} + self.creator_discover_result = None + self.convertor_discover_result = None # Discovered creators self.creators = {} # Prepare categories of creators @@ -1499,11 +1496,20 @@ class CreateContext: @property def host_name(self): + if hasattr(self.host, "name"): + return self.host.name return os.environ["AVALON_APP"] - @property - def project_name(self): - return self.dbcon.active_project() + def get_current_project_name(self): + return self._current_project_name + + def get_current_asset_name(self): + return self._current_asset_name + + def get_current_task_name(self): + return self._current_task_name + + project_name = property(get_current_project_name) @property def log(self): @@ -1520,7 +1526,7 @@ class CreateContext: self.reset_preparation() - self.reset_avalon_context() + self.reset_current_context() self.reset_plugins(discover_publish_plugins) self.reset_context_data() @@ -1567,14 +1573,22 @@ class 
CreateContext: self._collection_shared_data = None self.refresh_thumbnails() - def reset_avalon_context(self): - """Give ability to reset avalon context. + def reset_current_context(self): + """Refresh current context. Reset is based on optional host implementation of `get_current_context` function or using `legacy_io.Session`. Some hosts have ability to change context file without using workfiles - tool but that change is not propagated to + tool but that change is not propagated to 'legacy_io.Session' + nor 'os.environ'. + + Todos: + UI: Current context should be also checked on save - compare + initial values vs. current values. + Related to UI checks: Current workfile can be also considered + as current context information as that's where the metadata + are stored. We should store the workfile (if is available) too. """ project_name = asset_name = task_name = None @@ -1592,12 +1606,9 @@ class CreateContext: if not task_name: task_name = legacy_io.Session.get("AVALON_TASK") - if project_name: - self.dbcon.Session["AVALON_PROJECT"] = project_name - if asset_name: - self.dbcon.Session["AVALON_ASSET"] = asset_name - if task_name: - self.dbcon.Session["AVALON_TASK"] = task_name + self._current_project_name = project_name + self._current_asset_name = asset_name + self._current_task_name = task_name def reset_plugins(self, discover_publish_plugins=True): """Reload plugins. @@ -1611,18 +1622,15 @@ class CreateContext: self._reset_convertor_plugins() def _reset_publish_plugins(self, discover_publish_plugins): - import pyblish.logic - from openpype.pipeline import OpenPypePyblishPluginMixin from openpype.pipeline.publish import ( - publish_plugins_discover, - DiscoverResult + publish_plugins_discover ) # Reset publish plugins self._attr_plugins_by_family = {} - discover_result = DiscoverResult() + discover_result = DiscoverResult(pyblish.api.Plugin) plugins_with_defs = [] plugins_by_targets = [] plugins_mismatch_targets = [] @@ -1661,7 +1669,9 @@ class CreateContext: creators = {} autocreators = {} manual_creators = {} - for creator_class in discover_creator_plugins(): + report = discover_creator_plugins(return_report=True) + self.creator_discover_result = report + for creator_class in report.plugins: if inspect.isabstract(creator_class): self.log.info( "Skipping abstract Creator {}".format(str(creator_class)) @@ -1706,7 +1716,9 @@ class CreateContext: def _reset_convertor_plugins(self): convertors_plugins = {} - for convertor_class in discover_convertor_plugins(): + report = discover_convertor_plugins(return_report=True) + self.convertor_discover_result = report + for convertor_class in report.plugins: if inspect.isabstract(convertor_class): self.log.info( "Skipping abstract Creator {}".format(str(convertor_class)) @@ -1792,40 +1804,128 @@ class CreateContext: with self.bulk_instances_collection(): self._bulk_instances_to_process.append(instance) - def create(self, identifier, *args, **kwargs): - """Wrapper for creators to trigger created. + def _get_creator_in_create(self, identifier): + """Creator by identifier with unified error. - Different types of creators may expect different arguments thus the - hints for args are blind. + Helper method to get creator by identifier with same error when creator + is not available. Args: - identifier (str): Creator's identifier. - *args (Tuple[Any]): Arguments for create method. - **kwargs (Dict[Any, Any]): Keyword argument for create method. + identifier (str): Identifier of creator plugin. + + Returns: + BaseCreator: Creator found by identifier. 
+ + Raises: + CreatorError: When identifier is not known. """ - error_message = "Failed to run Creator with identifier \"{}\". {}" creator = self.creators.get(identifier) - label = getattr(creator, "label", None) - failed = False - add_traceback = False - exc_info = None - try: - # Fake CreatorError (Could be maybe specific exception?) - if creator is None: + # Fake CreatorError (Could be maybe specific exception?) + if creator is None: + raise CreatorError( + "Creator {} was not found".format(identifier) + ) + return creator + + def create( + self, + creator_identifier, + variant, + asset_doc=None, + task_name=None, + pre_create_data=None + ): + """Trigger create of plugins with standartized arguments. + + Arguments 'asset_doc' and 'task_name' use current context as default + values. If only 'task_name' is provided it will be overriden by + task name from current context. If 'task_name' is not provided + when 'asset_doc' is, it is considered that task name is not specified, + which can lead to error if subset name template requires task name. + + Args: + creator_identifier (str): Identifier of creator plugin. + variant (str): Variant used for subset name. + asset_doc (Dict[str, Any]): Asset document which define context of + creation (possible context of created instance/s). + task_name (str): Name of task to which is context related. + pre_create_data (Dict[str, Any]): Pre-create attribute values. + + Returns: + Any: Output of triggered creator's 'create' method. + + Raises: + CreatorError: If creator was not found or asset is empty. + """ + + creator = self._get_creator_in_create(creator_identifier) + + project_name = self.project_name + if asset_doc is None: + asset_name = self.get_current_asset_name() + asset_doc = get_asset_by_name(project_name, asset_name) + task_name = self.get_current_task_name() + if asset_doc is None: raise CreatorError( - "Creator {} was not found".format(identifier) + "Asset with name {} was not found".format(asset_name) ) - creator.create(*args, **kwargs) + if pre_create_data is None: + pre_create_data = {} + + precreate_attr_defs = creator.get_pre_create_attr_defs() or [] + # Create default values of precreate data + _pre_create_data = get_default_values(precreate_attr_defs) + # Update passed precreate data to default values + # TODO validate types + _pre_create_data.update(pre_create_data) + + subset_name = creator.get_subset_name( + variant, + task_name, + asset_doc, + project_name, + self.host_name + ) + instance_data = { + "asset": asset_doc["name"], + "task": task_name, + "family": creator.family, + "variant": variant + } + return creator.create( + subset_name, + instance_data, + _pre_create_data + ) + + def _create_with_unified_error( + self, identifier, creator, *args, **kwargs + ): + error_message = "Failed to run Creator with identifier \"{}\". 
{}" + + label = None + add_traceback = False + result = None + fail_info = None + success = False + + try: + # Try to get creator and his label + if creator is None: + creator = self._get_creator_in_create(identifier) + label = getattr(creator, "label", label) + + # Run create + result = creator.create(*args, **kwargs) + success = True except CreatorError: - failed = True exc_info = sys.exc_info() self.log.warning(error_message.format(identifier, exc_info[1])) except: - failed = True add_traceback = True exc_info = sys.exc_info() self.log.warning( @@ -1833,12 +1933,35 @@ class CreateContext: exc_info=True ) - if failed: - raise CreatorsCreateFailed([ - prepare_failed_creator_operation_info( - identifier, label, exc_info, add_traceback - ) - ]) + if not success: + fail_info = prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + return result, fail_info + + def create_with_unified_error(self, identifier, *args, **kwargs): + """Trigger create but raise only one error if anything fails. + + Added to raise unified exception. Capture any possible issues and + reraise it with unified information. + + Args: + identifier (str): Identifier of creator. + *args (Tuple[Any]): Arguments for create method. + **kwargs (Dict[Any, Any]): Keyword argument for create method. + + Raises: + CreatorsCreateFailed: When creation fails due to any possible + reason. If anything goes wrong this is only possible exception + the method should raise. + """ + + result, fail_info = self._create_with_unified_error( + identifier, None, *args, **kwargs + ) + if fail_info is not None: + raise CreatorsCreateFailed([fail_info]) + return result def _remove_instance(self, instance): self._instances_by_id.pop(instance.id, None) @@ -1968,38 +2091,12 @@ class CreateContext: Reset instances if any autocreator executed properly. """ - error_message = "Failed to run AutoCreator with identifier \"{}\". {}" failed_info = [] for creator in self.sorted_autocreators: identifier = creator.identifier - label = creator.label - failed = False - add_traceback = False - try: - creator.create() - - except CreatorError: - failed = True - exc_info = sys.exc_info() - self.log.warning(error_message.format(identifier, exc_info[1])) - - # Use bare except because some hosts raise their exceptions that - # do not inherit from python's `BaseException` - except: - failed = True - add_traceback = True - exc_info = sys.exc_info() - self.log.warning( - error_message.format(identifier, ""), - exc_info=True - ) - - if failed: - failed_info.append( - prepare_failed_creator_operation_info( - identifier, label, exc_info, add_traceback - ) - ) + _, fail_info = self._create_with_unified_error(identifier, creator) + if fail_info is not None: + failed_info.append(fail_info) if failed_info: raise CreatorsCreateFailed(failed_info) diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 53acb618ed..bd3fbaf78f 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -79,6 +79,10 @@ class SubsetConvertorPlugin(object): self._log = Logger.get_logger(self.__class__.__name__) return self._log + @property + def host(self): + return self._create_context.host + @abstractproperty def identifier(self): """Converted identifier. @@ -153,6 +157,12 @@ class BaseCreator: Single object should be used for multiple instances instead of single instance per one creator object. 
Do not store temp data or mid-process data to `self` if it's not Plugin specific. + + Args: + project_settings (Dict[str, Any]): Project settings. + system_settings (Dict[str, Any]): System settings. + create_context (CreateContext): Context which initialized creator. + headless (bool): Running in headless mode. """ # Label shown in UI @@ -605,12 +615,12 @@ class AutoCreator(BaseCreator): pass -def discover_creator_plugins(): - return discover(BaseCreator) +def discover_creator_plugins(*args, **kwargs): + return discover(BaseCreator, *args, **kwargs) -def discover_convertor_plugins(): - return discover(SubsetConvertorPlugin) +def discover_convertor_plugins(*args, **kwargs): + return discover(SubsetConvertorPlugin, *args, **kwargs) def discover_legacy_creator_plugins(): diff --git a/openpype/pipeline/create/subset_name.py b/openpype/pipeline/create/subset_name.py index ed05dd6083..3f0692b46a 100644 --- a/openpype/pipeline/create/subset_name.py +++ b/openpype/pipeline/create/subset_name.py @@ -70,7 +70,8 @@ def get_subset_name( host_name=None, default_template=None, dynamic_data=None, - project_settings=None + project_settings=None, + family_filter=None, ): """Calculate subset name based on passed context and OpenPype settings. @@ -82,23 +83,35 @@ def get_subset_name( That's main reason why so many arguments are required to calculate subset name. + Option to pass family filter was added for special cases when creator or + automated publishing require special subset name template which would be + hard to maintain using its family value. + Why not just pass the right family? -> Family is also used as fill + value and for filtering of publish plugins. + + Todos: + Find better filtering options to avoid requirement of + argument 'family_filter'. + Args: family (str): Instance family. variant (str): In most of the cases it is user input during creation. task_name (str): Task name on which context is instance created. asset_doc (dict): Queried asset document with its tasks in data. Used to get task type. - project_name (str): Name of project on which is instance created. - Important for project settings that are loaded. - host_name (str): One of filtering criteria for template profile - filters. - default_template (str): Default template if any profile does not match - passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' is used if - is not passed. - dynamic_data (dict): Dynamic data specific for a creator which creates - instance. - project_settings (Union[Dict[str, Any], None]): Prepared settings for - project. Settings are queried if not passed. + project_name (Optional[str]): Name of project on which is instance + created. Important for project settings that are loaded. + host_name (Optional[str]): One of filtering criteria for template + profile filters. + default_template (Optional[str]): Default template if any profile does + not match passed context. Constant 'DEFAULT_SUBSET_TEMPLATE' + is used if is not passed. + dynamic_data (Optional[Dict[str, Any]]): Dynamic data specific for + a creator which creates instance. + project_settings (Optional[Union[Dict[str, Any]]]): Prepared settings + for project. Settings are queried if not passed. + family_filter (Optional[str]): Use different family for subset template + filtering. Value of 'family' is used when not passed. 
""" if not family: @@ -119,7 +132,7 @@ def get_subset_name( template = get_subset_name_template( project_name, - family, + family_filter or family, task_name, task_type, host_name, diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index e30923f922..fefdb8537b 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -28,7 +28,6 @@ from openpype.lib import ( TemplateUnsolved, ) from openpype.pipeline import ( - schema, legacy_io, Anatomy, ) @@ -643,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None): def path_from_config(): try: - version_, subset, asset, project = dbcon.parenthood(representation) + project_name = dbcon.active_project() + version_, subset, asset, project = get_representation_parents( + project_name, representation + ) except ValueError: log.debug( "Representation %s wasn't found in database, " diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py index 7edd9ac290..e5257b801a 100644 --- a/openpype/pipeline/plugin_discover.py +++ b/openpype/pipeline/plugin_discover.py @@ -135,11 +135,12 @@ class PluginDiscoverContext(object): allow_duplicates (bool): Validate class name duplications. ignore_classes (list): List of classes that will be ignored and not added to result. + return_report (bool): Output will be full report if set to 'True'. Returns: - DiscoverResult: Object holding succesfully discovered plugins, - ignored plugins, plugins with missing abstract implementation - and duplicated plugin. + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. """ if not ignore_classes: @@ -268,9 +269,34 @@ class _GlobalDiscover: return cls._context -def discover(superclass, allow_duplicates=True): +def discover( + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False +): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. + allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. + return_report (bool): Output will be full report if set to 'True'. + + Returns: + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. 
+ """ + context = _GlobalDiscover.get_context() - return context.discover(superclass, allow_duplicates) + return context.discover( + superclass, + allow_duplicates, + ignore_classes, + return_report + ) def get_last_discovered_plugins(superclass): diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index dc6fc0f97a..05ba1c9c33 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -25,7 +25,6 @@ from .publish_plugins import ( from .lib import ( get_publish_template_name, - DiscoverResult, publish_plugins_discover, load_help_content_from_plugin, load_help_content_from_filepath, @@ -36,6 +35,7 @@ from .lib import ( filter_instances_for_context_plugin, context_plugin_should_run, get_instance_staging_dir, + get_publish_repre_path, ) from .abstract_expected_files import ExpectedFiles @@ -68,7 +68,6 @@ __all__ = ( "get_publish_template_name", - "DiscoverResult", "publish_plugins_discover", "load_help_content_from_plugin", "load_help_content_from_filepath", @@ -79,6 +78,7 @@ __all__ = ( "filter_instances_for_context_plugin", "context_plugin_should_run", "get_instance_staging_dir", + "get_publish_repre_path", "ExpectedFiles", diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index c76671fa39..bbc511fc5a 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -10,11 +10,18 @@ import six import pyblish.plugin import pyblish.api -from openpype.lib import Logger, filter_profiles +from openpype.lib import ( + Logger, + filter_profiles +) from openpype.settings import ( get_project_settings, get_system_settings, ) +from openpype.pipeline import ( + tempdir +) +from openpype.pipeline.plugin_discover import DiscoverResult from .contants import ( DEFAULT_PUBLISH_TEMPLATE, @@ -196,28 +203,6 @@ def get_publish_template_name( return template or default_template -class DiscoverResult: - """Hold result of publish plugins discovery. - - Stores discovered plugins duplicated plugins and file paths which - crashed on execution of file. - """ - def __init__(self): - self.plugins = [] - self.crashed_file_paths = {} - self.duplicated_plugins = [] - - def __iter__(self): - for plugin in self.plugins: - yield plugin - - def __getitem__(self, item): - return self.plugins[item] - - def __setitem__(self, item, value): - self.plugins[item] = value - - class HelpContent: def __init__(self, title, description, detail=None): self.title = title @@ -285,7 +270,7 @@ def publish_plugins_discover(paths=None): """ # The only difference with `pyblish.api.discover` - result = DiscoverResult() + result = DiscoverResult(pyblish.api.Plugin) plugins = dict() plugin_names = [] @@ -595,7 +580,7 @@ def context_plugin_should_run(plugin, context): Args: plugin (pyblish.api.Plugin): Plugin with filters. - context (pyblish.api.Context): Pyblish context with insances. + context (pyblish.api.Context): Pyblish context with instances. Returns: bool: Context plugin should run based on valid instances. @@ -609,12 +594,21 @@ def context_plugin_should_run(plugin, context): def get_instance_staging_dir(instance): """Unified way how staging dir is stored and created on instances. - First check if 'stagingDir' is already set in instance data. If there is - not create new in tempdir. + First check if 'stagingDir' is already set in instance data. + In case there already is new tempdir will not be created. 
+
+    It also supports `OPENPYPE_TMPDIR`, so a studio can define its own
+    temporary shared repository per project or even per more granular
+    context. Template formatting is supported as well, including optional
+    keys. The folder is created if it does not exist yet.
+
+    Available anatomy formatting keys:
+        - root[work | ]
+        - project[name | code]

     Note:
-        Staging dir does not have to be necessarily in tempdir so be carefull
-        about it's usage.
+        Staging dir does not have to be necessarily in tempdir so be careful
+        about its usage.

     Args:
         instance (pyblish.lib.Instance): Instance for which we want to get
             staging dir.
@@ -623,12 +617,73 @@
     Returns:
         str: Path to staging dir of instance.
     """
+    staging_dir = instance.data.get('stagingDir')
+    if staging_dir:
+        return staging_dir

-    staging_dir = instance.data.get("stagingDir")
-    if not staging_dir:
+    anatomy = instance.context.data.get("anatomy")
+
+    # get customized tempdir path from `OPENPYPE_TMPDIR` env var
+    custom_temp_dir = tempdir.create_custom_tempdir(
+        anatomy.project_name, anatomy)
+
+    if custom_temp_dir:
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(
+                prefix="pyblish_tmp_",
+                dir=custom_temp_dir
+            )
+        )
+    else:
         staging_dir = os.path.normpath(
             tempfile.mkdtemp(prefix="pyblish_tmp_")
         )
-        instance.data["stagingDir"] = staging_dir
+    instance.data['stagingDir'] = staging_dir

     return staging_dir
+
+
+def get_publish_repre_path(instance, repre, only_published=False):
+    """Get representation path that can be used for integration.
+
+    When 'only_published' is set to true the validation of path is not
+    relevant. In that case we just need what is set in 'published_path'
+    as "reference". The reference is not used to get or upload the file but
+    for reference where the file was published.
+
+    Args:
+        instance (pyblish.Instance): Processed instance object. Used as
+            the source of the staging dir if the representation does not
+            have one filled.
+        repre (dict): Representation on instance which may or may not have
+            been integrated by the main integrator.
+        only_published (bool): Care only about published paths and ignore
+            whether the file still exists.
+
+    Returns:
+        str: Path to representation file.
+        None: Path is not filled or does not exist.
+    """
+
+    published_path = repre.get("published_path")
+    if published_path:
+        published_path = os.path.normpath(published_path)
+        if os.path.exists(published_path):
+            return published_path
+
+    if only_published:
+        return published_path
+
+    comp_files = repre["files"]
+    if isinstance(comp_files, (tuple, list, set)):
+        filename = comp_files[0]
+    else:
+        filename = comp_files
+
+    staging_dir = repre.get("stagingDir")
+    if not staging_dir:
+        staging_dir = get_instance_staging_dir(instance)
+    src_path = os.path.normpath(os.path.join(staging_dir, filename))
+    if os.path.exists(src_path):
+        return src_path
+    return None
diff --git a/openpype/pipeline/tempdir.py b/openpype/pipeline/tempdir.py
new file mode 100644
index 0000000000..55a1346b08
--- /dev/null
+++ b/openpype/pipeline/tempdir.py
@@ -0,0 +1,59 @@
+"""
+Temporary folder operations
+"""
+
+import os
+from openpype.lib import StringTemplate
+from openpype.pipeline import Anatomy
+
+
+def create_custom_tempdir(project_name, anatomy=None):
+    """ Create custom tempdir
+
+    Template path formatting supports:
+    - optional key formatting
+    - available keys:
+        - root[work | ]
+        - project[name | code]
+
+    Args:
+        project_name (str): project name
+        anatomy (Optional[openpype.pipeline.Anatomy]): Anatomy object
+
+    Returns:
+        str | None: formatted path or None
+    """
+    openpype_tempdir = os.getenv("OPENPYPE_TMPDIR")
+    if not openpype_tempdir:
+        return
+
+    custom_tempdir = None
+    if "{" in openpype_tempdir:
+        if anatomy is None:
+            anatomy = Anatomy(project_name)
+        # create base format data
+        data = {
+            "root": anatomy.roots,
+            "project": {
+                "name": anatomy.project_name,
+                "code": anatomy.project_code,
+            }
+        }
+        # path is anatomy template
+        custom_tempdir = StringTemplate.format_template(
+            openpype_tempdir, data).normalized()
+
+    else:
+        # path is absolute
+        custom_tempdir = openpype_tempdir
+
+    # create the dir path if it doesn't exist yet
+    if not os.path.exists(custom_tempdir):
+        try:
+            # os.makedirs raises if the path can't be created
+            os.makedirs(custom_tempdir)
+        except IOError as error:
+            raise IOError(
+                "Path couldn't be created: {}".format(error))
+
+    return custom_tempdir
diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py
index d3398c885e..5fcf8feb56 100644
--- a/openpype/plugins/publish/collect_from_create_context.py
+++ b/openpype/plugins/publish/collect_from_create_context.py
@@ -32,7 +32,7 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
                 thumbnail_paths_by_instance_id.get(None)
             )

-        project_name = create_context.project_name
+        project_name = create_context.get_current_project_name()
         if project_name:
             context.data["projectName"] = project_name
@@ -53,11 +53,16 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin):
         context.data.update(create_context.context_data_to_store())
         context.data["newPublishing"] = True
         # Update context data
-        for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"):
-            value = create_context.dbcon.Session.get(key)
-            if value is not None:
-                legacy_io.Session[key] = value
-                os.environ[key] = value
+        asset_name = create_context.get_current_asset_name()
+        task_name = create_context.get_current_task_name()
+        for key, value in (
+            ("AVALON_PROJECT", project_name),
+            ("AVALON_ASSET", asset_name),
+            ("AVALON_TASK", task_name)
+        ):
+            if value is not None:
+                legacy_io.Session[key] = value
+                os.environ[key] = value

     def create_instance(
         self,
diff --git a/openpype/settings/defaults/project_anatomy/templates.json
b/openpype/settings/defaults/project_anatomy/templates.json index 32230e0625..02c0e35377 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -55,7 +55,7 @@ }, "source": { "folder": "{root[work]}/{originalDirname}", - "file": "{originalBasename}<.{@frame}><_{udim}>.{ext}", + "file": "{originalBasename}.{ext}", "path": "{@folder}/{@file}" }, "__dynamic_keys_labels__": { @@ -66,4 +66,4 @@ "source": "source" } } -} \ No newline at end of file +} diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 0a4318a659..7183603c4b 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -36,6 +36,18 @@ "scene_patches": [], "strict_error_checking": true }, + "MaxSubmitDeadline": { + "enabled": true, + "optional": false, + "active": true, + "use_published": true, + "priority": 50, + "chunk_size": 10, + "group": "none", + "deadline_pool": "", + "deadline_pool_secondary": "", + "framePerTask": 1 + }, "NukeSubmitDeadline": { "enabled": true, "optional": false, @@ -103,8 +115,11 @@ ], "harmony": [ ".*" + ], + "max": [ + ".*" ] } } } -} \ No newline at end of file +} diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index cdf861df4a..ec48ba52ea 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -324,7 +324,8 @@ "animation", "look", "rig", - "camera" + "camera", + "renderlayer" ], "task_types": [], "tasks": [], @@ -488,7 +489,8 @@ }, "keep_first_subset_name_for_review": true, "asset_versions_status_profiles": [], - "additional_metadata_keys": [] + "additional_metadata_keys": [], + "upload_reviewable_with_origin_name": false }, "IntegrateFtrackFarmStatus": { "farm_status_profiles": [] diff --git a/openpype/settings/defaults/project_settings/max.json b/openpype/settings/defaults/project_settings/max.json new file mode 100644 index 0000000000..84e0c7dba7 --- /dev/null +++ b/openpype/settings/defaults/project_settings/max.json @@ -0,0 +1,8 @@ +{ + "RenderSettings": { + "default_render_image_folder": "renders/3dsmax", + "aov_separator": "underscore", + "image_format": "exr", + "multipass": true + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 64bba7b28c..03c2d325bb 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -815,6 +815,11 @@ "twoSidedLighting": true, "lineAAEnable": true, "multiSample": 8, + "useDefaultMaterial": false, + "wireframeOnShaded": false, + "xray": false, + "jointXray": false, + "backfaceCulling": false, "ssaoEnable": false, "ssaoAmount": 1, "ssaoRadius": 16, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index cd8ea02272..2999d1427d 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -246,6 +246,7 @@ "sourcetype": "python", "title": "Gizmo Note", "command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')", + "icon": "", "shortcut": "" } ] diff --git 
a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 5a3e1dc2df..340181b3a4 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -10,11 +10,39 @@ } }, "stop_timer_on_application_exit": false, - "publish": { - "CollectRenderScene": { - "enabled": false, - "render_layer": "Main" + "create": { + "create_workfile": { + "enabled": true, + "default_variant": "Main", + "default_variants": [] }, + "create_review": { + "enabled": true, + "active_on_create": true, + "default_variant": "Main", + "default_variants": [] + }, + "create_render_scene": { + "enabled": true, + "active_on_create": false, + "mark_for_review": true, + "default_pass_name": "beauty", + "default_variant": "Main", + "default_variants": [] + }, + "create_render_layer": { + "mark_for_review": true, + "default_pass_name": "beauty", + "default_variant": "Main", + "default_variants": [] + }, + "create_render_pass": { + "mark_for_review": true, + "default_variant": "Main", + "default_variants": [] + } + }, + "publish": { "ExtractSequence": { "review_bg": [ 255, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 0b9fbf7470..ebe59c7942 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -82,6 +82,10 @@ "type": "schema", "name": "schema_project_slack" }, + { + "type": "schema", + "name": "schema_project_max" + }, { "type": "schema", "name": "schema_project_maya" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index 03f6489a41..a320dfca4f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -204,6 +204,65 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "MaxSubmitDeadline", + "label": "3dsMax Submit to Deadline", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "boolean", + "key": "use_published", + "label": "Use Published scene" + }, + { + "type": "number", + "key": "priority", + "label": "Priority" + }, + { + "type": "number", + "key": "chunk_size", + "label": "Chunk Size" + }, + { + "type": "text", + "key": "group", + "label": "Group Name" + }, + { + "type": "text", + "key": "deadline_pool", + "label": "Deadline pool" + }, + { + "type": "text", + "key": "deadline_pool_secondary", + "label": "Deadline pool (secondary)" + }, + { + "type": "number", + "key": "framePerTask", + "label": "Frame Per Task" + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index da414cc961..7050721742 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -1037,6 +1037,21 @@ {"fps": "FPS"}, {"code": "Codec"} ] + }, + { + "type": 
"separator" + }, + { + "type": "boolean", + "key": "upload_reviewable_with_origin_name", + "label": "Upload reviewable with origin name" + }, + { + "type": "label", + "label": "Note: Reviewable will be uploaded twice into ftrack when enabled. One with original name and second with required 'ftrackreview-mp4'. That may cause dramatic increase of ftrack storage usage." + }, + { + "type": "separator" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_max.json b/openpype/settings/entities/schemas/projects_schema/schema_project_max.json new file mode 100644 index 0000000000..8a283c1acc --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_max.json @@ -0,0 +1,56 @@ +{ + "type": "dict", + "collapsible": true, + "key": "max", + "label": "Max", + "is_file": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". (dot)"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"bmp": "bmp"}, + {"exr": "exr"}, + {"tif": "tif"}, + {"tiff": "tiff"}, + {"jpg": "jpg"}, + {"png": "png"}, + {"tga": "tga"}, + {"dds": "dds"} + ] + }, + { + "type": "boolean", + "key": "multipass", + "label": "multipass" + } + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json index b1a8cc1812..26c64e6219 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -17,6 +17,11 @@ "key": "menu", "label": "OpenPype Menu shortcuts", "children": [ + { + "type": "text", + "key": "create", + "label": "Create..." + }, { "type": "text", "key": "publish", @@ -288,4 +293,4 @@ "name": "schema_publish_gui_filter" } ] -} \ No newline at end of file +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index db38c938dc..55e60357e5 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -30,14 +30,14 @@ { "type": "dict", "collapsible": true, - "key": "publish", - "label": "Publish plugins", + "key": "create", + "label": "Create plugins", "children": [ { "type": "dict", "collapsible": true, - "key": "CollectRenderScene", - "label": "Collect Render Scene", + "key": "create_workfile", + "label": "Create Workfile", "is_group": true, "checkbox_key": "enabled", "children": [ @@ -47,16 +47,163 @@ "label": "Enabled" }, { - "type": "label", - "label": "It is possible to fill 'render_layer' or 'variant' in subset name template with custom value.
- value of 'render_pass' is always \"beauty\"." + "type": "text", + "key": "default_variant", + "label": "Default variant" }, { - "type": "text", - "key": "render_layer", - "label": "Render Layer" + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } } ] }, + { + "type": "dict", + "collapsible": true, + "key": "create_review", + "label": "Create Review", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "active_on_create", + "label": "Active by default" + }, + { + "type": "text", + "key": "default_variant", + "label": "Default variant" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "create_render_scene", + "label": "Create Render Scene", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "active_on_create", + "label": "Active by default" + }, + { + "type": "boolean", + "key": "mark_for_review", + "label": "Review by default" + }, + { + "type": "text", + "key": "default_pass_name", + "label": "Default beauty pass" + }, + { + "type": "text", + "key": "default_variant", + "label": "Default variant" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "create_render_layer", + "label": "Create Render Layer", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "mark_for_review", + "label": "Review by default" + }, + { + "type": "text", + "key": "default_pass_name", + "label": "Default beauty pass" + }, + { + "type": "text", + "key": "default_variant", + "label": "Default variant" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "create_render_pass", + "label": "Create Render Pass", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "mark_for_review", + "label": "Review by default" + }, + { + "type": "text", + "key": "default_variant", + "label": "Default variant" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + } + ] + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index 62c33f55fc..1f0e4eeffb 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -184,6 +184,10 @@ { "type": "splitter" }, + { + "type": "label", + "label": "Display" + }, { "type":"boolean", "key": "renderDepthOfField", @@ -221,6 +225,31 @@ { "type": "splitter" }, + { + "type": "boolean", + "key": "useDefaultMaterial", + "label": "Use Default Material" + }, + { + "type": "boolean", + "key": "wireframeOnShaded", + "label": "Wireframe On Shaded" + }, + { + "type": 
"boolean", + "key": "xray", + "label": "X-Ray" + }, + { + "type": "boolean", + "key": "jointXray", + "label": "X-Ray Joints" + }, + { + "type": "boolean", + "key": "backfaceCulling", + "label": "Backface Culling" + }, { "type": "boolean", "key": "ssaoEnable", diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py index c8952e5a1c..669706d470 100644 --- a/openpype/tests/test_lib_restructuralization.py +++ b/openpype/tests/test_lib_restructuralization.py @@ -5,11 +5,9 @@ def test_backward_compatibility(printer): printer("Test if imports still work") try: - from openpype.lib import filter_pyblish_plugins from openpype.lib import execute_hook from openpype.lib import PypeHook - from openpype.lib import get_latest_version from openpype.lib import ApplicationLaunchFailed from openpype.lib import get_ffmpeg_tool_path @@ -18,10 +16,6 @@ def test_backward_compatibility(printer): from openpype.lib import get_version_from_path from openpype.lib import version_up - from openpype.lib import is_latest - from openpype.lib import any_outdated - from openpype.lib import get_asset - from openpype.lib import get_linked_assets from openpype.lib import get_ffprobe_streams from openpype.hosts.fusion.lib import switch_item diff --git a/openpype/tests/test_pyblish_filter.py b/openpype/tests/test_pyblish_filter.py index ea23da26e4..b74784145f 100644 --- a/openpype/tests/test_pyblish_filter.py +++ b/openpype/tests/test_pyblish_filter.py @@ -1,9 +1,9 @@ -from . import lib +import os import pyblish.api import pyblish.util import pyblish.plugin -from openpype.lib import filter_pyblish_plugins -import os +from openpype.pipeline.publish.lib import filter_pyblish_plugins +from . import lib def test_pyblish_plugin_filter_modifier(printer, monkeypatch): diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 435db5fcb3..023a20ca5e 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -169,6 +169,8 @@ class PublishReport: def __init__(self, controller): self.controller = controller + self._create_discover_result = None + self._convert_discover_result = None self._publish_discover_result = None self._plugin_data = [] self._plugin_data_with_plugin = [] @@ -181,6 +183,10 @@ class PublishReport: def reset(self, context, create_context): """Reset report and clear all data.""" + self._create_discover_result = create_context.creator_discover_result + self._convert_discover_result = ( + create_context.convertor_discover_result + ) self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] @@ -293,9 +299,19 @@ class PublishReport: if plugin not in self._stored_plugins: plugins_data.append(self._create_plugin_data_item(plugin)) - crashed_file_paths = {} + reports = [] + if self._create_discover_result is not None: + reports.append(self._create_discover_result) + + if self._convert_discover_result is not None: + reports.append(self._convert_discover_result) + if self._publish_discover_result is not None: - items = self._publish_discover_result.crashed_file_paths.items() + reports.append(self._publish_discover_result) + + crashed_file_paths = {} + for report in reports: + items = report.crashed_file_paths.items() for filepath, exc_info in items: crashed_file_paths[filepath] = "".join( traceback.format_exception(*exc_info) @@ -1573,20 +1589,19 @@ class PublisherController(BasePublisherController): Handle both creation and publishing parts. 
Args: - dbcon (AvalonMongoDB): Connection to mongo with context. headless (bool): Headless publishing. ATM not implemented or used. """ _log = None - def __init__(self, dbcon=None, headless=False): + def __init__(self, headless=False): super(PublisherController, self).__init__() self._host = registered_host() self._headless = headless self._create_context = CreateContext( - self._host, dbcon, headless=headless, reset=False + self._host, headless=headless, reset=False ) self._publish_plugins_proxy = None @@ -1740,7 +1755,7 @@ class PublisherController(BasePublisherController): self._create_context.reset_preparation() # Reset avalon context - self._create_context.reset_avalon_context() + self._create_context.reset_current_context() self._asset_docs_cache.reset() @@ -2004,9 +2019,10 @@ class PublisherController(BasePublisherController): success = True try: - self._create_context.create( + self._create_context.create_with_unified_error( creator_identifier, subset_name, instance_data, options ) + except CreatorsOperationFailed as exc: success = False self._emit_event( diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index 47f8ebb914..3fd5243ce9 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -385,6 +385,7 @@ class InstanceCardWidget(CardWidget): self._last_subset_name = None self._last_variant = None + self._last_label = None icon_widget = IconValuePixmapLabel(group_icon, self) icon_widget.setObjectName("FamilyIconLabel") @@ -462,14 +463,17 @@ class InstanceCardWidget(CardWidget): def _update_subset_name(self): variant = self.instance["variant"] subset_name = self.instance["subset"] + label = self.instance.label if ( variant == self._last_variant and subset_name == self._last_subset_name + and label == self._last_label ): return self._last_variant = variant self._last_subset_name = subset_name + self._last_label = label # Make `variant` bold label = html_escape(self.instance.label) found_parts = set(re.findall(variant, label, re.IGNORECASE)) diff --git a/openpype/tools/publisher/widgets/create_widget.py b/openpype/tools/publisher/widgets/create_widget.py index dbf075c216..ef9c5b98fe 100644 --- a/openpype/tools/publisher/widgets/create_widget.py +++ b/openpype/tools/publisher/widgets/create_widget.py @@ -457,13 +457,14 @@ class CreateWidget(QtWidgets.QWidget): # TODO add details about creator new_creators.add(identifier) if identifier in existing_items: + is_new = False item = existing_items[identifier] else: + is_new = True item = QtGui.QStandardItem() item.setFlags( QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable ) - self._creators_model.appendRow(item) item.setData(creator_item.label, QtCore.Qt.DisplayRole) item.setData(creator_item.show_order, CREATOR_SORT_ROLE) @@ -473,6 +474,8 @@ class CreateWidget(QtWidgets.QWidget): CREATOR_THUMBNAIL_ENABLED_ROLE ) item.setData(creator_item.family, FAMILY_ROLE) + if is_new: + self._creators_model.appendRow(item) # Remove families that are no more available for identifier in (old_creators - new_creators): diff --git a/openpype/tools/publisher/widgets/widgets.py b/openpype/tools/publisher/widgets/widgets.py index 587bcb059d..8da3886419 100644 --- a/openpype/tools/publisher/widgets/widgets.py +++ b/openpype/tools/publisher/widgets/widgets.py @@ -250,21 +250,25 @@ class PublishReportBtn(PublishIconBtn): self._actions = [] def add_action(self, label, identifier): - action = QtWidgets.QAction(label) - 
action.setData(identifier)
-        action.triggered.connect(
-            functools.partial(self._on_action_trigger, action)
+        self._actions.append(
+            (label, identifier)
         )
-        self._actions.append(action)
 
-    def _on_action_trigger(self, action):
-        identifier = action.data()
+    def _on_action_trigger(self, identifier):
         self.triggered.emit(identifier)
 
     def mouseReleaseEvent(self, event):
         super(PublishReportBtn, self).mouseReleaseEvent(event)
         menu = QtWidgets.QMenu(self)
-        menu.addActions(self._actions)
+        actions = []
+        for item in self._actions:
+            label, identifier = item
+            action = QtWidgets.QAction(label, menu)
+            action.triggered.connect(
+                functools.partial(self._on_action_trigger, identifier)
+            )
+            actions.append(action)
+        menu.addActions(actions)
         menu.exec_(event.globalPos())
diff --git a/openpype/tools/publisher/window.py b/openpype/tools/publisher/window.py
index 097e289f32..6f7ffdb8ea 100644
--- a/openpype/tools/publisher/window.py
+++ b/openpype/tools/publisher/window.py
@@ -366,7 +366,7 @@ class PublisherWindow(QtWidgets.QDialog):
 
     def make_sure_is_visible(self):
         if self._window_is_visible:
-            self.setWindowState(QtCore.Qt.ActiveWindow)
+            self.setWindowState(QtCore.Qt.WindowActive)
         else:
             self.show()
 
@@ -566,24 +566,24 @@ class PublisherWindow(QtWidgets.QDialog):
     def _go_to_publish_tab(self):
         self._set_current_tab("publish")
 
-    def _go_to_details_tab(self):
-        self._set_current_tab("details")
-
     def _go_to_report_tab(self):
         self._set_current_tab("report")
 
+    def _go_to_details_tab(self):
+        self._set_current_tab("details")
+
     def _is_on_create_tab(self):
         return self._is_current_tab("create")
 
     def _is_on_publish_tab(self):
         return self._is_current_tab("publish")
 
-    def _is_on_details_tab(self):
-        return self._is_current_tab("details")
-
     def _is_on_report_tab(self):
         return self._is_current_tab("report")
 
+    def _is_on_details_tab(self):
+        return self._is_current_tab("details")
+
     def _set_publish_overlay_visibility(self, visible):
         if visible:
             widget = self._publish_overlay
@@ -647,16 +647,10 @@ class PublisherWindow(QtWidgets.QDialog):
         # otherwise 'create' is used
         #   - this happens only on first show
         if first_reset:
-            if self._overview_widget.has_items():
-                self._go_to_publish_tab()
-            else:
-                self._go_to_create_tab()
+            self._go_to_create_tab()
 
-        elif (
-            not self._is_on_create_tab()
-            and not self._is_on_publish_tab()
-        ):
-            # If current tab is not 'Create' or 'Publish' go to 'Publish'
+        elif self._is_on_report_tab():
+            # Go to 'Publish' tab if the user is on the 'Report' tab
             #   - this can happen when publishing started and was reset
             #     at that moment it doesn't make sense to stay at publish
             #     specific tabs.
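The `setWindowState` one-liner above is worth a pause: `QtCore.Qt.WindowActive` is a `Qt.WindowState` value, which is what `setWindowState()` expects, so this change makes the re-activation actually work. A minimal sketch of the corrected pattern, assuming a generic `qtpy` widget rather than OpenPype's actual window class:

```python
# Hedged sketch - not OpenPype's code. Shows the Qt.WindowActive usage
# from the diff above in a self-contained form.
from qtpy import QtCore, QtWidgets

def make_sure_is_visible(window):
    """Show the window, or re-activate it if it is already visible."""
    if window.isVisible():
        # Qt.WindowActive is a Qt.WindowState flag; it clears the
        # minimized state and marks the window as the active one.
        window.setWindowState(QtCore.Qt.WindowActive)
        window.raise_()
        window.activateWindow()
    else:
        window.show()
```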
diff --git a/openpype/tools/workfiles/files_widget.py b/openpype/tools/workfiles/files_widget.py
index 765d32b3d5..18be746d49 100644
--- a/openpype/tools/workfiles/files_widget.py
+++ b/openpype/tools/workfiles/files_widget.py
@@ -621,7 +621,7 @@ class FilesWidget(QtWidgets.QWidget):
             "caption": "Work Files",
             "filter": ext_filter
         }
-        if qtpy.API in ("pyside", "pyside2"):
+        if qtpy.API in ("pyside", "pyside2", "pyside6"):
             kwargs["dir"] = self._workfiles_root
         else:
             kwargs["directory"] = self._workfiles_root
diff --git a/openpype/version.py b/openpype/version.py
index 8dfd638414..bb5171764c 100644
--- a/openpype/version.py
+++ b/openpype/version.py
@@ -1,3 +1,3 @@
 # -*- coding: utf-8 -*-
 """Package declaring Pype version."""
-__version__ = "3.15.1-nightly.5"
+__version__ = "3.15.2-nightly.1"
diff --git a/pyproject.toml b/pyproject.toml
index a872ed3609..2fc4f6fe39 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "OpenPype"
-version = "3.15.0" # OpenPype
+version = "3.15.1" # OpenPype
 description = "Open VFX and Animation pipeline with support."
 authors = ["OpenPype Team <info@openpype.io>"]
 license = "MIT License"
diff --git a/website/docs/admin_environment.md b/website/docs/admin_environment.md
new file mode 100644
index 0000000000..1eb755b90b
--- /dev/null
+++ b/website/docs/admin_environment.md
@@ -0,0 +1,30 @@
+---
+id: admin_environment
+title: Environment
+sidebar_label: Environment
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## OPENPYPE_TMPDIR
+ - Custom staging directory
+ - Supports anatomy key formatting, e.g. `{root[work]}/{project[name]}/temp`
+ - Supported formatting keys:
+    - root[work]
+    - project[name | code]
+
+## OPENPYPE_DEBUG
+ - Sets the logger to debug mode
+ - Example value: "1" (to activate)
+
+## OPENPYPE_LOG_LEVEL
+ - Stringified numeric value of the log level. [See here for more info](https://docs.python.org/3/library/logging.html#logging-levels)
+ - Example value: "10"
+
+## OPENPYPE_MONGO
+- If set, it takes precedence over the one set in the keyring
+- For more details on how to use it, go [here](admin_use#check-for-mongodb-database-connection)
+
+## OPENPYPE_USERNAME
+- If set, it overrides the system-created username
diff --git a/website/docs/admin_settings_system.md b/website/docs/admin_settings_system.md
index 8aeb281109..d61713ccd5 100644
--- a/website/docs/admin_settings_system.md
+++ b/website/docs/admin_settings_system.md
@@ -13,18 +13,23 @@ Settings applicable to the full studio.
 
 ![general_settings](assets/settings/settings_system_general.png)
 
-**`Studio Name`** - Full name of the studio (can be used as variable on some places)
+### Studio Name
+Full name of the studio (can be used as a variable in some places)
 
-**`Studio Code`** - Studio acronym or a short code (can be used as variable on some places)
+### Studio Code
+Studio acronym or a short code (can be used as a variable in some places)
 
-**`Admin Password`** - After setting admin password, normal user won't have access to OpenPype settings
+### Admin Password
+After setting the admin password, normal users won't have access to OpenPype settings
 and Project Manager GUI. Please keep in mind that this is a studio wide password and it is
 meant purely as a simple barrier to prevent artists from accidental setting changes.
 
-**`Environment`** - Globally applied environment variables that will be appended to any OpenPype process in the studio.
+### Environment
+Globally applied environment variables that will be appended to any OpenPype process in the studio.
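The `OPENPYPE_TMPDIR` entry in the new `admin_environment.md` above supports anatomy-key formatting. As a quick illustration of how such a template resolves, here is a plain-Python sketch; the `root` and `project` values are invented examples, and the real resolution happens through OpenPype's anatomy data rather than this snippet:

```python
# Hypothetical example of resolving an OPENPYPE_TMPDIR template with the
# documented keys; the data values below are made up for illustration.
template = "{root[work]}/{project[name]}/temp"
data = {
    "root": {"work": "P:/work"},
    "project": {"name": "demo_project", "code": "demo"},
}
# str.format supports the same {key[subkey]} item-access syntax.
print(template.format(**data))  # -> P:/work/demo_project/temp
```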
-**`Disk mapping`** - Platform dependent configuration for mapping of virtual disk(s) on an artist's OpenPype machines before OP starts up.
-Uses `subst` command, if configured volume character in `Destination` field already exists, no re-mapping is done for that character(volume).
+### Disk mapping
+- Platform-dependent configuration for mapping virtual disk(s) on an artist's OpenPype machine before OpenPype starts up.
+- Uses the `subst` command; if the volume character configured in the `Destination` field already exists, no re-mapping is done for that character (volume).
 
 ### FFmpeg and OpenImageIO tools
 We bundle FFmpeg tools for all platforms and OpenImageIO tools for Windows and Linux. By default, bundled tools are used, but it is possible to set environment variables `OPENPYPE_FFMPEG_PATHS` and `OPENPYPE_OIIO_PATHS` in system settings environments to look for them in different directory.
@@ -171,4 +176,4 @@ In the image before you can see that we set most of the environment variables in
 In this example MTOA will automatically will the `MAYA_VERSION`(which is set by Maya Application environment) and `MTOA_VERSION` into the `MTOA` variable. We then use the `MTOA` to set all the other variables needed for it to function within Maya.
 
 ![tools](assets/settings/tools_01.png)
-All of the tools defined in here can then be assigned to projects. You can also change the tools versions on any project level all the way down to individual asset or shot overrides. So if you just need to upgrade you render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible.
\ No newline at end of file
+All the tools defined in here can then be assigned to projects. You can also change the tool versions on any project level all the way down to individual asset or shot overrides. So if you just need to upgrade your render plugin for a single shot, while not risking the incompatibilities on the rest of the project, it is possible.
diff --git a/website/docs/artist_hosts_maya.md b/website/docs/artist_hosts_maya.md
index 14619e52a1..9fab845e62 100644
--- a/website/docs/artist_hosts_maya.md
+++ b/website/docs/artist_hosts_maya.md
@@ -308,6 +308,8 @@ Select its root and Go **OpenPype → Create...** and select **Point Cache**.
 
 After that, publishing will create corresponding **abc** files.
 
+When creating the instance, an objectset child named `proxy` will be created. Meshes in the `proxy` objectset will be used as the viewport representation where loading supports proxies. Proxy representations are stored as `resources` of the subset.
+
 Example setup:
 
 ![Maya - Point Cache Example](assets/maya-pointcache_setup.png)
@@ -315,6 +317,7 @@ Example setup:
 
 :::note Publish on farm
 If your studio has Deadline configured, artists could choose to offload potentially long running export of pointache and publish it to the farm. Only thing that is necessary is to toggle `Farm` property in created pointcache instance to True.
+:::
 
 ### Loading Point Caches
 
diff --git a/website/docs/artist_hosts_maya_arnold.md b/website/docs/artist_hosts_maya_arnold.md
new file mode 100644
index 0000000000..b3c02a0894
--- /dev/null
+++ b/website/docs/artist_hosts_maya_arnold.md
@@ -0,0 +1,30 @@
+---
+id: artist_hosts_maya_arnold
+title: Arnold for Maya
+sidebar_label: Arnold
+---
+## Arnold Scene Source (.ass)
+Arnold Scene Source can be published as a single file or a sequence of files, determined by the frame range.
+
+When creating the instance, two objectsets are created: `content` and `proxy`.
Meshes in the `proxy` objectset will be the viewport representation when loading as `standin`. Proxy representations are stored as `resources` of the subset.
+
+### Arnold Scene Source Proxy Workflow
+In order to utilize operators and proxies, the content and proxy nodes need to share the same names (including the shape names). This is done by parenting the content and proxy nodes into separate groups. For example:
+
+![Arnold Scene Source](assets/maya-arnold_scene_source.png)
+
+## Standin
+Arnold Scene Source `ass` and Alembic `abc` are supported to load as standins.
+
+### Standin Proxy Workflow
+If a subset has a proxy representation, this will be used as the display in the viewport. At render time the standin path will be replaced using the recommended string replacement workflow;
+
+https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_maya_operators_am_Updating_procedural_file_paths_with_string_replace_html
+
+Since the content and proxy nodes share the same names and hierarchy, any manual shader assignments will be shared. A rough sketch of the operator setup is shown a little further below.
+
+
+:::note for advanced users
+You can stop the proxy swapping by disabling the string replacement operator found in the container.
+![Arnold Standin](assets/maya-arnold_standin.png)
+:::
diff --git a/website/docs/artist_hosts_tvpaint.md b/website/docs/artist_hosts_tvpaint.md
index a0ce5d5ff8..a8a6cee5f8 100644
--- a/website/docs/artist_hosts_tvpaint.md
+++ b/website/docs/artist_hosts_tvpaint.md
@@ -6,89 +6,77 @@ sidebar_label: TVPaint
 
 - [Work Files](artist_tools_workfiles)
 - [Load](artist_tools_loader)
-- [Create](artist_tools_creator)
-- [Subset Manager](artist_tools_subset_manager)
 - [Scene Inventory](artist_tools_inventory)
 - [Publish](artist_tools_publisher)
 - [Library](artist_tools_library)
 
 ## Setup
 
-When you launch TVPaint with OpenPype for the very first time it is necessary to do some additional steps. Right after the TVPaint launching a few system windows will pop up. 
+When you launch TVPaint with OpenPype for the very first time it is necessary to do some additional steps. Right after TVPaint launches, a few system windows will pop up.
 
 ![permission](assets/tvp_permission.png)
 
-Choose `Replace the file in the destination`. Then another window shows up. 
+Choose `Replace the file in the destination`. Then another window shows up.
 
 ![permission2](assets/tvp_permission2.png)
 
 Click on `Continue`.
 
-After opening TVPaint go to the menu bar: `Windows → Plugins → OpenPype`. 
+After opening TVPaint go to the menu bar: `Windows → Plugins → OpenPype`.
 
 ![pypewindow](assets/tvp_hidden_window.gif)
 
-Another TVPaint window pop up. Please press `Yes`. This window will be presented in every single TVPaint launching. Unfortunately, there is no other way how to workaround it. 
+Another TVPaint window pops up. Please press `Yes`. This window will be presented on every single TVPaint launch. Unfortunately, there is no other way to work around it.
 
 ![writefile](assets/tvp_write_file.png)
 
-Now OpenPype Tools menu is in your TVPaint work area. 
+Now the OpenPype Tools menu is in your TVPaint work area.
 
 ![openpypetools](assets/tvp_openpype_menu.png)
 
-You can start your work. 
+You can start your work.
 
 ---
 
 ## Usage
 
 In TVPaint you can find the Tools in OpenPype menu extension. The OpenPype Tools menu should be available in your work area. However, sometimes it happens that the Tools menu is hidden. You can display the extension panel by going to `Windows -> Plugins -> OpenPype`.
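Circling back to the Standin Proxy Workflow sketch promised in the Arnold section above: this is a rough `maya.cmds` illustration of wiring a string-replacement operator to a standin. The `aiStringReplace` node and its `selection`/`match`/`replace` attributes follow the linked Arnold operator documentation, but every name here should be treated as an assumption to verify against your MtoA version; this is not OpenPype's actual implementation:

```python
# Assumption-heavy sketch of the proxy-to-render path swap; the node type,
# attribute names and the "_proxy_"/"_render_" tokens are hypothetical.
from maya import cmds

standin = cmds.createNode("aiStandIn", name="heroStandinShape")
op = cmds.createNode("aiStringReplace", name="proxyToRenderSwap")
cmds.setAttr(op + ".selection", "*", type="string")
cmds.setAttr(op + ".match", "_proxy_", type="string")
cmds.setAttr(op + ".replace", "_render_", type="string")
# Operators are assumed to attach through the standin's "operators" plug.
cmds.connectAttr(op + ".out", standin + ".operators[0]", force=True)
```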
-
-## Create
-In TVPaint you can create and publish **[Reviews](#review)**, **[Workfile](#workfile)**, **[Render Passes](#render-pass)** and **[Render Layers](#render-layer)**.
-
-You have the possibility to organize your layers by using `Color group`.
-
-On the bottom left corner of your timeline, you will note a `Color group` button.
-
-![colorgroups](assets/tvp_color_groups.png)
-
-It allows you to choose a group by checking one of the colors of the color list.
-
-![colorgroups](assets/tvp_color_groups2.png)
-
-The timeline's animation layer can be marked by the color you pick from your Color group. Layers in the timeline with the same color are gathered into a group represents one render layer.
-
-![timeline](assets/tvp_timeline_color.png)
+## Create & Publish
+To be able to publish, you have to mark what should be published. The marking part is called **Create**. In TVPaint you can create and publish **[Reviews](#review)**, **[Workfile](#workfile)**, **[Render Layers](#render-layer)** and **[Render Passes](#render-pass)**.
 
 :::important
-OpenPype specifically never tries to guess what you want to publish from the scene. Therefore, you have to tell OpenPype what you want to publish. There are three ways how to publish render from the scene.
+TVPaint integration tries not to guess what you want to publish from the scene. Therefore, you should tell it what you want to publish.
 :::
 
+![createlayer](assets/tvp_publisher.png)
+
 ### Review
-`Review` renders the whole file as is and sends the resulting QuickTime to Ftrack.
-- Is automatically created during publishing.
+`Review` will render all visible layers and create a reviewable output.
+- Is automatically created without any manual work.
+- You can disable the created instance if you want to skip review.
 
 ### Workfile
-`Workfile` stores the source workfile as is during publishing (e.g. for backup).
-- Is automatically created during publishing.
+`Workfile` integrates the source TVPaint file during publishing. Publishing the workfile is useful for backups.
+- Is automatically created without any manual work.
+- You can disable the created instance if you want to skip workfile publishing.
 
 ### Render Layer
+Render Layer bakes all the animation layers of one particular color group together.
 
-Render Layer bakes all the animation layers of one particular color group together. 
 
+- In the **Create** tab, pick `Render Layer`
+- Fill `variant`: type in the name that the final published RenderLayer should have according to the naming convention in your studio. *(L10, BG, Hero, etc.)*
+    - The color group will be renamed to the **variant** value
+- Choose a color group from the combobox
+    - or select a layer of a particular color and set the combobox to **<Use selection>**
+- Hit the `Create` button
 
-- Choose any amount of animation layers that need to be rendered together and assign them a color group.
-- Select any layer of a particular color
-- Go to `Creator` and choose `RenderLayer`.
-- In the `Subset`, type in the name that the final published RenderLayer should have according to the naming convention in your studio. *(L10, BG, Hero, etc.)*
-- Press `Create`
-- When you run [publish](#publish), the whole color group will be rendered together and published as a single `RenderLayer`
+After creating a RenderLayer, choose any number of animation layers that need to be rendered together and assign them the color group.
+
+You can change `variant` later in the **Publish** tab.
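The grouping rule behind Render Layers is simple: every TVPaint layer carrying the same color group is rendered together as one Render Layer. A tiny illustrative sketch of that rule in plain Python (the layer dicts are invented for illustration, not TVPaint's real data):

```python
# Illustrative only: group timeline layers by color group id, which is
# the rule a Render Layer follows. The data shapes are made up.
from collections import defaultdict

def group_layers_by_color(layers):
    groups = defaultdict(list)
    for layer in layers:
        groups[layer["group_id"]].append(layer["name"])
    return dict(groups)

layers = [
    {"name": "L020_colour_fx", "group_id": 4},   # yellow group
    {"name": "L020_colour_eye", "group_id": 4},  # yellow group
    {"name": "BG_paint", "group_id": 1},         # blue group
]
# Each resulting group renders together as a single Render Layer.
print(group_layers_by_color(layers))
```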
@@ -97,27 +85,45 @@ Render Layer bakes all the animation layers of one particular color group togeth
+
+**How to mark a TVPaint layer with a color group**
+In the bottom left corner of your timeline, you will note a **Color group** button.
+
+![colorgroups](assets/tvp_color_groups.png)
+
+It allows you to choose a group by checking one of the colors of the color list.
+
+![colorgroups](assets/tvp_color_groups2.png)
+
+The timeline's animation layer can be marked by the color you pick from your Color group. Layers in the timeline with the same color are gathered into a group that represents one render layer.
+
+![timeline](assets/tvp_timeline_color.png)
 
 ### Render Pass
-Render Passes are smaller individual elements of a Render Layer. A `character` render layer might
+Render Passes are smaller individual elements of a [Render Layer](artist_hosts_tvpaint.md#render-layer). A `character` render layer might
 consist of multiple render passes such as `Line`, `Color` and `Shadow`.
+Render Passes are specific because they have to belong to a particular Render Layer. You have to select to which Render Layer the pass belongs. Try to refresh if you don't see a specific Render Layer in the options.
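Because a Render Pass cannot exist without a Render Layer to belong to, creation is essentially a guarded lookup. A toy sketch of that constraint with invented data structures (the actual creator plugin stores this on instance data, not in dictionaries like these):

```python
# Toy model of the "pass must belong to an existing layer" rule; the
# dictionaries are hypothetical, not OpenPype's instance data.
render_layers = {"L020": {"color_group": 4}}

def create_render_pass(variant, render_layer_name):
    if render_layer_name not in render_layers:
        raise ValueError(
            "Render Layer '{}' does not exist yet - create it first "
            "or refresh the list.".format(render_layer_name)
        )
    return {"variant": variant, "render_layer": render_layer_name}

print(create_render_pass("Color", "L020"))
```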
-Render Passes are specific because they have to belong to a particular layer. If you try to create a render pass and did not create any render layers before, an error message will pop up.
 
-When you want to create `RenderPass`
-- choose one or several animation layers within one color group that you want to publish
-- In the Creator, pick `RenderPass`
-- Fill the `Subset` with the name of your pass, e.g. `Color`.
+When you want to create a Render Pass
+- choose one or several TVPaint layers.
+- in the **Create** tab, pick `Render Pass`.
+- fill the `variant` with the desired name of the pass, e.g. `Color`.
+- select the Render Layer you want the Render Pass to belong to from the combobox.
+    - if you don't see the new Render Layer, try refreshing first.
 - Press `Create`
 
+After creating a Render Pass, the selected TVPaint layers are marked with the color group of the Render Layer.
+
+You can change `variant` or the Render Layer later in the **Publish** tab.
+
@@ -126,52 +132,26 @@ When you want to create `RenderPass`
+:::warning
+You cannot change a TVPaint layer's name once you mark it as part of a Render Pass. You would have to remove the created Render Pass and create it again with the new TVPaint layer name.
+:::
+

-In this example, OpenPype will render selected animation layers within the given color group. E.i. the layers *L020_colour_fx*, *L020_colour_mouth*, and *L020_colour_eye* will be rendered as one pass belonging to the yellow RenderLayer. 
+In this example, OpenPype will render selected animation layers within the given color group. I.e. the layers *L020_colour_fx*, *L020_colour_mouth*, and *L020_colour_eye* will be rendered as one pass belonging to the yellow RenderLayer.
 
 ![renderpass](assets/tvp_timeline_color2.png)
-
-:::note
-You can check your RendrePasses and RenderLayers in [Subset Manager](#subset-manager) or you can start publishing. The publisher will show you a collection of all instances on the left side.
-:::
-
-
---
-
-## Publish
-
-
-
- -Now that you have created the required instances, you can publish them via `Publish` tool. -- Click on `Publish` in OpenPype Tools menu. -- wait until all instances are collected. -- You can check on the left side whether all your instances have been created and are ready for publishing. +Now that you have created the required instances, you can publish them. - Fill the comment on the bottom of the window. -- Press the `Play` button to publish - -
-
- -![pyblish](assets/tvp_pyblish_render.png) - -
-
-
-
-Once the `Publisher` turns gets green your renders have been published.
+- Double-check the enabled instances and their context.
+- Press `Publish`.
+- Wait for it to finish.
+- Once the `Publisher` turns green, your renders have been published.
 
 ---
 
-## Subset Manager
-All created instances (render layers, passes, and reviews) will be shown as a simple list. If you don't want to publish some, right click on the item in the list and select `Remove instance`.
-
-![subsetmanager](assets/tvp_subset_manager.png)
-
----
-
-## Load 
+## Load
 
 When you want to load existing published work you can reach the `Loader` through the OpenPype Tools `Load` button.
 
 The supported families for TVPaint are:
@@ -192,4 +172,4 @@ Scene Inventory shows you everything that you have loaded into your scene using
 
 ![sceneinventory](assets/tvp_scene_inventory.png)
 
-You can switch to a previous version of the file or update it to the latest or delete items. 
+You can switch to a previous version of the file or update it to the latest or delete items.
diff --git a/website/docs/assets/maya-arnold_scene_source.png b/website/docs/assets/maya-arnold_scene_source.png
new file mode 100644
index 0000000000..4150b78aac
Binary files /dev/null and b/website/docs/assets/maya-arnold_scene_source.png differ
diff --git a/website/docs/assets/maya-arnold_standin.png b/website/docs/assets/maya-arnold_standin.png
new file mode 100644
index 0000000000..74571a86fa
Binary files /dev/null and b/website/docs/assets/maya-arnold_standin.png differ
diff --git a/website/docs/assets/maya-pointcache_setup.png b/website/docs/assets/maya-pointcache_setup.png
index 8904baa239..b2dc126901 100644
Binary files a/website/docs/assets/maya-pointcache_setup.png and b/website/docs/assets/maya-pointcache_setup.png differ
diff --git a/website/docs/assets/tvp_create_layer.png b/website/docs/assets/tvp_create_layer.png
index 9d243da17a..25081bdf46 100644
Binary files a/website/docs/assets/tvp_create_layer.png and b/website/docs/assets/tvp_create_layer.png differ
diff --git a/website/docs/assets/tvp_create_pass.png b/website/docs/assets/tvp_create_pass.png
index 7d226ea4b5..6c8e600af2 100644
Binary files a/website/docs/assets/tvp_create_pass.png and b/website/docs/assets/tvp_create_pass.png differ
diff --git a/website/docs/assets/tvp_openpype_menu.png b/website/docs/assets/tvp_openpype_menu.png
index cb5c2d4aac..23eaf33fc3 100644
Binary files a/website/docs/assets/tvp_openpype_menu.png and b/website/docs/assets/tvp_openpype_menu.png differ
diff --git a/website/docs/assets/tvp_publisher.png b/website/docs/assets/tvp_publisher.png
new file mode 100644
index 0000000000..e5b1f936df
Binary files /dev/null and b/website/docs/assets/tvp_publisher.png differ
diff --git a/website/sidebars.js b/website/sidebars.js
index dfc3d827e0..93887e00f6 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -44,6 +44,7 @@ module.exports = {
                 "artist_hosts_maya_multiverse",
                 "artist_hosts_maya_yeti",
                 "artist_hosts_maya_xgen",
+                "artist_hosts_maya_arnold",
                 "artist_hosts_maya_vray",
                 "artist_hosts_maya_redshift",
             ],
@@ -86,6 +87,7 @@ module.exports = {
             type: "category",
             label: "Configuration",
             items: [
+                "admin_environment",
                 "admin_settings",
                 "admin_settings_system",
                 "admin_settings_project_anatomy",
diff --git a/website/yarn.lock b/website/yarn.lock
index 0a56928cd9..559c58f931 100644
--- a/website/yarn.lock
+++ b/website/yarn.lock
@@ -7180,9 +7180,9 @@ typedarray-to-buffer@^3.1.5:
     is-typedarray "^1.0.0"
 
 ua-parser-js@^0.7.30:
-  version "0.7.31"
-  resolved
"https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6" - integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ== + version "0.7.33" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.33.tgz#1d04acb4ccef9293df6f70f2c3d22f3030d8b532" + integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw== unherit@^1.0.4: version "1.1.3"