diff --git a/.all-contributorsrc b/.all-contributorsrc index b30f3b2499..60812cdb3c 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -1,6 +1,6 @@ { "projectName": "OpenPype", - "projectOwner": "pypeclub", + "projectOwner": "ynput", "repoType": "github", "repoHost": "https://github.com", "files": [ @@ -319,8 +319,18 @@ "code", "doc" ] + }, + { + "login": "movalex", + "name": "Alexey Bogomolov", + "avatar_url": "https://avatars.githubusercontent.com/u/11698866?v=4", + "profile": "http://abogomolov.com", + "contributions": [ + "code" + ] } ], "contributorsPerLine": 7, - "skipCi": true + "skipCi": true, + "commitType": "docs" } diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 96e768e420..0000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: bug -assignees: '' - ---- -**Running version** -[ex. 3.14.1-nightly.2] - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Desktop (please complete the following information):** - - OS: [e.g. windows] - - Host: [e.g. Maya, Nuke, Houdini] - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000000..2339ec878f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,183 @@ +name: Bug Report +description: File a bug report +title: 'Bug: ' +labels: + - 'type: bug' +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: >- + Please search to see if an issue already exists for the bug you + encountered. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: 'Current Behavior:' + description: A concise description of what you're experiencing. + validations: + required: true + - type: textarea + attributes: + label: 'Expected Behavior:' + description: A concise description of what you expected to happen. + validations: + required: false + - type: dropdown + id: _version + attributes: + label: Version + description: What version are you running? 
Look in the OpenPype Tray.
+    options:
+      - 3.15.11-nightly.2
+      - 3.15.11-nightly.1
+      - 3.15.10
+      - 3.15.10-nightly.2
+      - 3.15.10-nightly.1
+      - 3.15.9
+      - 3.15.9-nightly.2
+      - 3.15.9-nightly.1
+      - 3.15.8
+      - 3.15.8-nightly.3
+      - 3.15.8-nightly.2
+      - 3.15.8-nightly.1
+      - 3.15.7
+      - 3.15.7-nightly.3
+      - 3.15.7-nightly.2
+      - 3.15.7-nightly.1
+      - 3.15.6
+      - 3.15.6-nightly.3
+      - 3.15.6-nightly.2
+      - 3.15.6-nightly.1
+      - 3.15.5
+      - 3.15.5-nightly.2
+      - 3.15.5-nightly.1
+      - 3.15.4
+      - 3.15.4-nightly.3
+      - 3.15.4-nightly.2
+      - 3.15.4-nightly.1
+      - 3.15.3
+      - 3.15.3-nightly.4
+      - 3.15.3-nightly.3
+      - 3.15.3-nightly.2
+      - 3.15.3-nightly.1
+      - 3.15.2
+      - 3.15.2-nightly.6
+      - 3.15.2-nightly.5
+      - 3.15.2-nightly.4
+      - 3.15.2-nightly.3
+      - 3.15.2-nightly.2
+      - 3.15.2-nightly.1
+      - 3.15.1
+      - 3.15.1-nightly.6
+      - 3.15.1-nightly.5
+      - 3.15.1-nightly.4
+      - 3.15.1-nightly.3
+      - 3.15.1-nightly.2
+      - 3.15.1-nightly.1
+      - 3.15.0
+      - 3.15.0-nightly.1
+      - 3.14.11-nightly.4
+      - 3.14.11-nightly.3
+      - 3.14.11-nightly.2
+      - 3.14.11-nightly.1
+      - 3.14.10
+      - 3.14.10-nightly.9
+      - 3.14.10-nightly.8
+      - 3.14.10-nightly.7
+      - 3.14.10-nightly.6
+      - 3.14.10-nightly.5
+      - 3.14.10-nightly.4
+      - 3.14.10-nightly.3
+      - 3.14.10-nightly.2
+      - 3.14.10-nightly.1
+      - 3.14.9
+      - 3.14.9-nightly.5
+      - 3.14.9-nightly.4
+      - 3.14.9-nightly.3
+      - 3.14.9-nightly.2
+      - 3.14.9-nightly.1
+      - 3.14.8
+      - 3.14.8-nightly.4
+      - 3.14.8-nightly.3
+      - 3.14.8-nightly.2
+      - 3.14.8-nightly.1
+      - 3.14.7
+      - 3.14.7-nightly.8
+      - 3.14.7-nightly.7
+      - 3.14.7-nightly.6
+      - 3.14.7-nightly.5
+      - 3.14.7-nightly.4
+      - 3.14.7-nightly.3
+      - 3.14.7-nightly.2
+      - 3.14.7-nightly.1
+      - 3.14.6
+      - 3.14.6-nightly.3
+      - 3.14.6-nightly.2
+      - 3.14.6-nightly.1
+      - 3.14.5
+      - 3.14.5-nightly.3
+      - 3.14.5-nightly.2
+      - 3.14.5-nightly.1
+      - 3.14.4
+      - 3.14.4-nightly.4
+      - 3.14.4-nightly.3
+      - 3.14.4-nightly.2
+      - 3.14.4-nightly.1
+      - 3.14.3
+      - 3.14.3-nightly.7
+      - 3.14.3-nightly.6
+      - 3.14.3-nightly.5
+      - 3.14.3-nightly.4
+    validations:
+      required: true
+  - type: dropdown
+    validations:
+      required: true
+    attributes:
+      label: What platform are you running OpenPype on?
+      description: |
+        Please specify the operating systems you are running OpenPype with.
+      multiple: true
+      options:
+        - Windows
+        - Linux / Centos
+        - Linux / Ubuntu
+        - Linux / RedHat
+        - MacOS
+  - type: textarea
+    id: to-reproduce
+    attributes:
+      label: 'Steps To Reproduce:'
+      description: Steps to reproduce the behavior.
+      placeholder: |
+        1. What did the configuration look like
+        2. What type of action was made
+    validations:
+      required: true
+  - type: checkboxes
+    attributes:
+      label: Are there any labels you wish to add?
+      description: Please search labels and identify those related to your bug.
+      options:
+        - label: I have added the relevant labels to the bug report.
+          required: true
+  - type: textarea
+    id: logs
+    attributes:
+      label: 'Relevant log output:'
+      description: >-
+        Please copy and paste any relevant log output. This will be
+        automatically formatted into code, so no need for backticks.
+      render: shell
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: 'Additional context:'
+      description: Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..cc61bfd04a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,8 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Ynput Community Discussions
+    url: https://community.ynput.io
+    about: Please ask and answer questions here.
+  - name: Ynput Discord Server
+    url: https://discord.gg/ynput
+    about: For community quick chats.
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/enhancement_request.yml b/.github/ISSUE_TEMPLATE/enhancement_request.yml
new file mode 100644
index 0000000000..52b49e0481
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/enhancement_request.yml
@@ -0,0 +1,52 @@
+name: Enhancement Request
+description: Create a report to help us enhance a particular feature
+title: "Enhancement: "
+labels:
+  - "type: enhancement"
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to fill out this enhancement request report!
+  - type: checkboxes
+    attributes:
+      label: Is there an existing issue for this?
+      description: Please search to see if an issue already exists for the enhancement you are requesting.
+      options:
+        - label: I have searched the existing issues.
+          required: true
+  - type: textarea
+    id: related-feature
+    attributes:
+      label: Please describe the feature you have in mind and explain what the current shortcomings are.
+      description: A clear and concise description of what the problem is.
+    validations:
+      required: true
+  - type: textarea
+    id: enhancement-proposal
+    attributes:
+      label: How would you imagine the implementation of the feature?
+      description: A clear and concise description of what you want to happen.
+    validations:
+      required: true
+  - type: checkboxes
+    attributes:
+      label: Are there any labels you wish to add?
+      description: Please search labels and identify those related to your enhancement.
+      options:
+        - label: I have added the relevant labels to the enhancement request.
+          required: true
+  - type: textarea
+    id: alternatives
+    attributes:
+      label: "Describe alternatives you've considered:"
+      description: A clear and concise description of any alternative solutions or features you've considered.
+    validations:
+      required: false
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: "Additional context:"
+      description: Add any other context or screenshots about the enhancement request here.
+    validations:
+      required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 11fc491ef1..0000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: enhancement
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/pr-branch-labeler.yml b/.github/pr-branch-labeler.yml
new file mode 100644
index 0000000000..b434326236
--- /dev/null
+++ b/.github/pr-branch-labeler.yml
@@ -0,0 +1,15 @@
+# Apply label "type: feature" if head matches "feature/*"
+'type: feature':
+  head: "feature/*"
+
+# Apply label "type: enhancement" if head matches "enhancement/*"
+'type: enhancement':
+  head: "enhancement/*"
+
+# Apply label "type: bug" if head matches one of "bugfix/*" or "hotfix/*"
+'type: bug':
+  head: ["bugfix/*", "hotfix/*"]
+
+# Apply label "Bump Minor" if base matches "release/next-minor"
+'Bump Minor':
+  base: "release/next-minor"
diff --git a/.github/pr-glob-labeler.yml b/.github/pr-glob-labeler.yml
new file mode 100644
index 0000000000..286e7768b5
--- /dev/null
+++ b/.github/pr-glob-labeler.yml
@@ -0,0 +1,102 @@
+# Add type: unittest label if any changes in tests folders
+'type: unittest':
+- '*/*tests*/**/*'
+
+# any changes in documentation structure
+'type: documentation':
+- '*/**/*website*/**/*'
+- '*/**/*docs*/**/*'
+
+# hosts triage
+'host: Nuke':
+- '*/**/*nuke*'
+- '*/**/*nuke*/**/*'
+
+'host: Photoshop':
+- '*/**/*photoshop*'
+- '*/**/*photoshop*/**/*'
+
+'host: Harmony':
+- '*/**/*harmony*'
+- '*/**/*harmony*/**/*'
+
+'host: UE':
+- '*/**/*unreal*'
+- '*/**/*unreal*/**/*'
+
+'host: Houdini':
+- '*/**/*houdini*'
+- '*/**/*houdini*/**/*'
+
+'host: Maya':
+- '*/**/*maya*'
+- '*/**/*maya*/**/*'
+
+'host: Resolve':
+- '*/**/*resolve*'
+- '*/**/*resolve*/**/*'
+
+'host: Blender':
+- '*/**/*blender*'
+- '*/**/*blender*/**/*'
+
+'host: Hiero':
+- '*/**/*hiero*'
+- '*/**/*hiero*/**/*'
+
+'host: Fusion':
+- '*/**/*fusion*'
+- '*/**/*fusion*/**/*'
+
+'host: Flame':
+- '*/**/*flame*'
+- '*/**/*flame*/**/*'
+
+'host: TrayPublisher':
+- '*/**/*traypublisher*'
+- '*/**/*traypublisher*/**/*'
+
+'host: 3dsmax':
+- '*/**/*max*'
+- '*/**/*max*/**/*'
+
+'host: TV Paint':
+- '*/**/*tvpaint*'
+- '*/**/*tvpaint*/**/*'
+
+'host: CelAction':
+- '*/**/*celaction*'
+- '*/**/*celaction*/**/*'
+
+'host: After Effects':
+- '*/**/*aftereffects*'
+- '*/**/*aftereffects*/**/*'
+
+'host: Substance Painter':
+- '*/**/*substancepainter*'
+- '*/**/*substancepainter*/**/*'
+
+# modules triage
+'module: Deadline':
+- '*/**/*deadline*'
+- '*/**/*deadline*/**/*'
+
+'module: RoyalRender':
+- '*/**/*royalrender*'
+- '*/**/*royalrender*/**/*'
+
+'module: Sitesync':
+- '*/**/*sync_server*'
+- '*/**/*sync_server*/**/*'
+
+'module: Ftrack':
+- '*/**/*ftrack*'
+- '*/**/*ftrack*/**/*'
+
+'module: Shotgrid':
+- '*/**/*shotgrid*'
+- '*/**/*shotgrid*/**/*'
+
+'module: Kitsu':
+- '*/**/*kitsu*'
+- '*/**/*kitsu*/**/*'
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 20ae298f70..2adaffd23d 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,16 +1,9 @@
-## Brief description
-First sentence is brief description.
-
-## Description
-Next paragraf is more elaborate text with more info. This will be displayed for example in collapsed form under the first sentence in a changelog.
+## Changelog Description
+Paragraphs contain detailed information on the changes made to the product or service, providing an in-depth description of the updates and enhancements. They can be used to explain the reasoning behind the changes, or to highlight the importance of the new features. Paragraphs can often include links to further information or support documentation.

 ## Additional info
-The rest will be ignored in changelog and should contain any additional
-technical information.
-
-## Documentation (add _"type: documentation"_ label)
-[feature_documentation](future_url_after_it_will_be_merged)
+Paragraphs of text giving context, additional technical information, or code examples.

 ## Testing notes:
 1. start with this step
-2. follow this step
\ No newline at end of file
+2. follow this step
diff --git a/.github/workflows/automate-projects.yml b/.github/workflows/automate-projects.yml
deleted file mode 100644
index b605071c2d..0000000000
--- a/.github/workflows/automate-projects.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Automate Projects
-
-on:
-  issues:
-    types: [opened, labeled]
-env:
-  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-jobs:
-  assign_one_project:
-    runs-on: ubuntu-latest
-    name: Assign to One Project
-    steps:
-    - name: Assign NEW bugs to triage
-      uses: srggrs/assign-one-project-github-action@1.2.0
-      if: contains(github.event.issue.labels.*.name, 'bug')
-      with:
-        project: 'https://github.com/pypeclub/pype/projects/2'
-        column_name: 'Needs triage'
diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml
index f78e95528f..f2e7d1058f 100644
--- a/.github/workflows/documentation.yml
+++ b/.github/workflows/documentation.yml
@@ -1,4 +1,4 @@
-name: documentation
+name: 📜 Documentation

 on:
   pull_request:
diff --git a/.github/workflows/milestone_assign.yml b/.github/workflows/milestone_assign.yml
index 4b52dfc30d..df4625c225 100644
--- a/.github/workflows/milestone_assign.yml
+++ b/.github/workflows/milestone_assign.yml
@@ -1,4 +1,4 @@
-name: Milestone - assign to PRs
+name: 👉🏻 Milestone - assign to PRs

 on:
   pull_request_target:
@@ -13,7 +13,7 @@ jobs:
       if: github.event.pull_request.milestone == null
       uses: zoispag/action-assign-milestone@v1
       with:
-        repo-token: "${{ secrets.GITHUB_TOKEN }}"
+        repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
         milestone: 'next-minor'

  run_if_develop:
@@ -24,5 +24,5 @@ jobs:
       if: github.event.pull_request.milestone == null
       uses: zoispag/action-assign-milestone@v1
       with:
-        repo-token: "${{ secrets.GITHUB_TOKEN }}"
-        milestone: 'next-patch'
\ No newline at end of file
+        repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}"
+        milestone: 'next-patch'
diff --git a/.github/workflows/milestone_create.yml b/.github/workflows/milestone_create.yml
index b56ca81dc1..437c9e31b4 100644
--- a/.github/workflows/milestone_create.yml
+++ b/.github/workflows/milestone_create.yml
@@ -1,4 +1,4 @@
-name: Milestone - create default
+name: ➕ Milestone - create default

 on:
   milestone:
@@ -12,7 +12,7 @@ jobs:
         uses: "WyriHaximus/github-action-get-milestones@master"
         id: milestones
         env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
       - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
         id: querymilestone
@@ -31,7 +31,7 @@ jobs:
         with:
           title: 'next-patch'
         env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"

  generate-next-minor:
    runs-on: ubuntu-latest
@@ -40,7 +40,7 @@ jobs:
         uses: "WyriHaximus/github-action-get-milestones@master"
         id: milestones
         env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}"
       - run: printf "name=number::%s" $(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')
         id: querymilestone
@@ -59,4 +59,4 @@ jobs:
         with:
           title: 'next-minor'
         env:
-          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
\ No newline at end of file
+          GITHUB_TOKEN: "${{
secrets.YNPUT_BOT_TOKEN }}" diff --git a/.github/workflows/miletone_release_trigger.yml b/.github/workflows/miletone_release_trigger.yml new file mode 100644 index 0000000000..4a031be7f9 --- /dev/null +++ b/.github/workflows/miletone_release_trigger.yml @@ -0,0 +1,50 @@ +name: ๐Ÿšฉ Milestone Release [trigger] + +on: + workflow_dispatch: + inputs: + milestone: + required: true + release-type: + type: choice + description: What release should be created + options: + - release + - pre-release + milestone: + types: closed + + +jobs: + milestone-title: + runs-on: ubuntu-latest + outputs: + milestone: ${{ steps.milestoneTitle.outputs.value }} + steps: + - name: Switch input milestone + uses: haya14busa/action-cond@v1 + id: milestoneTitle + with: + cond: ${{ inputs.milestone == '' }} + if_true: ${{ github.event.milestone.title }} + if_false: ${{ inputs.milestone }} + - name: Print resulted milestone + run: | + echo "${{ steps.milestoneTitle.outputs.value }}" + + call-ci-tools-milestone-release: + needs: milestone-title + uses: ynput/ci-tools/.github/workflows/milestone_release_ref.yml@main + with: + milestone: ${{ needs.milestone-title.outputs.milestone }} + repo-owner: ${{ github.event.repository.owner.login }} + repo-name: ${{ github.event.repository.name }} + version-py-path: "./openpype/version.py" + pyproject-path: "./pyproject.toml" + secrets: + token: ${{ secrets.YNPUT_BOT_TOKEN }} + user_email: ${{ secrets.CI_EMAIL }} + user_name: ${{ secrets.CI_USER }} + cu_api_key: ${{ secrets.CLICKUP_API_KEY }} + cu_team_id: ${{ secrets.CLICKUP_TEAM_ID }} + cu_field_id: ${{ secrets.CLICKUP_RELEASE_FIELD_ID }} diff --git a/.github/workflows/nightly_merge.yml b/.github/workflows/nightly_merge.yml index 1d36c89cc7..3f8c75dce3 100644 --- a/.github/workflows/nightly_merge.yml +++ b/.github/workflows/nightly_merge.yml @@ -1,4 +1,4 @@ -name: Dev -> Main +name: ๐Ÿ”€ Dev -> Main on: schedule: @@ -14,10 +14,10 @@ jobs: - name: ๐Ÿš› Checkout Code uses: actions/checkout@v2 - - name: ๐Ÿ”จ Merge develop to main + - name: ๐Ÿ”จ Merge develop to main uses: everlytic/branch-merge@1.1.0 with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'develop' target_branch: 'main' commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' @@ -25,5 +25,5 @@ jobs: - name: Invoke pre-release workflow uses: benc-uk/workflow-dispatch@v1 with: - workflow: Nightly Prerelease - token: ${{ secrets.ADMIN_TOKEN }} \ No newline at end of file + workflow: prerelease.yml + token: ${{ secrets.YNPUT_BOT_TOKEN }} diff --git a/.github/workflows/pr_labels.yml b/.github/workflows/pr_labels.yml new file mode 100644 index 0000000000..ecc95051aa --- /dev/null +++ b/.github/workflows/pr_labels.yml @@ -0,0 +1,49 @@ +name: ๐Ÿ”– PR labels + +on: + pull_request_target: + types: [opened, assigned] + +jobs: + size-label: + name: pr_size_label + runs-on: ubuntu-latest + if: github.event.action == 'assigned' || github.event.action == 'opened' + steps: + - name: Add size label + uses: "pascalgn/size-label-action@v0.4.3" + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" + IGNORED: ".gitignore\n*.md\n*.json" + with: + sizes: > + { + "0": "XS", + "100": "S", + "500": "M", + "1000": "L", + "1500": "XL", + "2500": "XXL" + } + + label_prs_branch: + name: pr_branch_label + runs-on: ubuntu-latest + if: github.event.action == 'assigned' || github.event.action == 'opened' + steps: + - name: Label PRs - Branch name detection + uses: ffittschen/pr-branch-labeler@v1 + with: + repo-token: ${{ 
secrets.YNPUT_BOT_TOKEN }}
+
+  label_prs_globe:
+    name: pr_globe_label
+    runs-on: ubuntu-latest
+    if: github.event.action == 'assigned' || github.event.action == 'opened'
+    steps:
+      - name: Label PRs - Glob detection
+        uses: actions/labeler@v4.0.3
+        with:
+          repo-token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          configuration-path: ".github/pr-glob-labeler.yml"
+          sync-labels: false
diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml
index 078f6c85bb..8c5c733c08 100644
--- a/.github/workflows/prerelease.yml
+++ b/.github/workflows/prerelease.yml
@@ -1,4 +1,4 @@
-name: Nightly Prerelease
+name: ⏳ Nightly Prerelease

 on:
   workflow_dispatch:
@@ -17,7 +17,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v2
         with:
-          python-version: 3.7
+          python-version: 3.9

       - name: Install Python requirements
         run: pip install gitpython semver PyGithub
@@ -25,43 +25,15 @@ jobs:
       - name: 🔎 Determine next version type
         id: version_type
         run: |
-          TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }})
-
-          echo ::set-output name=type::$TYPE
+          TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
+          echo "type=${TYPE}" >> $GITHUB_OUTPUT

       - name: 💉 Inject new version into files
         id: version
         if: steps.version_type.outputs.type != 'skip'
         run: |
-          RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }})
-
-          echo ::set-output name=next_tag::$RESULT
-
-      # - name: "✍️ Generate full changelog"
-      #   if: steps.version_type.outputs.type != 'skip'
-      #   id: generate-full-changelog
-      #   uses: heinrichreimer/github-changelog-generator-action@v2.3
-      #   with:
-      #     token: ${{ secrets.ADMIN_TOKEN }}
-      #     addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
-      #     issues: false
-      #     issuesWoLabels: false
-      #     sinceTag: "3.12.0"
-      #     maxIssues: 100
-      #     pullRequests: true
-      #     prWoLabels: false
-      #     author: false
-      #     unreleased: true
-      #     compareLink: true
-      #     stripGeneratorNotice: true
-      #     verbose: true
-      #     unreleasedLabel: ${{ steps.version.outputs.next_tag }}
-      #     excludeTagsRegex: "CI/.+"
-      #     releaseBranch: "main"
-
-      - name: "🖨️ Print changelog to console"
-        if: steps.version_type.outputs.type != 'skip'
-        run: cat CHANGELOG.md
+          NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }})
+          echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT

       - name: 💾 Commit and Tag
         id: git_commit
@@ -80,7 +52,7 @@ jobs:
       - name: Push to protected main branch
         uses: CasperWA/push-protected@v2.10.0
         with:
-          token: ${{ secrets.ADMIN_TOKEN }}
+          token: ${{ secrets.YNPUT_BOT_TOKEN }}
           branch: main
           tags: true
           unprotect_reviews: true
@@ -89,7 +61,13 @@ jobs:
         uses: everlytic/branch-merge@1.1.0
         if: steps.version_type.outputs.type != 'skip'
         with:
-          github_token: ${{ secrets.ADMIN_TOKEN }}
+          github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
           source_ref: 'main'
           target_branch: 'develop'
           commit_message_template: '[Automated] Merged {source_ref} into {target_branch}'
+
+      - name: Invoke
Update bug report workflow
+        uses: benc-uk/workflow-dispatch@v1
+        with:
+          workflow: update_bug_report.yml
+          token: ${{ secrets.YNPUT_BOT_TOKEN }}
\ No newline at end of file
diff --git a/.github/workflows/project_task_statuses.yml b/.github/workflows/project_task_statuses.yml
new file mode 100644
index 0000000000..d078c08b70
--- /dev/null
+++ b/.github/workflows/project_task_statuses.yml
@@ -0,0 +1,70 @@
+name: 📊 Project task statuses
+
+on:
+  pull_request_review:
+    types: [submitted]
+  issue_comment:
+    types: [created]
+  pull_request_review_comment:
+    types: [created]
+
+jobs:
+
+  pr_review_started:
+    name: pr_review_started
+    runs-on: ubuntu-latest
+    # -----------------------------
+    # conditions are:
+    # - PR issue comment which is not from Ynbot
+    # - PR review comment which is not from Hound (or any other bot)
+    # - PR review submitted which is not from Hound (or any other bot) and is not 'Changes requested'
+    # - make sure it only runs on non-forked repos
+    # -----------------------------
+    if: |
+      (github.event_name == 'issue_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.id != 82967070) ||
+      (github.event_name == 'pull_request_review_comment' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.comment.user.type != 'Bot') ||
+      (github.event_name == 'pull_request_review' &&
+      github.event.pull_request.head.repo.owner.login == 'ynput' &&
+      github.event.review.state != 'changes_requested' &&
+      github.event.review.state != 'approved' &&
+      github.event.review.user.type != 'Bot')
+    steps:
+      - name: Move PR to 'Review In Progress'
+        uses: leonsteinhaeuser/project-beta-automations@v2.1.0
+        with:
+          gh_token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          organization: ynput
+          project_id: 11
+          resource_node_id: ${{ github.event.pull_request.node_id || github.event.issue.node_id }}
+          status_value: Review In Progress
+
+  pr_review_requested:
+    # -----------------------------
+    # Resets the ClickUp task status to 'In Progress' after 'Changes Requested' was submitted on a PR.
+    # It only runs if a custom ClickUp task id was found in the ref branch of the PR.
+    # -----------------------------
+    name: pr_review_requested
+    runs-on: ubuntu-latest
+    if: github.event_name == 'pull_request_review' && github.event.pull_request.head.repo.owner.login == 'ynput' && github.event.review.state == 'changes_requested'
+    steps:
+      - name: Set branch env
+        run: echo "BRANCH_NAME=${{ github.event.pull_request.head.ref }}" >> $GITHUB_ENV
+      - name: Get ClickUp ID from ref head name
+        id: get_cuID
+        run: |
+          echo ${{ env.BRANCH_NAME }}
+          echo "cuID=$(echo $BRANCH_NAME | sed 's/.*\/\(OP\-[0-9]\{4\}\).*/\1/')" >> $GITHUB_OUTPUT
+
+      - name: Print ClickUp ID
+        run: echo ${{ steps.get_cuID.outputs.cuID }}
+
+      - name: Move found Clickup task to 'Review in Progress'
+        if: steps.get_cuID.outputs.cuID
+        run: |
+          curl -i -X PUT \
+          'https://api.clickup.com/api/v2/task/${{ steps.get_cuID.outputs.cuID }}?custom_task_ids=true&team_id=${{secrets.CLICKUP_TEAM_ID}}' \
+          -H 'Authorization: ${{secrets.CLICKUP_API_KEY}}' \
+          -H 'Content-Type: application/json' \
+          -d '{
+            "status": "in progress"
+          }'
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
deleted file mode 100644
index 754f3d32d6..0000000000
--- a/.github/workflows/release.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-name: Stable Release
-
-on:
-  release:
-    types:
-      - prereleased
-
-jobs:
-  create_release:
-    runs-on: ubuntu-latest
-    if: github.actor != 'pypebot'
-
-    steps:
-      - name: 🚛 Checkout Code
-        uses:
actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.7
-      - name: Install Python requirements
-        run: pip install gitpython semver PyGithub
-
-      - name: 💉 Inject new version into files
-        id: version
-        run: |
-          echo ::set-output name=current_version::${GITHUB_REF#refs/*/}
-          RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/})
-          LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release)
-
-          echo ::set-output name=last_release::$LASTRELEASE
-          echo ::set-output name=release_tag::$RESULT
-
-      # - name: "✍️ Generate full changelog"
-      #   if: steps.version.outputs.release_tag != 'skip'
-      #   id: generate-full-changelog
-      #   uses: heinrichreimer/github-changelog-generator-action@v2.3
-      #   with:
-      #     token: ${{ secrets.ADMIN_TOKEN }}
-      #     addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
-      #     issues: false
-      #     issuesWoLabels: false
-      #     sinceTag: "3.12.0"
-      #     maxIssues: 100
-      #     pullRequests: true
-      #     prWoLabels: false
-      #     author: false
-      #     unreleased: true
-      #     compareLink: true
-      #     stripGeneratorNotice: true
-      #     verbose: true
-      #     futureRelease: ${{ steps.version.outputs.release_tag }}
-      #     excludeTagsRegex: "CI/.+"
-      #     releaseBranch: "main"
-
-      - name: 💾 Commit and Tag
-        id: git_commit
-        if: steps.version.outputs.release_tag != 'skip'
-        run: |
-          git config user.email ${{ secrets.CI_EMAIL }}
-          git config user.name ${{ secrets.CI_USER }}
-          git add .
-          git commit -m "[Automated] Release"
-          tag_name="${{ steps.version.outputs.release_tag }}"
-          git tag -a $tag_name -m "stable release"
-
-      - name: 🔏 Push to protected main branch
-        if: steps.version.outputs.release_tag != 'skip'
-        uses: CasperWA/push-protected@v2.10.0
-        with:
-          token: ${{ secrets.ADMIN_TOKEN }}
-          branch: main
-          tags: true
-          unprotect_reviews: true
-
-      - name: "✍️ Generate last changelog"
-        if: steps.version.outputs.release_tag != 'skip'
-        id: generate-last-changelog
-        uses: heinrichreimer/github-changelog-generator-action@v2.2
-        with:
-          token: ${{ secrets.ADMIN_TOKEN }}
-          addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}'
-          issues: false
-          issuesWoLabels: false
-          sinceTag: ${{ steps.version.outputs.last_release }}
-          maxIssues: 100
-          pullRequests: true
-          prWoLabels: false
-          author: false
-          unreleased: true
-          compareLink: true
-          stripGeneratorNotice: true
-          verbose: true
-          futureRelease: ${{ steps.version.outputs.release_tag }}
-          excludeTagsRegex: "CI/.+"
-          releaseBranch: "main"
-          stripHeaders: true
-          base: 'none'
-
-
-      - name: 🚀 Github Release
-        if: steps.version.outputs.release_tag != 'skip'
-        uses: ncipollo/release-action@v1
-        with:
-          body: ${{ steps.generate-last-changelog.outputs.changelog }}
-          tag: ${{ steps.version.outputs.release_tag }}
-          token: ${{ secrets.ADMIN_TOKEN }}
-
-      - name: ☠ Delete Pre-release
-        if: steps.version.outputs.release_tag != 'skip'
-        uses: cb80/delrel@latest
-        with:
-          tag: "${{ steps.version.outputs.current_version }}"
-
-      - name: 🔁 Merge main back to develop
-        if: steps.version.outputs.release_tag != 'skip'
-        uses: everlytic/branch-merge@1.1.0
-        with:
-          github_token: ${{ secrets.ADMIN_TOKEN }}
-          source_ref: 'main'
-          target_branch: 'develop'
-          commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}'
diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml
index ac7279117a..fd8e0e642d 100644
--- a/.github/workflows/test_build.yml
+++ b/.github/workflows/test_build.yml
@@ -1,7 +1,7 @@
 # This workflow will upload a Python Package using Twine when a release is created
 # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

-name: Test Build
+name: 🏗️ Test Build

 on:
   pull_request:
@@ -18,7 +18,7 @@ jobs:
     runs-on: windows-latest
     strategy:
       matrix:
-        python-version: [3.7]
+        python-version: [3.9]

     steps:
       - name: 🚛 Checkout Code
@@ -28,7 +28,7 @@ jobs:
         uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}
-
+
       - name: 🧵 Install Requirements
         shell: pwsh
         run: |
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7]
+        python-version: [3.9]

     steps:
       - name: 🚛 Checkout Code
@@ -64,27 +64,3 @@ jobs:
       run: |
         export SKIP_THIRD_PARTY_VALIDATION="1"
         ./tools/build.sh
-
-  # MacOS-latest:
-
-  #   runs-on: macos-latest
-  #   strategy:
-  #     matrix:
-  #       python-version: [3.7]
-
-  #   steps:
-  #     - name: 🚛
Checkout Code
-  #       uses: actions/checkout@v2
-
-  #     - name: Set up Python
-  #       uses: actions/setup-python@v2
-  #       with:
-  #         python-version: ${{ matrix.python-version }}
-
-  #     - name: 🧵 Install Requirements
-  #       run: |
-  #         ./tools/create_env.sh
-
-  #     - name: 🔨 Build
-  #       run: |
-  #         ./tools/build.sh
\ No newline at end of file
diff --git a/.github/workflows/update_bug_report.yml b/.github/workflows/update_bug_report.yml
new file mode 100644
index 0000000000..1e5da414bb
--- /dev/null
+++ b/.github/workflows/update_bug_report.yml
@@ -0,0 +1,33 @@
+name: 🐞 Update Bug Report
+
+on:
+  workflow_dispatch:
+  release:
+    # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release
+    types: [published]
+
+jobs:
+  update-bug-report:
+    runs-on: ubuntu-latest
+    name: Update bug report
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.release.target_commitish }}
+      - name: Update version
+        uses: ynput/gha-populate-form-version@main
+        with:
+          github_token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          registry: github
+          dropdown: _version
+          limit_to: 100
+          form: .github/ISSUE_TEMPLATE/bug_report.yml
+          commit_message: 'chore(): update bug report / version'
+          dry_run: no-push
+
+      - name: Push to protected develop branch
+        uses: CasperWA/push-protected@v2.10.0
+        with:
+          token: ${{ secrets.YNPUT_BOT_TOKEN }}
+          branch: develop
+          unprotect_reviews: true
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 18e7cd7bf2..50f52f65a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -112,3 +112,9 @@ tools/run_eventserver.*
 tools/dev_*

 .github_changelog_generator
+
+
+# Addons
+########
+/openpype/addons/*
+!/openpype/addons/README.md
diff --git a/.gitmodules b/.gitmodules
index fe93791c4e..4de92471f7 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,4 +4,7 @@
 [submodule "tools/modules/powershell/PSWriteColor"]
 	path = tools/modules/powershell/PSWriteColor
-	url = https://github.com/EvotecIT/PSWriteColor.git
\ No newline at end of file
+	url = https://github.com/EvotecIT/PSWriteColor.git
+[submodule "openpype/hosts/unreal/integration"]
+	path = openpype/hosts/unreal/integration
+	url = https://github.com/ynput/ayon-unreal-plugin.git
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000..eec388924e
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,12 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.4.0
+  hooks:
+  - id: trailing-whitespace
+  - id: end-of-file-fixer
+  - id: check-yaml
+  - id: check-added-large-files
+  - id: no-commit-to-branch
+    args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ]
diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md
new file mode 100644
index 0000000000..912780d803
--- /dev/null
+++ b/ARCHITECTURE.md
@@ -0,0 +1,77 @@
+# Architecture
+
+OpenPype is a monolithic Python project that bundles several parts. This document gives a bird's-eye overview of the project and, to a certain degree, of each of the sub-projects.
+The current file structure looks like this:
+
+```
+.
+├── common - Code in this folder is the backend portion of the Addon distribution logic for the v4 server.
+├── docs - Documentation of the source code.
+├── igniter - The OpenPype bootstrapper; deals with running version resolution and setting up the connection to MongoDB.
+├── openpype - The actual OpenPype core package.
+├── schema - Collection of JSON files describing schematics of objects. This follows Avalon's convention.
+├── tests - Integration and unit tests.
+├── tools - Convenience scripts to perform common actions (in both bash and ps1).
+├── vendor - When using the igniter, it deploys third-party tools in here, such as ffmpeg.
+└── website - Source files for https://openpype.io/ which is Docusaurus (https://docusaurus.io/).
+```
+
+The core functionality of the pipeline lives in `igniter` and `openpype`, which in turn rely on the `schema` files. Whenever you build (or download a pre-built) version of OpenPype, these two are bundled in it, and `Igniter` is the entry point.
+
+
+## Igniter
+
+It's the setup and update tool for OpenPype. Unless you want to package `openpype` separately and deal with all the configuration manually, this will most likely be your entry point.
+
+```
+igniter/
+├── bootstrap_repos.py - Module that will find or install OpenPype versions in the system.
+├── __init__.py - Igniter entry point.
+├── install_dialog.py - Shows dialog for choosing the central pype repository.
+├── install_thread.py - Threading helpers for the install process.
+├── __main__.py - Package entry point; allows running the package with `python -m igniter`.
+├── message_dialog.py - Qt Dialog with a message and "Ok" button.
+├── nice_progress_bar.py - Fancy Qt progress bar.
+├── splash.txt - ASCII art for the terminal installer.
+├── stylesheet.css - Installer Qt styles.
+├── terminal_splash.py - Terminal installer animation, relies on `splash.txt`.
+├── tools.py - Collection of methods that don't fit in other modules.
+├── update_thread.py - Threading helper to update existing OpenPype installs.
+├── update_window.py - Qt UI to update OpenPype installs.
+├── user_settings.py - Interface for the OpenPype user settings.
+└── version.py - Igniter's version number.
+```
+
+## OpenPype
+
+This is the main package of the OpenPype logic. It could be loosely described as a combination of [Avalon](https://getavalon.github.io), [Pyblish](https://pyblish.com/) and glue around those with custom OpenPype-only elements. Things are being moved around in preparation for V4, which will be released under a new name, AYON.
+
+```
+openpype/
+├── client - Interface for the MongoDB.
+├── hooks - Hooks to be executed on certain OpenPype Applications defined in `openpype.lib.applications`.
+├── host - Base class for the different hosts.
+├── hosts - Integration with the different DCCs (hosts) using the `host` base class.
+├── lib - Libraries that stitch together the package; some have been moved into other parts.
+├── modules - OpenPype modules contain self-contained logic for a specific kind of implementation, such as the Ftrack connection and its Python API.
+├── pipeline - Core of the OpenPype pipeline; handles creation of data, publishing, etc.
+├── plugins - Global/core plugins for the loader and publisher tools.
+├── resources - Icons, fonts, etc.
+├── scripts - Loose scripts that get run by tools/publishers.
+├── settings - OpenPype settings interface.
+├── style - Qt styling.
+├── tests - Unit tests.
+├── tools - Core tools, check out https://openpype.io/docs/artist_tools.
+├── vendor - Vendored third-party Python packages.
+├── widgets - Common re-usable Qt Widgets.
+├── action.py - LEGACY: Pyblish actions; they now live in `openpype.pipeline.publish.action`.
+├── cli.py - Command line interface, leverages `click`.
+├── __init__.py - Sets two constants.
+├── __main__.py - Entry point; calls `cli.py`.
+├── plugin.py - Pyblish plugins.
+├── pype_commands.py - Implementation of OpenPype commands.
+└── version.py - Current version number.
+```
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 530622f491..882620f26c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,5855 @@
 # Changelog
+
+## [3.15.10](https://github.com/ynput/OpenPype/tree/3.15.10)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.9...3.15.10)
+
+### **🆕 New features**
+
+
+ImageIO: Adding ImageIO activation toggle to all hosts #4700
+
+Colorspace management can now be enabled at the project level, although it is disabled by default. Once enabled, all hosts will use the OCIO config file defined in the settings. If the settings are disabled, the system switches to the DCC's native color space management, and we do not store colorspace information at the representation level.
+
+___
+
+ + +
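To illustrate the toggle described above, the fallback logic amounts to something like the following sketch. The settings keys here are assumptions for illustration, not the actual settings schema:

```
def get_ocio_config_path(project_settings):
    """Return an OCIO config path, or None to fall back to the DCC's
    native color management (settings keys are assumed)."""
    imageio = project_settings["global"]["imageio"]
    if not imageio.get("activate_global_color_management"):
        return None  # colorspace management disabled -> DCC native behavior
    return imageio["ocio_config"]
```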
+Redshift Proxy Support in 3dsMax #4625 + +Redshift Proxy Support for 3dsMax. +- [x] Creator +- [x] Loader +- [x] Extractor +- [x] Validator +- [x] Add documentation + + +___ + +
+ + +
+
+Houdini farm publishing and rendering #4825
+
+Deadline farm publishing and rendering for Houdini:
+- [x] Mantra
+- [x] Karma (including USD renders)
+- [x] Arnold
+- [x] Elaborate Redshift ROP for Deadline submission
+- [x] fix the existing bug in Redshift ROP
+- [x] Vray
+- [x] add docs
+
+___
+
+ + +
+
+Feature: Blender hook to execute python scripts at launch #4905
+
+Hook that lets other hooks add paths to Python scripts that will be executed when Blender starts.
+
+___
+
+ + +
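A hypothetical sketch of such a hook — the base-class import path, hook attributes, and the data key below are assumptions, not the actual implementation:

```
from openpype.lib.applications import PreLaunchHook  # import path assumed


class AddPythonScriptToLaunch(PreLaunchHook):
    """Append a Python script so Blender runs it on startup."""

    app_groups = ["blender"]

    def execute(self):
        script_path = self.launch_context.data.get("python_script")  # key assumed
        if script_path:
            # Blender's own CLI executes scripts passed with "--python".
            self.launch_context.launch_args.extend(["--python", script_path])
```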
+Feature: Resolve: Open last workfile on launch through .scriptlib #5047 + +Added implementation to Resolve integration to open last workfile on launch. + + +___ + +
+ + +
+
+General: Remove default windowFlags from publisher #5089
+
+The default windowFlags made the publisher window (on Linux at least) show only the close button, which is frustrating because you often just want to minimize the window and get back to the validation later. With that line removed, the window behaves as expected. (Before/after screenshots were attached to the PR.)
+
+___
+
+ + +
+
+General: Show user who created the workfile on the details pane of workfile manager #5093
+
+New PR for https://github.com/ynput/OpenPype/pull/5087, which was closed after merging the `next-minor` branch and then realizing we don't need to target it, as it was decided that supporting Windows isn't required. More info in that PR's discussion. Small addition that shows the name of the `user` who created the workfile on the details pane of the workfile manager. (Screenshot attached to the PR.)
+
+___
+
+ + +
+Loader: Hide inactive versions in UI #5100 + +Hide versions with `active` set to `False` in Loader UI. + + +___ + +
+
+### **🚀 Enhancements**
+
+Maya: Repair RenderPass token when merging AOVs. #5055 + +Validator was flagging that `` was in the image prefix, but did not repair the issue. + + +___ + +
+ + +
+Maya: Improve error feedback when no renderable cameras exist for ASS family. #5092 + +When collecting cameras for `ass` family, this improves the error message when no cameras are renderable. + + +___ + +
+ + +
+
+Nuke: Custom script to set frame range of read nodes #5039
+
+Adds an option to set the frame range specifically for Read nodes in the OpenPype panel. Users can set up their preferred frame range with the frame range dialog, which is shown after clicking `Set Frame Range (Read Node)` in the OpenPype tools.
+
+___
+
+ + +
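For context, applying a frame range to selected Read nodes in Nuke boils down to something like this minimal sketch using the standard `nuke` API (not the actual panel code):

```
import nuke


def set_read_nodes_frame_range(first, last):
    # Apply the chosen range to every selected Read node.
    for node in nuke.selectedNodes("Read"):
        node["first"].setValue(first)
        node["last"].setValue(last)
```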
+Update extract review letterbox docs #5074 + +Update Extract Review - Letter Box section in Docs. Letterbox type description is removed. + + +___ + +
+ + +
+Project pack: Documents only skips roots validation #5082 + +Single roots validation is skipped if only documents are extracted. + + +___ + +
+ + +
+Nuke: custom settings for write node without publish #5084 + +Set Render Output and other settings to write nodes for non-publish purposes. + + +___ + +
+
+### **🐛 Bug fixes**
+
+
+Maya: Deadline servers #5052
+
+Fix working with multiple Deadline servers in Maya.
+- Pools (primary and secondary) attributes were not recreated correctly.
+- The order of collector plugins was wrong, so collected data was not injected into render instances.
+- The server attribute was not converted to a string, so comparing it with settings was incorrect.
+- Improve debug logging of where the webservice URL is fetched from.
+
+___
+
+ + +
+Maya: Fix Load Reference. #5091 + +Fix bug introduced with https://github.com/ynput/OpenPype/pull/4751 where `cmds.ls` returns a list. + + +___ + +
+ + +
+
+3dsmax: Publishing Deadline jobs from RedShift #4960
+
+Fix the bug of being unable to publish Deadline jobs from Redshift. Use Current File instead of Published Scene for Redshift only.
+- add save scene before rendering to ensure the scene is saved after the modification.
+- add a separated AOV files option to allow users to choose to have AOVs in the render output
+- add a validator for render publish to avoid overriding the previous renders
+
+___
+
+ + +
+Houdini: Fix missing frame range for pointcache and camera exports #5026 + +Fix missing frame range for pointcache and camera exports on published version. + + +___ + +
+ + +
+
+Global: collect_frame_fix plugin fix and cleanup #5064
+
+The previous implementation (https://github.com/ynput/OpenPype/pull/5036) was broken. This fixes the issue where the attribute was found in instance data even though the settings for the plugin were disabled.
+
+___
+
+ + +
+
+Hiero: Fix apply settings Clip Load #5073
+
+Changed `apply_settings` to a classmethod, which fixes the issue with settings.
+
+___
+
+ + +
+
+Resolve: Make sure scripts dir exists #5078
+
+Make sure the scripts directory exists before looping over its content.
+
+___
+
+ + +
+removing info knob from nuke creators #5083 + +- removing instance node if removed via publisher +- removing info knob since it is not needed any more (was there only for the transition phase) + + +___ + +
+ + +
+Tray: Fix restart arguments on update #5085 + +Fix arguments on restart. + + +___ + +
+ + +
+Maya: bug fix on repair action in Arnold Scene Source CBID Validator #5096 + +Fix the bug of not being able to use repair action in Arnold Scene Source CBID Validator + + +___ + +
+ + +
+Nuke: batch of small fixes #5103 + +- default settings for `imageio.requiredNodes` **CreateWriteImage** +- default settings for **LoadImage** representations +- **Create** and **Publish** menu items with `parent=main_window` (version > 14) + + +___ + +
+ + +
+
+Deadline: make prerender check safer #5104
+
+Prerender wasn't correctly recognized and was replaced with the plain 'render' family. In Nuke it is correctly `prerender.farm` in families, which wasn't handled here. That resulted in using `render` templates even when `render` and `prerender` templates were split.
+
+___
+
+ + +
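The safer check amounts to treating any `prerender*` family as prerender instead of comparing for an exact match — a small sketch under that assumption:

```
def resolve_template_family(families):
    """Pick the template family; 'prerender.farm' still counts as prerender."""
    if any(family.startswith("prerender") for family in families):
        return "prerender"
    return "render"


assert resolve_template_family(["render", "prerender.farm"]) == "prerender"
```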
+
+General: Sort launcher actions alphabetically #5106
+
+The launcher actions weren't being sorted by their label but by their name (which, in the case of the apps, is the version number), so the order wasn't consistent and we kept getting a different order on every launch. From a debugging session, this is what the `actions` variable held after the `filter_compatible_actions` function before these changes:
+```
+(Pdb) for p in actions: print(p.order, p.name)
+0 14-02
+0 14-02
+0 14-02
+0 14-02
+0 14-02
+0 19-5-493
+0 2023
+0 3-41
+0 6-01
+```
+This had already caused a couple of bugs, with artists thinking they had launched Nuke X when they had actually launched Nuke, and telling us their Nuke was missing nodes. (Before/after screenshots were attached to the PR.)
+
+___
+
+ + +
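The fix is essentially sorting by the user-facing label with the internal name as a fallback — a sketch with assumed attribute names:

```
def sort_actions(actions):
    """Order launcher actions by explicit order, then by visible label."""
    return sorted(
        actions,
        key=lambda action: (action.order, (action.label or action.name).lower()),
    )
```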
+
+TrayPublisher: Editorial video stream discovery #5120
+
+The editorial create plugin in TrayPublisher no longer assumes that the first stream of the input is video.
+
+___
+
+
+### **🔀 Refactored code**
+
+
+3dsmax: Move from deprecated interface #5117
+
+The `INewPublisher` interface is deprecated; this PR switches usage to `IPublishHost` instead.
+
+___
+
+ +### **Merged pull requests** + + +
+add movalex as a contributor for code #5076 + +Adds @movalex as a contributor for code. + +This was requested by mkolar [in this comment](https://github.com/ynput/OpenPype/pull/4916#issuecomment-1571498425) + +[skip ci] +___ + +
+ + +
+3dsmax: refactor load plugins #5079 + + +___ + +
+
+
+## [3.15.9](https://github.com/ynput/OpenPype/tree/3.15.9)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.8...3.15.9)
+
+### **🆕 New features**
+
+Blender: Implemented Loading of Alembic Camera #4990 + +Implemented loading of Alembic cameras in Blender. + + +___ + +
+ + +
+Unreal: Implemented Creator, Loader and Extractor for Levels #5008 + +Creator, Loader and Extractor for Unreal Levels have been implemented. + + +___ + +
+
+### **🚀 Enhancements**
+
+
+Blender: Added setting for base unit scale #4987
+
+A setting for the base unit scale has been added for Blender. The unit scale is automatically applied when opening a file or creating a new one.
+
+___
+
+ + +
+Unreal: Changed naming and path of Camera Levels #5010 + +The levels created for the camera in Unreal now include `_camera` in the name, to be better identifiable, and are placed in the camera folder. + + +___ + +
+ + +
+
+Settings: Added option to nest settings templates #5022
+
+It is now possible to nest settings templates in other templates.
+
+___
+
+ + +
+
+Enhancement/publisher: Remove "hit play to continue" label on continue #5029
+
+Remove the "hit play to continue" message on continue, so it no longer shows after play is clicked.
+
+___
+
+ + +
+
+Ftrack: Limit number of ftrack events to query at once #5033
+
+Limit the number of ftrack events received from MongoDB at once to 100.
+
+___
+
+ + +
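With pymongo, capping a batch like this is a single `limit` call — a minimal sketch with assumed field names:

```
from pymongo import ASCENDING


def fetch_pending_events(event_collection, batch_size=100):
    """Pull at most `batch_size` unprocessed events per iteration."""
    cursor = (
        event_collection.find({"pype_data.is_processed": False})  # filter assumed
        .sort("pype_data.stored", ASCENDING)
        .limit(batch_size)
    )
    return list(cursor)
```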
+General: Small code cleanups #5034 + +Small code cleanup and updates. + + +___ + +
+ + +
+
+Global: collect frames to fix with settings #5036
+
+Settings for `Collect Frames to Fix` now allow disabling the plugin per project. The `Rewriting latest version` attribute can also be hidden via settings.
+
+___
+
+ + +
+General: Publish plugin apply settings can expect only project settings #5037 + +Only project settings are passed to optional `apply_settings` method, if the method expects only one argument. + + +___ + +
+
+### **🐛 Bug fixes**
+
+Maya: Load Assembly fix invalid imports #4859 + +Refactors imports so they are now correct. + + +___ + +
+ + +
+
+Maya: Skipping rendersetup for members. #4973
+
+When publishing a `rendersetup`, the objectset is, and should be, empty.
+
+___
+
+ + +
+
+Maya: Validate Rig Output IDs #5016
+
+Absolute node names were not used, so the plugin did not fetch the nodes properly. A missing pymel command was also fixed.
+
+___
+
+ + +
+
+Deadline: escape rootless path in publish job #4910
+
+If the publish path of a Deadline job contains spaces or other special characters, the command was failing because the path wasn't properly escaped. This fixes it.
+
+___
+
+ + +
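Escaping a path for a shell command is what Python's `shlex.quote` is for — a minimal sketch (the executable name and path are illustrative):

```
import shlex

publish_path = "/projects/My Project/publish/render v001"
# Quoting keeps the spaces from splitting the path into separate arguments.
command = "openpype_console publish {}".format(shlex.quote(publish_path))
print(command)
# openpype_console publish '/projects/My Project/publish/render v001'
```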
+
+General: Company name and URL changed #4974
+
+The company name and URL records in inno_setup were obsolete; they have been updated.
+___
+
+ + +
+Unreal: Fix usage of 'get_full_path' function #5014 + +This PR changes all the occurrences of `get_full_path` functions to alternatives to get the path of the objects. + + +___ + +
+ + +
+Unreal: Fix sequence frames validator to use correct data #5021 + +Fix sequence frames validator to use clipIn and clipOut data instead of frameStart and frameEnd. + + +___ + +
+ + +
+Unreal: Fix render instances collection to use correct data #5023 + +Fix render instances collection to use `frameStart` and `frameEnd` from the Project Manager, instead of the sequence's ones. + + +___ + +
+ + +
+
+Resolve: loader is opening even if no timeline in project #5025
+
+The Loader now opens even when no timeline is available in a project.
+
+___
+
+ + +
+
+nuke: callback for dirmapping is on demand #5030
+
+Nuke processing was slowed down by this callback. Since it is disabled by default, it made sense to register it only on demand.
+
+___
+
+ + +
+
+Publisher: UI works with instances without label #5032
+
+The Publisher UI no longer crashes if an instance doesn't have the 'label' key filled in its instance data.
+
+___
+
+ + +
+Publisher: Call explicitly prepared tab methods #5044 + +It is not possible to go to Create tab during publishing from OpenPype menu. + + +___ + +
+ + +
+Ftrack: Role names are not case sensitive in ftrack event server status action #5058 + +Event server status action is not case sensitive for role names of user. + + +___ + +
+ + +
+Publisher: Fix border widget #5063 + +Fixed border lines in Publisher UI to be painted correctly with correct indentation and size. + + +___ + +
+ + +
+Unreal: Fix Commandlet Project and Permissions #5066 + +Fix problem when creating an Unreal Project when Commandlet Project is in a protected location. + + +___ + +
+ + +
+
+Unreal: Added verification for Unreal app name format #5070
+
+The Unreal app name is used to determine the Unreal version folder, so it must follow the format `x-x`, where `x` is any integer. This PR adds a verification that the app name follows that format.
+
+___
+
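The verification described above can be expressed as a one-line regular-expression check, e.g.:

```
import re


def is_valid_unreal_app_name(app_name):
    """True for names like '5-1' or '4-27' (the `x-x` format described above)."""
    return bool(re.fullmatch(r"\d+-\d+", app_name))


assert is_valid_unreal_app_name("5-1")
assert not is_valid_unreal_app_name("5.1")
```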
+
+### **📃 Documentation**
+
+
+Docs: Display wrong image in ExtractOIIOTranscode #5045
+
+The wrong image was displayed in `https://openpype.io/docs/project_settings/settings_project_global#extract-oiio-transcode`.
+
+___
+
+ +### **Merged pull requests** + + +
+
+Drop-down menu to list all families in create placeholder #4928
+
+Currently, in the create placeholder window, the family has to be written manually. This replaces the text field with an enum field listing all families for the current software.
+
+___
+
+ + +
+add sync to specific projects or listen only #4919 + +Extend kitsu sync service with additional arguments to sync specific projects. + + +___ + +
+
+
+## [3.15.8](https://github.com/ynput/OpenPype/tree/3.15.8)
+
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.7...3.15.8)
+
+### **🆕 New features**
+
+
+Publisher: Show instances in report page #4915
+
+Show publish instances in the report page. Also added a basic log view with logs grouped by instance. The validation error detail now has two columns, one with error details and a second with logs. The crashed state shows fast access to report action buttons. Success shows only logs. The publish frame is shrunk automatically on publish stop.
+
+___
+
+ + +
+
+Fusion - Loader plugins updates #4920
+
+Updates to some Fusion loader plugins: The sequence loader can now load footage from the image and online families. The FBX loader can now import all formats Fusion's FBX node can read. You can now import the content of another workfile into your current comp with the workfile loader.
+
+___
+
+ + +
+Fusion: deadline farm rendering #4955 + +Enabling Fusion for deadline farm rendering. + + +___ + +
+ + +
+
+AfterEffects: set frame range and resolution #4983
+
+Frame information (frame start, duration, fps) and resolution (width and height) are applied to the selected composition from the Asset Management System (Ftrack or DB) automatically when a published instance is created. It is also possible to explicitly propagate both values from the DB to the selected composition via newly added menu buttons.
+
+___
+
+ + +
+Publish: Enhance automated publish plugin settings #4986 + +Added plugins option to define settings category where to look for settings of a plugin and added public helper functions to apply settings `get_plugin_settings` and `apply_plugin_settings_automatically`. + + +___ + +
+
+### **🚀 Enhancements**
+
+Load Rig References - Change Rig to Animation in Animation instance #4877 + +We are using the template builder to build an animation scene. All the rig placeholders are imported correctly, but the automatically created animation instances retain the rig family in their names and subsets. In our example, we need animationMain instead of rigMain, because this name will be used in the following steps like lighting.Here is the result we need. I checked, and it's not a template builder problem, because even if I load a rig as a reference, the result is the same. For me, since we are in the animation instance, it makes more sense to have animation instead of rig in the name. The naming is just fine if we use create from the Openpype menu. + + +___ + +
+ + +
+
+Enhancement: Resolve prelaunch code refactoring and update defaults #4916
+
+The main reason for this PR is wrong default settings in `openpype/settings/defaults/system_settings/applications.json` for the Resolve host. The `bin` folder should not be a part of the macOS and Linux `RESOLVE_PYTHON3_PATH` variable. The rest of this PR is code cleanup for the Resolve prelaunch hook to simplify further development. Also added a .gitignore entry for vscode workspace files.
+
+___
+
+ + +
+
+Unreal: 🚚 move Unreal plugin to separate repository #4980
+
+To support the Epic Marketplace, the AYON Unreal integration plugin has to move to a separate repository. This replaces the current files with a git submodule, so the change should have no functional impact. The new repository lives here: https://github.com/ynput/ayon-unreal-plugin
+
+___
+
+ + +
+General: Lib code cleanup #5003 + +Small cleanup in lib files in openpype. + + +___ + +
+ + +
+Allow to open with djv by extension instead of representation name #5004 + +Filters the open-in-DJV action by extension instead of representation name. + + +___ + 
+ + +
+DJV open action `extensions` as `set` #5005 + +Changes the `extensions` attribute to a `set`.
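+Why a `set`: membership checks are O(1) and duplicates collapse, which suits filtering many files by extension. A generic illustration (not the plugin's actual code):
+
+```python
+import os
+
+extensions = {".mp4", ".mov", ".exr", ".jpg", ".png"}
+
+
+def is_openable(path):
+    # Set lookup keeps this check constant-time per file.
+    return os.path.splitext(path)[1].lower() in extensions
+```
+ + +___ + 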
+ + +
+Nuke: extract thumbnail with multiple reposition nodes #5011 + +Added support for multiple reposition nodes. + + +___ + +
+ + +
+Enhancement: Improve logging levels and messages for artist facing publish reports #5018 + +Tweak the logging levels and messages to try and only show those logs that an artist should see and could understand. Move anything that's slightly more involved into a "debug" message instead. + + +___ + +
+ +### **๐Ÿ› Bug fixes** + + +
+Bugfix/frame variable fix #4978 + +Renamed variables to match OpenPype terminology to reduce confusion and add consistency. +___ + +
+ + +
+Global: plugins cleanup plugin will leave beauty rendered files #4790 + +Attempts to explicitly mark more files in the intermediate `renders` folder in the work area for cleanup on farm jobs. + + +___ + 
+ + +
+Fix: Download last workfile doesn't work if not already downloaded #4942 + +An optimization condition was breaking the feature: if the published workfile was not already downloaded, it would not be downloaded at all. + + +___ + 
+ + +
+Unreal: Fix transform when loading layout to match existing assets #4972 + +Fixed transform when loading layout to match existing assets. + + +___ + +
+ + +
+fix the bug of fbx loaders in Max #4977 + +Fixes the FBX loaders not being able to parent cameras (and models) to the CON instances when importing content published from other DCCs such as Maya. + + +___ + 
+ + +
+AfterEffects: allow returning stub with not saved workfile #4984 + +Allows using the Workfile app to save the first, empty workfile. + + +___ + 
+ + +
+Blender: Fix Alembic loading #4985 + +Fixed problem occurring when trying to load an Alembic model in Blender. + + +___ + +
+ + +
+Unreal: Addon Py2 compatibility #4994 + +Fixed Python 2 compatibility of unreal addon. + + +___ + +
+ + +
+Nuke: fixed missing files key in representation #4999 + +Fixes an issue with a missing `files` key when the rendering target is set to existing frames. The instance has to be evaluated for missing files during validation. + + +___ + 
+ + +
+Unreal: Fix the frame range when loading camera #5002 + +The keyframes of the camera, when loaded, were not using the correct frame range. + + +___ + +
+ + +
+Fusion: fixing frame range targeting #5013 + +Frame range targeting on rendering instances now follows the configured options. + + +___ + 
+ + +
+Deadline: fix selection from multiple webservices #5015 + +Multiple different Deadline webservices can be configured. First they must be configured in System Settings, then they can be selected per project in `project_settings/deadline/deadline_servers`. Only a single webservice can be the target of a publish, though.
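+For illustration, the two levels might look like this (server names and the system-level key are hypothetical; only the `deadline_servers` path is taken from the text above):
+
+```json
+{
+    "deadline_urls": {
+        "default": "http://deadline-main:8082",
+        "gpu-farm": "http://deadline-gpu:8082"
+    }
+}
+```
+
+```json
+{
+    "deadline_servers": ["gpu-farm"]
+}
+```
+ + +___ + 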
+ +### **Merged pull requests** + + +
+3dsmax: Refactored publish plugins to use proper implementation of pymxs #4988 + + +___ + +
+ + + + +## [3.15.7](https://github.com/ynput/OpenPype/tree/3.15.7) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.6...3.15.7) + +### **๐Ÿ†• New features** + + +
+Addons directory #4893 + +This adds a directory for Addons, for easier distribution of studio specific code. + + +___ + +
+ + +
+Kitsu - Add "image", "online" and "plate" to review families #4923 + +This PR adds "image", "online" and "plate" to the review families so they can also be uploaded to Kitsu. It also adds the `Add review to Kitsu` tag to the default png review; without it the user would need to add it manually for single-image uploads to Kitsu, which might confuse users (it confused me at first, as movies did work). + + +___ + 
+ + +
+Feature/remove and load inv action #4930 + +Added the ability to remove and load a container as a way to reset it. This can be useful in cases where a container breaks in a way that can be fixed by removing it, then reloading it. Also added the ability to add `InventoryAction` plugins by placing them in `openpype/plugins/inventory`. + + +___ + 
+ +### **๐Ÿš€ Enhancements** + + +
+Load Rig References - Change Rig to Animation in Animation instance #4877 + +We are using the template builder to build an animation scene. All the rig placeholders are imported correctly, but the automatically created animation instances retain the rig family in their names and subsets. In our example, we need animationMain instead of rigMain, because this name will be used in following steps like lighting. I checked, and it's not a template builder problem, because even if I load a rig as a reference the result is the same. Since we are in the animation instance, it makes more sense to have animation instead of rig in the name. The naming is fine if we use create from the OpenPype menu. + + +___ + 
+ + +
+Maya template builder - preserve all references when importing a template #4797 + +When building a template with the Maya template builder, we import the template and also the references inside the template file. This causes some problems:
+- We cannot use the references to version assets imported by the template.
+- When we import the file, the internal reference files are also imported. As a side effect, Maya complains about a reference that no longer exists: `// Error: file: /xxx/maya/2023.3/linux/scripts/AETemplates/AEtransformRelated.mel line 58: Reference node 'turntable_mayaSceneMain_01_RN' is not associated with a reference file.` + + +___ + 
+ + +
+Unreal: Renaming the integration plugin to Ayon. #4646 + +Renamed the .h and .cpp files to Ayon. Also renamed the classes with the Ayon keyword. + + +___ + 
+ + +
+3dsMax: render dialogue needs to be closed #4729 + +Makes sure the render setup dialog is closed before updating the resolution and other render settings. + + +___ + 
+ + +
+Maya Template Builder - Remove default cameras from renderable cameras #4815 + +When we build an asset workfile with build workfile from template inside Maya, we load our turntable camera. But then we end up with two renderable cameras: **persp** and the one imported from the template. We need to remove the **persp** camera (or any other default camera) from the renderable cameras when building the work file. + + +___ + 
+ + +
+Validators for Frame Range in Max #4914 + +- Switch the render Frame Range Type to 3 for specific ranges (the initial range type is 4).
+- Reset Frame Range also sets the frame range in the render settings.
+- The render collector takes the frame range directly from the render settings instead of context data.
+- Added validators for the render frame range type and the frame range respectively, each with a repair action. + + +___ + 
+ + +
+Fusion: Saver creator settings #4943 + +Adds Saver creator settings and an enhanced rendering path with a template. + + +___ + 
+ + +
+General: Project Anatomy on creators #4962 + +Anatomy object of current project is available on `CreateContext` and create plugins. + + +___ + +
+ +### **๐Ÿ› Bug fixes** + + +
+Maya: Validate shader name - OP-5903 #4971 + +Running the plugin would error with: +``` +// TypeError: 'str' object cannot be interpreted as an integer +``` +Fixed the error and added the setting `active`. + + +___ + 
+ + +
+Houdini: Fix slow Houdini launch due to shelves generation #4829 + +Shelf generation during Houdini startup would add an insane amount of delay for the Houdini UI to launch correctly. By deferring the shelf generation this takes away the 5+ minutes of delay for the Houdini UI to launch. + + +___ + +
+ + +
+Fusion - Fixed "optional validation" #4912 + +Added `OptionalPyblishPluginMixin` and `is_active` checks to all publish plugins that should be optional.
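+The usual shape of such a plugin (a sketch; it assumes the mixin import path and its `is_active` helper as used in other OpenPype publish plugins):
+
+```python
+import pyblish.api
+from openpype.pipeline.publish import OptionalPyblishPluginMixin
+
+
+class ValidateSomethingOptional(pyblish.api.InstancePlugin,
+                                OptionalPyblishPluginMixin):
+    """Example optional validator."""
+    label = "Validate Something"
+    order = pyblish.api.ValidatorOrder
+    optional = True  # exposed as a toggle to the artist
+
+    def process(self, instance):
+        # Respect the artist's toggle before doing any work.
+        if not self.is_active(instance.data):
+            return
+        # ... actual validation would go here ...
+```
+ + +___ + 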
+ + +
+Bug: add missing `pyblish.util` import #4937 + +Remote publishing was missing an import used by `remote_publish`. This adds it back. + + +___ + 
+ + +
+Unreal: Fix missing 'object_path' property #4938 + +Epic removed the `object_path` property from `AssetData`. This PR fixes usages of that property. Fixes #4936 + + +___ + 
+ + +
+Remove obsolete global validator #4939 + +Removing the `Validate Sequence Frames` validator from global plugins, as it wasn't handling many cases correctly and was enabled by mistake, breaking functionality on Deadline. + + +___ + 
+ + +
+General: fix build_workfile get_linked_assets missing project_name arg #4940 + +Linked assets collection didn't work within `build_workfile` because the `get_linked_assets` function call was missing the `project_name` argument. +- Added the `project_name` arg to the `get_linked_assets` function call. + + +___ + 
+ + +
+General: fix Scene Inventory switch version error dialog missing parent arg on init #4941 + +Quick fix for the switch version error dialog to set the inventory widget as parent. + + +___ + 
+ + +
+Unreal: Fix camera frame range #4956 + +Fix the frame range of the level sequence for the Camera in Unreal. + + +___ + +
+ + +
+Unreal: Fix missing parameter when updating Alembic StaticMesh #4957 + +Fix an error when updating an Alembic StaticMesh in Unreal, due to a missing parameter in a function call. + + +___ + +
+ + +
+Unreal: Fix render extraction #4963 + +Fix a problem with the extraction of renders in Unreal. + + +___ + +
+ + +
+Unreal: Remove Python 3.8 syntax from addon #4965 + +Removed Python 3.8 syntax from addon. + + +___ + +
+ + +
+Ftrack: Fix editorial task creation #4966 + +Fix key assignment on instance data during editorial publishing in ftrack hierarchy integration. + + +___ + +
+ +### **Merged pull requests** + + +
+Add "shortcut" to Scripts Menu Definition #4927 + +Adds the possibility to associate a shortcut with an entry in the scripts menu definition via the key "shortcut".
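+A hypothetical definition entry for illustration (only the `"shortcut"` key comes from the PR; the other keys, title and command are made-up examples of a menu definition):
+
+```json
+{
+    "type": "action",
+    "title": "Publish Checker",
+    "command": "import studio_tools; studio_tools.run()",
+    "sourcetype": "python",
+    "shortcut": "Ctrl+Shift+P"
+}
+```
+ + +___ + 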
+ + + + +## [3.15.6](https://github.com/ynput/OpenPype/tree/3.15.6) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.5...3.15.6) + +### **๐Ÿ†• New features** + + +
+Substance Painter Integration #4283 + +This implements a part of #4205 by implementing a Substance Painter integration. + +Status: +- [x] Implement Host +- [x] Start Substance with last workfile using `AddLastWorkfileToLaunchArgs` prelaunch hook +- [x] Implement Qt tools +- [x] Implement loaders +- [x] Implemented a Set project mesh loader (this is a relatively special case because a Project will always have exactly one mesh - a Substance Painter project cannot exist without a mesh). +- [x] Implement project open callback +- [x] On project open it notifies the user if the loaded model is outdated +- [x] Implement publishing logic +- [x] Workfile publishing +- [x] Export Texture Sets +- [x] Support OCIO using #4195 (draft branch is set up - see comment) +- [ ] Likely needs more testing on the OCIO front +- [x] Validate all outputs of the Export template are exported/generated +- [x] Allow validation to be optional **(issue: there's no API method to detect what maps will be exported without doing an actual export to disk)** +- [x] Support extracting/integration if not all outputs are generated +- [x] Support multiple materials/texture sets per instance +- [ ] Add validator that can enforce only a single texture set output if studio prefers that. +- [ ] Implement Export File Format (extensions) override in Creator +- [ ] Add settings so Admin can choose which extensions are available. + + +___ + 
+ + +
+Data Exchange: Geometry in 3dsMax #4555 + +Introduces and updates creators, extractors and loaders for the model family + +Introduces new creators, extractors and loaders for the model family while adding model families into the existing max scene loader and extractor +- [x] creators +- [x] adding model family into max scene loader and extractor +- [x] fbx loader +- [x] fbx extractor +- [x] usd loader +- [x] usd extractor +- [x] validator for model family +- [x] obj loader (update function) +- [x] fix the update function of the loader as in #4675 +- [x] Add documentation + + +___ + 
+ + +
+AfterEffects: add review flag to each instance #4884 + +Adds a `mark_for_review` flag to the Creator to allow artists to disable review if necessary. Exposed this flag in Settings, set to True by default (i.e. the same behavior as previously). + + +___ + 
+ +### **๐Ÿš€ Enhancements** + + +
+Houdini: Fix Validate Output Node (VDB) #4819 + +- Removes a plug-in that was a duplicate of this one.
+- Slightly optimizes logging of many prims.
+- Fixes error reporting like https://github.com/ynput/OpenPype/pull/4818 did. + + +___ + 
+ + +
+Houdini: Add null node as output indicator when using TAB search #4834 + + +___ + +
+ + +
+Houdini: Don't error in collect review if camera is not set correctly #4874 + +Do not raise an error in collector when invalid path is set as camera path. Allow camera path to not be set correctly in review instance until validation so it's nicely shown in a validation report. + + +___ + +
+ + +
+Project packager: Backup and restore can store only database #4879 + +The pack project functionality has an option to zip only the project database without project files. Unpack project can skip the project copy if the folder is not found. Added helper functions to `openpype.client.mongo` that can also be used in tests as a replacement for mongo dump. + + +___ + 
+ + +
+Houdini: ExtractOpenGL for Review instance not optional #4881 + +Don't make ExtractOpenGL for the review instance optional. + + +___ + 
+ + +
+Publisher: Small style changes #4894 + +Small changes in styles and form of publisher UI. + + +___ + +
+ + +
+Houdini: Workfile icon in new publisher #4898 + +Fix icon for the workfile instance in new publisher + + +___ + +
+ + +
+Fusion: Simplify creator icons code #4899 + +Simplify code for setting the icons for the Fusion creators + + +___ + +
+ + +
+Enhancement: Fix PySide 6.5 support for loader #4900 + +Fixes PySide 6.5 support in Loader. + + +___ + +
+ +### **๐Ÿ› Bug fixes** + + +
+Maya: Validate Attributes #4917 + +This plugin was broken due to bad fetching of data and wrong repair action. + + +___ + +
+ + +
+Fix: Locally copied version of last published workfile is not incremented #4722 + +### Fix 1 +When copied, the local workfile version kept the published version number, while it must be incremented by 1 to follow OP's naming convention. + +### Fix 2 +The local workfile version's name is built from anatomy. This avoids getting workfiles named with the publish template. + +### Fix 3 +In the case a subset has at least two tasks with published workfiles, for example `Modeling` and `Rigging`, launching `Rigging` was getting the first one with `next`, finding representations for `workfileModeling`, and trying to match the current `task_name` (`Rigging`) with the `representation["context"]["task"]["name"]` of a Modeling representation, which ended up with `workfile_representation` set to `None` and exiting the process. + +Trying to find the `task_name` in the `subset['name']` fixes it. + +### Fix 4 +Fetch input dependencies of the workfile. + +Replacing https://github.com/ynput/OpenPype/pull/4102 to bring these changes home. +___ + 
+ + +
+Maya: soft-fail when pan/zoom locked on camera when playblasting #4929 + +When the pan/zoom enabled attribute on a camera is locked, playblasting with pan/zoom fails because the plugin tries to restore the attribute. This fixes it by skipping over the locked attribute with a warning. + + +___ + 
+ +### **Merged pull requests** + + +
+Maya Load References - Add Display Handle Setting #4904 + +When we load a reference in Maya using the OpenPype loader, display handle is checked by default and prevents us from easily selecting the object in the viewport. I understand that some productions like to keep this option, so I propose adding display handle to the reference loader settings. + + +___ + 
+ + +
+Photoshop: add autocreators for review and flat image #4871 + +Review and flattened image (produced when no instance of the `image` family was created) were previously created somewhat magically. This PR introduces two new auto creators which allow artists to disable the review or flattened image. A `Review` flag was added to all `image` instances to provide a separate review per `image` instance; previously it was only possible to have a separate instance of the `review` family. Review is not enabled on the `image` family by default (i.e. it follows the original behavior). The review auto creator is enabled by default as it was before. The flatten image creator must be set in Settings in `project_settings/photoshop/create/AutoImageCreator`. + + +___ + 
+ + + + +## [3.15.5](https://github.com/ynput/OpenPype/tree/3.15.5) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.4...3.15.5) + +### **๐Ÿš€ Enhancements** + + +
+Maya: Playblast profiles #4777 + +Support playblast profiles. This enables studios to customize what the playblast settings should be on a per-task and/or per-subset basis. For example `modeling` should have `Wireframe On Shaded` enabled, while all other tasks should have it disabled. + + +___ + 
+ + +
+Maya: Support .abc files directly for Arnold standin look assignment #4856 + +If an `.abc` file is loaded into an Arnold standin, support look assignment through the `cbId` attributes in the Alembic file. + + +___ + 
+ + +
+Maya: Hide animation instance in creator #4872 + +- Hide animation instance in creator +- Add inventory action to recreate animation publish instance for loaded rigs + + +___ + +
+ + +
+Unreal: Render Creator enhancements #4477 + +Improvements to the creator for the render family + +This PR introduces some enhancements to the creator for the render family in Unreal Engine: +- Added the option to create a new, empty sequence for the render. +- Added the option to not include the whole hierarchy for the selected sequence. +- Improved error messages. + + +___ + 
+ + +
+Unreal: Added settings for rendering #4575 + +Added settings for rendering in Unreal Engine. + +Two settings have been added: +- Pre roll frames, to set how many frames are used to load the scene before starting the actual rendering. +- Configuration path, to allow saving a preset of settings from Unreal and using it for rendering. + + +___ + 
+ + +
+Global: Optimize anatomy formatting by only formatting used templates instead #4784 + +Optimization to not format the full anatomy when only a single template is used; instead, format only that single template. + + +___ + 
+ + +
+Patchelf version locked #4853 + +For the CentOS dockerfile it is necessary to lock patchelf to an older version, otherwise the build process fails. + +___ + 
+ + +
+Houdini: Implement `switch` method on loaders #4866 + +Implement `switch` method on loaders + + +___ + +
+ + +
+Code: Tweak docstrings and return type hints #4875 + +Tweak docstrings and return type hints for functions in `openpype.client.entities`. + + +___ + +
+ + +
+Publisher: Clear comment on successful publish and on window close #4885 + +Clear comment text field on successful publish and on window close. + + +___ + +
+ + +
+Publisher: Make sure to reset asset widget when hidden and reshown #4886 + +Make sure to reset the asset widget when hidden and reshown. Without this, the asset list in the set asset widget would never refresh when changing context on an existing instance, and thus would not show assets created after the first launch of that widget. + + +___ + 
+ +### **๐Ÿ› Bug fixes** + + +
+Maya: Fix nested model instances. #4852 + +Fix nested model instance under review instance, where data collection was not including "Display Lights" and "Focal Length". + + +___ + +
+ + +
+Maya: Make default namespace naming backwards compatible #4873 + +Namespaces of loaded references are now _by default_ back to what they were before #4511 + + +___ + +
+ + +
+Nuke: Legacy convertor skips deprecation warnings #4846 + +The Nuke legacy convertor was triggering a deprecated function, causing a lot of logging which slowed down the whole process. Changed the convertor to skip all nodes without `AVALON_TAB` to avoid the warnings. + + +___ + 
+ + +
+3dsmax: move startup script logic to hook #4849 + +The startup script for OpenPype was interfering with the Open Last Workfile feature. Moving this logic from a simple command line argument in the Settings to a pre-launch hook solves the order of command line arguments and makes both features work. + + +___ + 
+ + +
+Maya: Don't change time slider ranges in `get_frame_range` #4858 + +Don't change time slider ranges in `get_frame_range` + + +___ + +
+ + +
+Maya: Looks - calculate hash for tx texture #4878 + +A texture hash is calculated for textures used in a published look and is used as a key in a dictionary. After recent changes, this hash was not calculated for TX files, resulting in a `None` value as the dictionary key and crashing publishing. This PR adds the texture hash for TX files to solve that issue. + + +___ + 
+ + +
+Houdini: Collect `currentFile` context data separate from workfile instance #4883 + +Fixes publishing without an active workfile instance due to missing `currentFile` data. The `currentFile` is now collected into the context in Houdini through a context plugin, regardless of the active instances. + + +___ + 
+ + +
+Nuke: fixed broken slate workflow once published on deadline #4887 + +The slate workflow is now working as expected and Validate Sequence Frames no longer raises once the slate frame is included. + + +___ + 
+ + +
+Add fps as instance.data in collect review in Houdini. #4888 + +Fixes the bug of failing to publish extract review in Houdini. Original error: +```python + File "OpenPype\build\exe.win-amd64-3.9\openpype\plugins\publish\extract_review.py", line 516, in prepare_temp_data +    "fps": float(instance.data["fps"]), +KeyError: 'fps' +```
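+A minimal sketch of the kind of collector that fills the missing key (assuming a standard pyblish collector and Houdini's `hou.fps()`):
+
+```python
+import pyblish.api
+
+
+class CollectReviewFps(pyblish.api.InstancePlugin):
+    """Store scene FPS on review instances for ExtractReview."""
+    order = pyblish.api.CollectorOrder
+    hosts = ["houdini"]
+    families = ["review"]
+    label = "Collect Review FPS"
+
+    def process(self, instance):
+        import hou  # only available inside a Houdini session
+        instance.data["fps"] = hou.fps()
+```
+ + +___ + 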
+ + +
+TrayPublisher: Fill missing data for instances with review #4891 + +Fills required data on an instance in traypublisher if the instance has the review family. The data are required by ExtractReview and it would be complicated to do a proper fix at this moment! The collector does for review instances what https://github.com/ynput/OpenPype/pull/4383 did. + + +___ + 
+ + +
+Publisher: Keep track about current context and fix context selection widget #4892 + +Change selected context to current context on reset. Fix bug when context widget is re-enabled. + + +___ + +
+ + +
+Scene inventory: Model refresh fix with cherry picking #4895 + +Fix cherry pick issue in scene inventory. + + +___ + +
+ + +
+Nuke: Pre-render and missing review flag on instance causing crash #4897 + +If an instance created in Nuke was missing the `review` flag, the collector crashed. + + +___ + 
+ +### **Merged pull requests** + + +
+After Effects: fix handles KeyError #4727 + +Sometimes when publishing with AE (we only saw this error on AE 2023), we got a KeyError for the handles in the "Collect Workfile" step. So the handles are now taken from the context if there are no handles in the asset entity. + + +___ + 
+ + + + +## [3.15.4](https://github.com/ynput/OpenPype/tree/3.15.4) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.3...3.15.4) + +### **๐Ÿ†• New features** + + +
+Maya: Cant assign shaders to the ass file - OP-4859 #4460 + +Support AiStandIn nodes for look assignment. + +Using operators we assign shaders and attributes/parameters to nodes within standins. Initially there is only support for a limited amount of attributes, but we can add support as needed: +``` +primaryVisibility +castsShadows +receiveShadows +aiSelfShadows +aiOpaque +aiMatte +aiVisibleInDiffuseTransmission +aiVisibleInSpecularTransmission +aiVisibleInVolume +aiVisibleInDiffuseReflection +aiVisibleInSpecularReflection +aiSubdivUvSmoothing +aiDispHeight +aiDispPadding +aiDispZeroValue +aiStepSize +aiVolumePadding +aiSubdivType +aiSubdivIterations +``` + + +___ + 
+ + +
+Maya: GPU cache representation #4649 + +Implement GPU cache for model, animation and pointcache. + + +___ + +
+ + +
+Houdini: Implement review family with opengl node #3839 + +Implements a first pass for Reviews publishing in Houdini. Resolves #2720 + +Uses the `opengl` ROP node to produce PNG images. + + +___ + +
+ + +
+Maya: Camera focal length visible in review - OP-3278 #4531 + +Camera focal length visible in review. + +Support camera focal length in review; static and dynamic. Resolves #3220 + + +___ + 
+ + +
+Maya: Defining plugins to load on Maya start - OP-4994 #4714 + +Feature to define plugins to load on Maya launch. + + +___ + +
+ + +
+Nuke, DL: Returning Suspended Publishing attribute #4715 + +Old Nuke Publisher's feature for suspended publishing job on render farm was added back to the current Publisher. + + +___ + +
+ + +
+Settings UI: Allow setting a size hint for text fields #4821 + +Text entities have `minimum_lines_count`, which allows changing the minimum size hint of the UI input. + + +___ + 
+ + +
+TrayPublisher: Move 'BatchMovieCreator' settings to 'create' subcategory #4827 + +Moved settings for `BatchMovieCreator` into the subcategory `create` in settings. The changes are made to match the settings schema and structure of other hosts. + + +___ + 
+ +### **๐Ÿš€ Enhancements** + + +
+Maya looks: support for native Redshift texture format #2971 + +Add support for native Redshift textures handling. Closes #2599 + +Uses Redshift's Texture Processor executable to convert textures being used in renders to the Redshift ".rstexbin" format. + + +___ + +
+ + +
+Maya: custom namespace for references #4511 + +Adding an option in Project Settings > Maya > Loader plugins to set custom namespace. If no namespace is set, the default one is used. + + +___ + +
+ + +
+Maya: Set correct framerange with handles on file opening #4664 + +Set the range of playback from the asset data, counting handles, to get the correct data when calling the "collect_animation_data" function. + + +___ + +
+ + +
+Maya: Fix camera update #4751 + +Fix resetting any modelPanel to a different camera when loading a camera and updating. + + +___ + +
+ + +
+Maya: Remove single assembly validation for animation instances #4840 + +Rig groups may now be parented to other groups when the `includeParentHierarchy` attribute on the instance is "off". + + +___ + 
+ + +
+Maya: Optional control of display lights on playblast. #4145 + +Optional control of display lights on playblast. + +Gives control over which display lights are used in playblasts. + + +___ + 
+ + +
+Kitsu: note family requirements #4551 + +Allows adding family requirements to the `IntegrateKitsuNote` task status change. + +Adds a `Family requirements` setting to `Integrate Kitsu Note`, so you can add requirements to determine whether the Kitsu task status should be changed based on which families are published. For instance, you could have the status change only if a subset other than workfile is published (while workfile can still be included) by adding an item set to `Not equal` and `workfile`. + + +___ + 
+ + +
+Deactivate closed Kitsu projects on OP #4619 + +Deactivate project on OP when the project is closed on Kitsu. + + +___ + +
+ + +
+Maya: Suggestion to change capture labels. #4691 + +Change capture labels. + + +___ + +
+ + +
+Houdini: Change node type for OpenPypeContext `null` -> `subnet` #4745 + +Change the node type for OpenPype's hidden context node in Houdini from `null` to `subnet`. This fixes #4734 + + +___ + +
+ + +
+General: Extract burnin hosts filters #4749 + +Removed the hosts filter from the ExtractBurnin plugin. An instance without representations won't cause a crash; the instance is just skipped. We discovered this because Blender already has review but did not create burnins. + + +___ + 
+ + +
+Global: Improve speed of Collect Custom Staging Directory #4768 + +Improve speed of Collect Custom Staging Directory. + + +___ + +
+ + +
+General: Anatomy templates formatting #4773 + +Added an option to format only a single template from anatomy instead of formatting all of them every time. Formatting all templates causes slowdowns, e.g. during publishing of hundreds of instances. + + +___ + 
+ + +
+Harmony: Handle zip files with deeper structure #4782 + +External Harmony zip files might contain one additional level with scene name. + + +___ + +
+ + +
+Unreal: Use common logic to configure executable #4788 + +The Unreal Editor location and version were autodetected. This eased configuration in some cases but was not flexible enough. This PR changes the way the Unreal Editor location is set, unifying it with the logic the other hosts are using. + + +___ + 
+ + +
+Github: Grammar tweaks + uppercase issue title #4813 + +Tweak some of the grammar in the issue form templates. + + +___ + +
+ + +
+Houdini: Allow creation of publish instances via Houdini TAB menu #4831 + +Register the available Creators as Houdini tools so an artist can add publish instances via the Houdini TAB node search menu from within the network editor. + + +___ + 
+ +### **๐Ÿ› Bug fixes** + + +
+Maya: Fix Collect Render for V-Ray, Redshift and Renderman for missing colorspace #4650 + +Fix Collect Render not working for Redshift, V-Ray and Renderman due to missing `colorspace` argument to `RenderProduct` dataclass. + + +___ + +
+ + +
+Maya: Xgen fixes #4707 + +Fix for Xgen extraction of world parented nodes and validation for required namespace. + + +___ + +
+ + +
+Maya: Fix extract review and thumbnail for Maya 2020 #4744 + +Fix playblasting in Maya 2020 with override viewport options enabled. Fixes #4730. + + +___ + +
+ + +
+Maya: local variable 'arnold_standins' referenced before assignment - OP-5542 #4778 + +MayaLookAssigner erroring when MTOA is not loaded: +``` +# Traceback (most recent call last): +# File "\openpype\hosts\maya\tools\mayalookassigner\app.py", line 272, in on_process_selected +# nodes = list(set(item["nodes"]).difference(arnold_standins)) +# UnboundLocalError: local variable 'arnold_standins' referenced before assignment +``` + + +___ + +
+ + +
+Maya: Fix getting view and display in Maya 2020 - OP-5035 #4795 + +The `view_transform` returns a different format in Maya 2020. Fixes #4540 (hopefully). + + +___ + +
+ + +
+Maya: Fix Look Maya 2020 Py2 support for Extract Look #4808 + +Fix Extract Look supporting python 2.7 for Maya 2020. + + +___ + +
+ + +
+Maya: Fix Validate Mesh Overlapping UVs plugin #4816 + +Fix typo in the code where a maya command returns a `list` instead of `str`. + + +___ + +
+ + +
+Maya: Fix tile rendering with Vray - OP-5566 #4832 + +Fixes tile rendering with Vray. + + +___ + +
+ + +
+Deadline: checking existing frames fails when there is number in file name #4698 + +The previous implementation of the validator failed on files with any other number in the rendered file names. The regular expression pattern used now handles numbers in the file names (eg. "Main_beauty.v001.1001.exr", "Main_beauty_v001.1001.exr", "Main_beauty.1001.1001.exr") but not numbers behind frames (eg. "Main_beauty.1001.v001.exr").
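+Conceptually, the pattern anchors the frame as the last digit group before the extension; a sketch of the idea (the PR's actual expression may differ):
+
+```python
+import re
+
+# Frame number = last run of digits right before the file extension.
+FRAME_RE = re.compile(r"^(?P<head>.+?)[._](?P<frame>\d+)\.(?P<ext>[A-Za-z]\w*)$")
+
+for name in ("Main_beauty.v001.1001.exr",
+             "Main_beauty_v001.1001.exr",
+             "Main_beauty.1001.1001.exr"):
+    print(FRAME_RE.match(name).group("frame"))  # 1001 in each case
+
+# A number behind the frame is intentionally not treated as a frame:
+print(FRAME_RE.match("Main_beauty.1001.v001.exr"))  # None
+```
+ + +___ + 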
+ + +
+Maya: Validate Render Settings. #4735 + +Fixes error message when using attribute validation. + + +___ + +
+ + +
+General: Hero version sites recalculation #4737 + +Sites recalculation in integrate hero version expected that exactly the same number of files was integrated as in the previous integration. This is often not the case, so the sites recalculation now happens differently: first, all sites from the previous representation files are prepared, and all of them are added to each file in the new representation. + + +___ + 
+ + +
+Houdini: Fix collect current file #4739 + +Fixes the Workfile publishing getting added into every instance being published from Houdini + + +___ + +
+ + +
+Global: Fix Extract Burnin + Colorspace functions for conflicting python environments with PYTHONHOME #4740 + +This fixes running openpype processes from e.g. a host with conflicting python versions that had `PYTHONHOME` set additionally to `PYTHONPATH`, like e.g. Houdini Py3.7 together with OpenPype Py3.9 when using Extract Burnin for a review in #3839. This fix applies to Extract Burnin and some of the colorspace functions that use `run_openpype_process`. + + +___ + 
+ + +
+Harmony: render what is in timeline in Harmony locally #4741 + +Previously it wasn't possible to render according to the scene start/end set in the Timeline, only according to the whole timeline. This allows artists to override what is in the DB with what they require (with `Validate Scene Settings` disabled). Now an artist can extend the scene by additional frames that shouldn't be rendered but might be desired. Removed the explicit set scene settings (eg. applying frames and resolution directly to the scene after launch) and added a separate menu item to allow artists to do it themselves. + + +___ + 
+ + +
+Maya: Extract Review settings add Use Background Gradient #4747 + +Add Display Gradient Background toggle in settings to fix support for setting flat background color for reviews. + + +___ + +
+ + +
+Nuke: publisher is offering review on write families on demand #4755 + +The original idea, where a reviewable toggle is offered in the publisher on demand, is fixed; the `review` attribute can now be disabled in settings. + + +___ + 
+ + +
+Workfiles: keep Browse always enabled #4766 + +Browse might make sense even if there are no workfiles present; actually in that case it makes the most sense (eg. I want to locate a workfile from outside, from the Desktop for example). + + +___ + 
+ + +
+Global: label key in instance data is optional #4779 + +Collect OTIO review plugin is not crashing if `label` key is missing in instance data. + + +___ + +
+ + +
+Loader: Fix missing variable #4781 + +The variable `handles` was missing in the loader tool after https://github.com/ynput/OpenPype/pull/4746. The variable was renamed to `handles_label` and is initialized to `None` if handles are not available. + + +___ + 
+ + +
+Nuke: Workfile Template builder fixes #4783 + +Fixes the popup window not showing after Nuke start. Knobs with X/Y coordinates on nodes which were converted from placeholders are no longer added if `keepPlaceholders` is switched off. + + +___ + 
+ + +
+Maya: Add family filter 'review' to burnin profile with focal length #4791 + +Avoid the burnin profile with the `focalLength` key for renders; use it only for playblast reviews. + + +___ + 
+ + +
+add farm instance to the render collector in 3dsMax #4794 + +Fixes the failure of submitting a publish job in 3dsMax. + + +___ + 
+ + +
+Publisher: Plugin active attribute is respected #4798 + +The Publisher considers a plugin's `active` attribute, so the plugin is not processed when `active` is set to `False`. But we use the attribute in `OptionalPyblishPluginMixin` for different purposes, so a bypass of the active state validation was added when a plugin inherits from the mixin. This is a temporary solution which cannot be changed until all hosts use the Publisher, otherwise global plugins would be broken. Also, plugins which have `enabled` set to `False` are filtered out -> this happened only when automated settings were applied and the settings contained an `"enabled"` key set to `False`. + + +___ + 
+ + +
+Nuke: settings and optional attribute in publisher for some validators #4811 + +The new publisher supports an optional switch for plugins, offered in the Publisher's right panel. Some plugins were missing this switch, as well as the settings which would offer the optionality. + + +___ + 
+ + +
+Settings: Version settings popup fix #4822 + +The version completer popup had issues on some platforms; this should fix those edge cases. Also fixed an issue where the completer stayed shown after reset (save). + + +___ + 
+ + +
+Hiero/Nuke: adding monitorOut key to settings #4826 + +New versions of Hiero introduced a new colorspace property for Monitor Out. It has been added into the project settings. Also added new config names into the settings enumerator option. + + +___ + 
+ + +
+Nuke: removed default workfile template builder preset #4835 + +Default for workfile template builder should have been empty. + + +___ + +
+ + +
+TVPaint: Review can be made from any instance #4843 + +Adds the `"review"` tag to the output of extract sequence if the instance is marked for review. Until now, only instances with the family `"review"` were able to define input for the `ExtractReview` plugin, which is not right. + + +___ + 
+ +### **๐Ÿ”€ Refactored code** + + +
+Deadline: Remove unused FramesPerTask job info submission #4657 + +Remove unused `FramesPerTask` job info submission to Deadline. + + +___ + +
+ + +
+Maya: Remove pymel dependency #4724 + +Refactors code written using `pymel` to use standard maya python libraries instead like `maya.cmds` or `maya.api.OpenMaya` + + +___ + +
+ + +
+Remove "preview" data from representation #4759 + +Remove "preview" data from representation + + +___ + +
+ + +
+Maya: Collect Review cleanup code for attached subsets #4720 + +Refactor some code for Maya: Collect Review for attached subsets. + + +___ + +
+ + +
+Refactor: Remove `handles`, `edit_in` and `edit_out` backwards compatibility #4746 + +Removes the backward compatibility fallback for data called `handles`, `edit_in` and `edit_out`. + + +___ + 
+ +### **๐Ÿ“ƒ Documentation** + + +
+Bump webpack from 5.69.1 to 5.76.1 in /website #4624 + +Bumps [webpack](https://github.com/webpack/webpack) from 5.69.1 to 5.76.1. +
+Release notes
+Sourced from webpack's releases.
+
+v5.76.1
+
+Fixed
+- Added assert/strict built-in to NodeTargetPlugin
+
+Revert
+
+v5.76.0
+
+Bugfixes, Features, Security, Repo Changes, New Contributors
+
+Full Changelog: https://github.com/webpack/webpack/compare/v5.75.0...v5.76.0
+
+v5.75.0
+
+Bugfixes
+- experiments.* normalize to false when opt-out
+- avoid NaN%
+- show the correct error when using a conflicting chunk name in code
+- HMR code tests existence of window before trying to access it
+- fix eval-nosources-* actually exclude sources
+- fix race condition where no module is returned from processing module
+- fix position of standalone semicolon in runtime code
+
+Features
+- add support for @import to external CSS when using experimental CSS in node
+
+... (truncated)
+
+Commits
+
+Maintainer changes
+This version was pushed to npm by evilebottnawi, a new releaser for webpack since your current version.
+
+ + +[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=webpack&package-manager=npm_and_yarn&previous-version=5.69.1&new-version=5.76.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) + +Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. + +[//]: # (dependabot-automerge-start) +[//]: # (dependabot-automerge-end) + +--- + +
+Dependabot commands and options +
+ +You can trigger Dependabot actions by commenting on this PR: +- `@dependabot rebase` will rebase this PR +- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it +- `@dependabot merge` will merge this PR after your CI passes on it +- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it +- `@dependabot cancel merge` will cancel a previously requested merge and block automerging +- `@dependabot reopen` will reopen this PR if it is closed +- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually +- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) +- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) +- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language +- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language +- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language +- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language + +You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/ynput/OpenPype/network/alerts). + +
+___ + +
+ + +
+Documentation: Add Extract Burnin documentation #4765 + +Add documentation for Extract Burnin global plugin settings. + + +___ + +
+ + +
+Documentation: Move publisher related tips to publisher area #4772 + +Move publisher related tips for After Effects artist documentation to the correct position. + + +___ + +
+ + +
+Documentation: Add extra terminology to the key concepts glossary #4838 + +Tweak some of the key concepts in the documentation. + + +___ + +
+ +### **Merged pull requests** + + +
+Maya: Refactor Extract Look with dedicated processors for maketx #4711 + +Refactor Maya extract look to fix some issues: +- [x] Allow Extraction with maketx with OCIO Color Management enabled in Maya. +- [x] Fix file hashing so it includes arguments to maketx, so that when arguments change it correctly generates a new hash +- [x] Fix maketx destination colorspace when OCIO is enabled +- [x] Use pre-collected colorspaces of the resources instead of trying to retrieve again in Extract Look +- [x] Fix colorspace attributes being reinterpreted by maya on export (fix remapping) - goal is to resolve #2337 +- [x] Fix support for checking config path of maya default OCIO config (due to using `lib.get_color_management_preferences` which remaps that path) +- [x] Merged in #2971 to refactor MakeTX into TextureProcessor and also support generating Redshift `.rstexbin` files. - goal is to resolve #2599 +- [x] Allow custom arguments to `maketx` from OpenPype Settings like mentioned here by @fabiaserra for arguments like: `--monochrome-detect`, `--opaque-detect`, `--checknan`. +- [x] Actually fix the code and make it work. :) (I'll try to keep below checkboxes in sync with my code changes) +- [x] Publishing without texture processor should work (no maketx + no rstexbin) +- [x] Publishing with maketx should work +- [x] Publishing with rstexbin should work +- [x] Test it. (This is just me doing some test-runs, please still test the PR!) + + +___ + +
+ + +
+Maya template builder load all assets linked to the shot #4761 + +Problem +All the assets of the ftrack project are loaded, not only those linked to the shot. + +How to reproduce +Open Maya in the context of a shot, then build a new scene with the "Build Workfile from template" button in the "OpenPype" menu. +![image](https://user-images.githubusercontent.com/7068597/229124652-573a23d7-a2b2-4d50-81bf-7592c00d24dc.png) + + +___ + 
+ + +
+Global: Do not force instance data with frame ranges of the asset #4383 + +This aims to resolve #4317 + + +___ + +
+ + +
+Cosmetics: Fix some grammar in docstrings and messages (and some code) #4752 + +Tweak some grammar in codebase + + +___ + +
+ + +
+Deadline: Submit publish job fails due root work hardcode - OP-5528 #4775 + +Generating config templates was hardcoded to `root[work]`. This PR fixes that. + + +___ + +
+ + +
+CreateContext: Added option to remove Unknown attributes #4776 + +Added an option to remove attributes with UnkownAttrDef on instances. Popping the key will also remove the attribute definition from attribute values, so they're not recreated again. + + +___ + 
+ + + +## [3.15.3](https://github.com/ynput/OpenPype/tree/3.15.3) + + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.2...3.15.3) + +### **๐Ÿ†• New features** + + +
+Blender: Extract Review #3616 + +Added Review to Blender. + +This implementation is based on #3508 but made compatible for the current implementation of OpenPype for Blender. + + +___ + +
+ + +
+Data Exchanges: Point Cloud for 3dsMax #4532 + +Publish PRT format with tyFlow in 3dsmax + +Publish PRT format with tyFlow in 3dsmax and possibly set up loader to load the format too. +- [x] creator +- [x] extractor +- [x] validator +- [x] loader + + +___ + +
+ + +
+Global: persistent staging directory for renders #4583 + +Allows configuring whether the staging directory (`stagingDir`) should be persistent, with the use of profiles. + +With this feature, users can specify a transient data folder path based on presets, which can be used during the creation and publishing stages. In some cases, DCCs automatically add a rendering path during the creation stage, which is then used in publishing. One of the key advantages of this feature is that it allows users to take advantage of faster storage for rendering, which can help improve workflow efficiency. Additionally, it allows users to keep their rendered data persistent and use their own infrastructure for regular cleaning. However, some productions may want to use this feature without persistency, and retargeting the rendering folder to faster storage is also not supported at the moment. It is the studio's responsibility to clean up obsolete folders with data. The location of the folder is configured in `project_anatomy/templates/others` (a 'transient' key is expected, with a 'folder' key; there could be more templates). Which family/task type/subset is applicable is configured in `project_settings/global/tools/publish/transient_dir_profiles`.
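+A sketch of the anatomy template this describes (the `transient`/`folder` keys come from the text above; the path value is illustrative):
+
+```json
+{
+    "templates": {
+        "others": {
+            "transient": {
+                "folder": "{root[work]}/{project[name]}/temp_renders/{asset}/{subset}"
+            }
+        }
+    }
+}
+```
+
+Profiles in `project_settings/global/tools/publish/transient_dir_profiles` then decide which family/task type/subset resolves to this template.
+ + +___ + 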
+ + +
+Kitsu custom comment template #4599 + +Kitsu allows writing markdown in its comment field. This can be very powerful to deliver dynamic comments with the help of the data from the instance. This feature defaults to off, so the admin has to manually set up the comment field the way they want. I have added a basic example of how the comment can look as the comment field's default value. I also want to add some documentation for this, but that's on its way once the code itself looks good to the reviewers.
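+For illustration, such a markdown comment template could interpolate instance data like this (the keys shown are hypothetical examples, not a documented list):
+
+```markdown
+**{family}** `{subset}` v{version:0>3} published
+
+- Task: {task}
+- Comment: {comment}
+```
+ + +___ + 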
+ + +
+MaxScene Family #4615 + +Introduction of the Max Scene Family + + +___ + +
+ +### **๐Ÿš€ Enhancements** + + +
+Maya: Multiple values on single render attribute - OP-4131 #4631 + +When validating render attributes, this adds support for multiple values. When repairing, the first value in the list is used. + + +___ + 
+ + +
+Maya: enable 2D Pan/Zoom for playblasts - OP-5213 #4687 + +Setting for enabling 2D Pan/Zoom on reviews. + + +___ + +
+ + +
+Copy existing or generate new Fusion profile on prelaunch #4572 + +Fusion preferences will be copied to the predefined `~/.openpype/hosts/fusion/prefs` folder (or any other folder set in system settings) on launch. + +The idea is to create a copy of an existing Fusion profile, adding an OpenPype menu to the Fusion instance. By default the copy setting is turned off, so no file copying is performed. Instead a clean Fusion profile is created by Fusion in the predefined folder. The default location is set to `~/.openpype/hosts/fusion/prefs` to better comply with the other OS platforms. After creating the default profile, some modifications are applied: +- forced Python3 +- forced English interface +- setup of OpenPype specific path maps. + +If the `copy_prefs` checkbox is toggled, a copy of the existing Fusion profile folder will be placed in the mentioned location. Then it is altered the same way as described above. The operation is run only once, on the first launch, unless `force_sync [Resync profile on each launch]` is toggled. The English interface is forced because the `FUSION16_PROFILE_DIR` environment variable is not read otherwise (seems to be a Fusion bug). + + +___ + 
+ + +
+Houdini: Create button open new publisher's "create" tab #4601 + +During a talk with @maxpareschi he mentioned that the new publisher in Houdini felt super confusing because "Create" went to the older creator (now completely empty) while the publish button went directly to the publish tab. This resolves that by fixing the Create button to open the new publisher on the Create tab. Also made the publish button enforce going to the "publish" tab for consistency in usage. @antirotor I think changing the Create button's callback was just missed in this commit, or was there a specific reason to not change that around yet? + + +___ + 
+ + +
+Clockify: refresh and fix the integration #4607 + +Due to recent API changes, Clockify requires `user_id` to operate with the timers. I updated this part and it is currently a WIP to make it fully functional. Most functions, such as start and stop timer and projects sync, are currently working. For the rate limiting task a new dependency is added: https://pypi.org/project/ratelimiter/ + + +___ + 
+ + +
+Fusion publish existing frames #4611 + +This PR adds the ability to publish existing frames instead of having to re-render all of them for each new publish. I have split the render_locally plugin so the review part is its own plugin now. I also changed the saver creator plugin's label from Saver to Render (saver) as I intend to add a Prerender creator like in Nuke. + + +___ + 
+ + +
+Resolution settings referenced from DB record for 3dsMax #4652 + +- Add Callback for setting the resolution according to DB after the new scene is created. +- Add a new Action into openpype menu which allows the user to reset the resolution in 3dsMax + + +___ + +
+ + +
+3dsmax: render instance settings in Publish tab #4658 + +Allows users to preset the pools, group and use_published settings in the Render Creator in the Max host. Users can set the settings before or after creating an instance in the new publisher. + + +___ + 
+ + +
+scene length setting referenced from DB record for 3dsMax #4665 + +Sets the timeline length based on the DB record in the 3dsMax host. + + +___ + 
+ + +
+Publisher: Windows reduce command window pop-ups during Publishing #4672 + +Reduce the command line pop-ups that show on Windows during publishing. + + +___ + +
+ + +
+Publisher: Explicit save #4676 + +The Publisher has an explicit button to save changes, so a reset can happen without saving any changes. Saving still happens automatically when publishing is started or on publisher window close, but a confirmation dialog is shown if the host's context has changed. The important context was extended by the workfile path (if the host integration supports it), so workfile changes are captured too. All callbacks that may require saving of the context were moved to the main window to handle showing the dialog in one place. Save changes now returns success, so the rest of the logic is skipped -> publishing won't start when saving of instances fails. Save and reset buttons have shortcuts (Ctrl + S and Ctrl + R). + + +___ + 
+ + +
+CelAction: conditional workfile parameters from settings #4677 + +Since some productions were requesting excluding some workfile parameters from publishing submission, we needed to move them to settings so those could be altered per project. + + +___ + +
+ + +
+Improve logging of used app + tool envs on application launch #4682 + +Improve logging of what apps + tool environments got loaded for an application launch. + + +___ + +
+ + +
+Fix name and docstring for Create Workdir Extra Folders prelaunch hook #4683 + +Fixes the class name and docstring for the Create Workdir Extra Folders prelaunch hook. The class name and docstring were originally copied from another plug-in and didn't match the plug-in logic. This also fixes potentially seeing this twice in your logs, where it was actually running both this prelaunch hook and the actual `AddLastWorkfileToLaunchArgs` plugin. + + +___ + 
+ + +
+Application launch context: Include app group name in logger #4684 + +Clarifies in the logs what app group the ApplicationLaunchContext belongs to and what application is being launched. + + +___ + 
+ + +
+increment workfile version 3dsmax #4685 + +Increments the workfile version in 3dsmax as in the Blender and Maya hosts. + + +___ + 
+ +### **๐Ÿ› Bug fixes** + + +
+Maya: Fix getting non-active model panel. #2968 + +When capturing multiple cameras with image planes that have file sequences playing, only the active (first) camera will play through the file sequence. + + +___ + +
+ + +
+Maya: Fix broken review publishing. #4549 + +Resolves #4547 + + +___ + +
+ + +
+Maya: Avoid error on right click in Loader if `mtoa` is not loaded #4616 + +Fixes an error on right clicking in the Loader when `mtoa` is not a loaded plug-in. Additionally, if `mtoa` isn't loaded the loader will now load the plug-in before trying to create the Arnold standin. + + +___ + 
+ + +
+Maya: Fix extract look colorspace detection #4618 + +Fixes the logic which guesses the colorspace using the `arnold` python library. +- Previously it'd error if `mtoa` was not available on the path, so it still required `mtoa` to be available. +- The colorspace guessing logic doesn't actually require `mtoa` to be loaded, just the `arnold` python library to be available. This changes the logic so it doesn't require the `mtoa` plugin to be loaded to guess the colorspace. +- The if/else branch was likely not doing what was intended: `cmds.loadPlugin("mtoa", quiet=True)` returns None if the plug-in was already loaded, so it would only ever be true if it ends up loading the `mtoa` plugin the first time. +```python +# Tested in Maya 2022.1 +print(cmds.loadPlugin("mtoa", quiet=True)) +# ['mtoa'] +print(cmds.loadPlugin("mtoa", quiet=True)) +# None +``` + + +___ + 
+ + +
+Maya: Maya Playblast Options overrides - OP-3847 #4634 + +When publishing a review in Maya, the extractor would fail due to wrong (long) panel name. + + +___ + +
+ + +
+Bugfix/op 2834 fix extract playblast #4701 + + +___ + 
+ + +
+Bugfix/op 2834 fix extract playblast #4704 + + +___ + 
+ + +
+Maya: bug fix for passing zoom settings if review is attached to subset #4716 + +Fix for attaching review to subset with pan/zoom option. + + +___ + +
+ + +
+Maya: tile assembly fail in draft - OP-4820 #4416 + +Tile assembly in Deadline was broken. + +Initial bug report revealed other areas of the tile assembly that needed fixing. + + +___ + +
+ + +
+Maya: Yeti Validate Rig Input - OP-3454 #4554 + +Fix Yeti Validate Rig Input + +Existing workflow was broken due to this #3297. + + +___ + +
+ + +
+Scene inventory: Fix code errors when "not found" entries are found #4594 + +Whenever a "NOT FOUND" entry was present, a lot of errors happened in the Scene Inventory: +- It started spamming a lot of errors for the VersionDelegate since it had no numeric version (no version at all). Error reported on Discord: +```python +Traceback (most recent call last): + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 65, in paint + text = self.displayText( + File "C:\Users\videopro\Documents\github\OpenPype\openpype\tools\utils\delegates.py", line 33, in displayText + assert isinstance(value, numbers.Integral), ( +AssertionError: Version is not integer. "None" +``` +- The right click menu would error on NOT FOUND entries, and thus not show. With this PR it will now _disregard_ not found items for "Set version" and "Remove" but still allow actions. + +This PR resolves those. + + +___ + 
+ + +
+Kitsu: Sync OP with zou, make sure value-data is int or float #4596 + +Currently the data zou pulls is a string and not a value, causing some bugs in the pipe where a value is expected (like `Set frame range` in Fusion). + + + +This PR makes sure each value is set with int() or float() so these bugs can't happen later on. + + + +_(A request to cgwire has also been sent to allow forcing values only for some metadata columns, but currently the user can enter whatever they want in there)_ + + +___ + 
+ + +
+Max: fix the bug of removing an instance #4617 + +Fixes a bug when removing an instance in 3dsMax. + + +___ + 
+ + +
+Global | Nuke: fixing farm publishing workflow #4623 + +After Nuke adopted the new publisher with new creators, new issues were introduced. This PR addresses those issues, for example broken publishing of reviewable video files when published via the farm. Local publishing was also fixed. + + +___ + 
+ + +
+Ftrack: Ftrack additional families filtering #4633 + +Ftrack family collector makes sure the subset family is also in instance families for additional families filtering. + + +___ + +
+ + +
+Ftrack: Hierarchical <> Non-Hierarchical attributes sync fix #4635 + +Sync between hierarchical and non-hierarchical attributes should be fixed and work as expected. The action now syncs the values as expected, and the event handler does too, but only on newly created entities. + + +___ + +
+ + +
+bugfix for 3dsmax publishing error #4637 + +Fix the bug of a failing publishing job in 3dsMax. + + +___ + +
+ + +
+General: Use right validation for ffmpeg executable #4640 + +Use ffmpeg exec validation for ffmpeg executables instead of oiio exec validation. The validation is used as the last-resort source of ffmpeg from the `PATH` environment variable, which is an edge case but can cause issues. + + +___ + +
+ + +
+3dsmax: opening last workfile #4644 + +Supports opening the last saved workfile in the 3dsmax host. + + +___ + +
+ + +
+Fixed a bug where a QThread in the splash screen could be destroyed before finishing execution #4647 + +This should fix the occasional behavior of the QThread being destroyed before its worker even returns from the `run()` function. After quitting, it now waits for the QThread object to properly close itself. + + +___ + +
+ + +
+General: Use right plugin class for Collect Comment #4653 + +The Collect Comment plugin is an instance plugin, so it should inherit from `InstancePlugin` instead of `ContextPlugin`.
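+
+A hedged sketch of the corrected base class, assuming the standard pyblish-base API (the order offset and label are illustrative, not the plugin's real values):
+
+```python
+import pyblish.api
+
+
+class CollectComment(pyblish.api.InstancePlugin):
+    """Runs once per instance instead of once per context."""
+    order = pyblish.api.CollectorOrder + 0.49  # illustrative order
+    label = "Collect Comment"
+
+    def process(self, instance):
+        # An InstancePlugin receives each instance; a ContextPlugin
+        # would receive the whole context instead.
+        context_comment = instance.context.data.get("comment", "")
+        instance.data.setdefault("comment", context_comment)
+```
+
+
+___
+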
+ + +
+Global: add tags field to thumbnail representation #4660 + +The thumbnail representation might be missing the tags field. + + +___ + +
+ + +
+Integrator: Enforce unique destination transfers, disallow overwrites in queued transfers #4662 + +Fix #4656 by enforcing unique destination transfers in the Integrator. It is now disallowed to queue a destination in the file transaction queue with a new source path during the publish.
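+
+An illustrative sketch of the rule (names are hypothetical, not OpenPype's actual file-transaction API): queuing a destination twice with different sources raises instead of silently overwriting.
+
+```python
+class FileTransactionError(ValueError):
+    """Raised when a destination is queued from two different sources."""
+
+
+class FileTransferQueue:
+    def __init__(self):
+        self._transfers = {}  # destination path -> source path
+
+    def add(self, source, destination):
+        queued = self._transfers.get(destination)
+        if queued is not None and queued != source:
+            raise FileTransactionError(
+                "Destination is already queued from another source: "
+                "{} ({} vs {})".format(destination, queued, source))
+        self._transfers[destination] = source
+```
+
+
+___
+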
+ + +
+Hiero: Creator with correct workfile numeric padding input #4666 + +The Creator had been showing 99 in the workfile input for a long time, even if users set the default value to 1001 in studio settings. This has now been fixed. + + +___ + +
+ + +
+Nuke: Nukenodes family instance without frame range #4669 + +No need to add frame range data into `nukenodes` (backdrop) family publishes - since those are timeless. + + +___ + +
+ + +
+TVPaint: Optional Validation plugins can be de/activated by user #4674 + +Added `OptionalPyblishPluginMixin` to TVPaint plugins that can be optional. + + +___ + +
+ + +
+Kitsu: Slightly less strict with instance data #4678 + +- Allow taking the task name from context if the asset doesn't have any. Fixes an issue with Photoshop's review instance not having `task` in data. +- Allow matching "review" against both `instance.data["family"]` and `instance.data["families"]` because some instances don't have the primary family in families, e.g. in Photoshop and TVPaint (see the sketch after this list). +- Do not error in Integrate Kitsu Review whenever, for whatever reason, Integrate Kitsu Note did not create a comment; just log a message that it was unable to connect a review.
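+
+A small sketch of the more lenient family check described above (an illustrative helper, not the plugin's real code):
+
+```python
+def instance_has_family(instance_data, family):
+    """Match against both the primary family and the families list."""
+    families = set(instance_data.get("families") or [])
+    primary = instance_data.get("family")
+    if primary:
+        families.add(primary)
+    return family in families
+
+# e.g. a Photoshop review instance with only a primary family still matches
+assert instance_has_family({"family": "review"}, "review")
+```
+
+
+___
+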
+ + +
+Publisher: Fix reset shortcut sequence #4694 + +Fix bug created in https://github.com/ynput/OpenPype/pull/4676 where key sequence is checked using unsupported method. The check was changed to convert event into `QKeySequence` object which can be compared to prepared sequence. + + +___ + +
+ + +
+Refactor _capture #4702 + + +___ + +
+ + +
+Hiero: correct container colors if UpToDate #4708 + +Colors on loaded containers now correctly identify the real state of the version: `red` for out of date and `green` for up to date. + + +___ + +
+ +### **๐Ÿ”€ Refactored code** + + +
+Look Assigner: Move Look Assigner tool since it's Maya only #4604 + +Fix #4357: Move the Look Assigner tool to Maya since it's Maya only. + + +___ + +
+ + +
+Maya: Remove unused functions from Extract Look #4671 + +Remove unused functions from Maya Extract Look plug-in + + +___ + +
+ + +
+Extract Review code refactor #3930 + +Trying to reduce complexity of Extract Review plug-in +- Re-use profile filtering from lib +- Remove "combination families" additional filtering which supposedly was from OP v2 +- Simplify 'formatting' for filling gaps +- Use `legacy_io.Session` over `os.environ` + + +___ + +
+ + +
+Maya: Replace last usages of Qt module #4610 + +Replace the last usages of the `Qt` module with `qtpy`. This change is needed for `PySide6` support. All changes happened in Maya loader plugins.
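+
+The swap itself is a one-line import change per module; a minimal sketch assuming the qtpy shim:
+
+```python
+# Before (Qt.py shim, tied to Qt4/Qt5 bindings):
+# from Qt import QtWidgets, QtCore
+
+# After (qtpy resolves whichever binding is installed, including PySide6):
+from qtpy import QtWidgets, QtCore
+```
+
+
+___
+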
+ + +
+Update tests and documentation for `ColormanagedPyblishPluginMixin` #4612 + +Refactor `ExtractorColormanaged` to `ColormanagedPyblishPluginMixin` in tests and documentation. + + +___ + +
+ + +
+Improve logging of used app + tool envs on application launch (minor tweak) #4686 + +Use `app.full_name` for change done in #4682 + + +___ + +
+ +### **๐Ÿ“ƒ Documentation** + + +
+Docs/add architecture document #4344 + +Add `ARCHITECTURE.md` document. + +This document attempts to give a quick overview of the project to help onboarding. It's not extensive documentation but more of an elevator pitch: one-line descriptions of files/directories and what they attempt to do. + + +___ + +
+ + +
+Documentation: Tweak grammar and fix some typos #4613 + +This resolves some grammar issues and typos in the documentation. Also fixes the extension of some images in the After Effects docs, which used an uppercase extension even though the files had a lowercase one. + + +___ + +
+ + +
+Docs: Fix some minor grammar/typos #4680 + +Typo/grammar fixes in documentation. + + +___ + +
+ +### **Merged pull requests** + + +
+Maya: Implement image file node loader #4313 + +Implements a loader for loading a texture image into a `file` node in Maya. + +Similar to Maya's hypershade creation of textures on load, you have the option to choose between three modes of creating: +- Texture +- Projection +- Stencil + +These should match what Maya generates if you create those in Maya. +- [x] Load and manage file nodes +- [x] Apply color spaces after #4195 +- [x] Support for _either_ UDIM or image sequence - currently it seems to always load sequences as UDIM automatically. +- [ ] Add support for animation sequences of UDIM textures using the `..exr` path format? + + +___ + +
+ + +
+Maya Look Assigner: Don't rely on containers for get all assets #4600 + +This resolves #4044 by not relying on containers in the scene but instead just finding nodes with `cbId` attributes. As such, imported nodes are also found and a shader can be assigned (similar to when using get from selection). **Please take into consideration the potential downsides below.** + +Potential downsides: +- If an already loaded look has any dagNodes, say a 3D Projection node, then that will also show up as a loaded asset, where previously nodes from loaded looks were ignored. +- If any dag nodes were created locally, they would have gotten `cbId` attributes on scene save and thus the current asset would almost always show? + + +___ + +
+ + +
+Maya: Unify menu labels for "Set Frame Range" and "Set Resolution" #4605 + +Fix #4109: Unify menu labels for "Set Frame Range" and "Set Resolution". This also tweaks the Houdini label from "Reset Frame Range" to "Set Frame Range". + + +___ + +
+ + +
+Resolve missing OPENPYPE_MONGO in deadline global job preload #4484 + +In the GlobalJobPreLoad plugin, the Deadline SpawnProcess is used to start the OpenPype process. The problem is that under CentOS 7 Linux the SpawnProcess does not pass environment variables, including OPENPYPE_MONGO, to the process, and the process gets stuck. We propose to replace it with a subprocess and pass the environment variables in the parameters.
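+
+A hedged sketch of the replacement using only the standard library (function and argument names are illustrative): unlike SpawnProcess on CentOS, `subprocess` lets the environment, including `OPENPYPE_MONGO`, be handed to the child explicitly.
+
+```python
+import subprocess
+
+
+def run_openpype_process(executable, arguments, environment):
+    """Launch OpenPype with an explicit environment instead of SpawnProcess."""
+    process = subprocess.Popen(
+        [executable] + list(arguments),
+        env=environment,  # OPENPYPE_MONGO and friends travel with the call
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+    )
+    output, _ = process.communicate()
+    return process.returncode, output
+```
+
+
+___
+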
+ + +
+Tests: Added setup_only to tests #4591 + +Allows downloading the test zip, unzipping it and restoring the DB in preparation for a new test. + + +___ + +
+ + +
+Maya: Arnold don't reset maya timeline frame range on render creation (or setting render settings) #4603 + +Fix #4429: Do not reset fps or playback timeline on applying or creating render settings + + +___ + +
+ + +
+Bump @sideway/formula from 3.0.0 to 3.0.1 in /website #4609 + +Bumps [@sideway/formula](https://github.com/sideway/formula) from 3.0.0 to 3.0.1. +
+**Maintainer changes:** This version was pushed to npm by marsup, a new releaser for @sideway/formula since your current version.
+
+[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@sideway/formula&package-manager=npm_and_yarn&previous-version=3.0.0&new-version=3.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
+
+___ + +
+ + +
+Update artist_hosts_maya_arnold.md #4626 + +Correct Arnold docs. +___ + +
+ + +
+Maya: Add "Include Parent Hierarchy" option in animation creator plugin #4645 + +Add an option in Project Settings > Maya > Creator Plugins > Create Animation to include (or not) parent hierarchy. This is to avoid artists to check manually the option for all create animation. + + +___ + +
+ + +
+General: Filter available applications #4667 + +Added an option to filter out applications that don't have a valid executable available in settings, in the launcher and Ftrack actions. This option can be disabled in the new settings category `Applications`. The filtering is disabled by default. + + +___ + +
+ + +
+3dsmax: make sure that startup script executes #4695 + +Fixing reliability of OpenPype startup in 3dsmax. + + +___ + +
+ + +
+Project Manager: Change minimum frame start/end to '0' #4719 + +Project manager can have frame start/end set to `0`. + + +___ + +
+ + + +## [3.15.2](https://github.com/ynput/OpenPype/tree/3.15.2) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.1...3.15.2) + +### **๐Ÿ†• New features** + + +
+maya gltf texture convertor and validator #4261 + +Continuity of the gltf extractor implementation (https://github.com/pypeclub/OpenPype/pull/4192). + +UPDATE: **Validator for GLSL Shader**: Validates whether the mesh uses a GLSL shader; if not, it errors out. The user can choose to perform the repair action, which assigns a GLSL shader. If the mesh uses Stingray PBS, the repair action also checks whether there are any linked textures such as Color, Occlusion and Normal Map; if so, it relinks the related textures to the GLSL shader. + + +___ + +
+ + +
+Unreal: New Publisher #4370 + +Implementation of the new publisher for Unreal. + +This PR includes the changes for all the existing creators to be compatible with the new publisher. The basic creator has been split into two distinct creators: +- `UnrealAssetCreator`, which works with assets in the Content Browser. +- `UnrealActorCreator`, which works with actors in the scene. + + +___ + +
+ + +
+Implementation of a new splash screen #4592 + +Implemented a new splash screen widget to reflect a process running in the background. This widget can be used for tasks other than UE. **Also fixed the compilation error in AssetContainer.cpp when trying to build the plugin in UE 5.0.** + + +___ + +
+ + +
+Deadline for 3dsMax #4439 + +Setting up Deadline for 3dsMax by setting render outputs and the viewport camera. + + +___ + +
+ + +
+Nuke: adding nukeassist #4494 + +Adding support for NukeAssist. + +To support NukeAssist we had to limit some features, since NukeAssist is itself Nuke with limitations. We do not support the Creator and Publisher; users can only load versions with version control, and can also set the frame range and colorspace. + + +___ + +
+ +### **๐Ÿš€ Enhancements** + + +
+Maya: OP-2630 acescg maya #4340 + +Resolves #2712 + + +___ + +
+ + +
+Default Ftrack Family on RenderLayer #4458 + +With default settings, renderlayers in Maya were not being tagged with the Ftrack family leading to confusion when doing reviews. + + +___ + +
+ + +
+Maya: Maya Playblast Options - OP-3783 #4487 + +Replacement PR for #3912. Adds more options for playblasts to preferences/settings. + +Adds the following as options in generating playblasts, matching viewport settings. +- Use default material +- Wireframe on shaded +- X-ray +- X-ray Joints +- X-ray active component + + +___ + +
+ + +
+Maya: Passing custom attributes to alembic - OP-4111 #4516 + +Passing custom attributes to alembic + +This PR makes it possible to pass all user defined attributes along to the alembic representation. + + +___ + +
+ + +
+Maya: Options for VrayProxy output - OP-2010 #4525 + +Options for output of VrayProxy. + +Client requested more granular control of output from VrayProxy instance. Exposed options on the instance and settings for vrmesh and alembic. + + +___ + +
+ + +
+Maya: Validate missing instance attributes #4559 + +Validate missing instance attributes. + +New attributes can be introduced as new features come in. Old instances need to be updated with these attributes so the documentation still makes sense and users don't have to recreate the instances. + + +___ + +
+ + +
+Refactored Generation of UE Projects, installation of plugins moved to the engine #4369 + +Improved the way OpenPype generates UE projects; the installation of the plugin has also been altered to install into the engine. + +OpenPype now uses the appropriate tools to generate UE projects. Unreal Build Tool (UBT) and a "Commandlet Project" are needed to properly generate a BP project (or C++ code in case `dev_mode = True`), folders, the .uproject file and many other resources. On the plugin's side, it is built separately with the Unreal Automation Tool (UAT) and then its contents are moved under the `Engine/Plugins/Marketplace/OpenPype` directory. + + +___ + +
+ + +
+Unreal: Use client functions in Layout loader #4578 + +Use 'get_representations' instead of a 'legacy_io' query in the layout loader. + +This removes the usage of `find_one` called on `legacy_io` and uses client functions instead, as preparation for the AYON connection. All representations are also queried at once instead of one by one.
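+
+A sketch of the batched client query, assuming `openpype.client.get_representations` accepts a `representation_ids` filter as described (inputs are illustrative):
+
+```python
+from openpype.client import get_representations
+
+project_name = "demo_project"  # illustrative project
+repre_ids = []  # filled from the loaded containers in the real loader
+
+# One batched query instead of a legacy_io.find_one call per id.
+representations = list(get_representations(
+    project_name, representation_ids=repre_ids
+))
+repres_by_id = {str(repre["_id"]): repre for repre in representations}
+```
+
+
+___
+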
+ + +
+General: Support for extensions filtering in loaders #4492 + +Added extensions filtering support to loader plugins. + +To avoid a possible backwards-compatibility break, filtering works exactly as before, and filtering by extensions is enabled only if the class attribute 'extensions' is set. + + +___ + +
+ + +
+Nuke: multiple reformat in baking review profiles #4514 + +Added support for multiple reformat nodes in baking profiles. + +The old settings for a single reformat node are supported and prioritised, just in case studios are using them and backward compatibility is needed. Warnings in the Nuke terminal notify users to switch their settings to the new workflow, and the settings also explain the migration path. + + +___ + +
+ + +
+Nuke: Add option to use new creating system in workfile template builder #4545 + +The Nuke workfile template builder can use new creators instead of legacy creators. + +Modified the workfile template builder to have an option for whether legacy creators or new creators should be used. Legacy creators are disabled by default, so Maya has the value changed. + + +___ + +
+ + +
+Global, Nuke: Workfile first version with template processing #4579 + +Supporting the new template workfile builder with a toggle for creating the first version of a workfile in case there is none yet. + + +___ + +
+ + +
+Fusion: New Publisher #4523 + +This is an updated PR for @BigRoy's old PR (https://github.com/ynput/OpenPype/pull/3892). I have merged it with code from OP 3.15.1-nightly.6 and made sure it works as expected. This converts the old publishing system to the new one and implements Fusion as a new host addon. + + +- Create button removed in OpenPype menu in favor of the new Publisher +- Draft refactor validations to raise PublishValidationError +- Implement Creator for New Publisher +- Implement Fusion as Host addon + + +___ + +
+ + +
+TVPaint: Use Publisher tool #4471 + +Use the Publisher tool and new creation system in the TVPaint integration. + +Using the new creation system makes the TVPaint integration a little easier to maintain. Removed the unneeded Creator and Subset Manager tools. The goal is to keep the integration working as close as possible to the previous one; some changes were made, primarily because things were not done right under the previous system. All creators create instances with the final family instead of changing the family during extraction. Render passes are no longer related to a group id but to a render layer instance; a render layer is still related to a group. Workfile, review and scene render instances are created using autocreators instead of auto-collection during publishing. Subset names are fully filled during publishing, but instance labels are filled on refresh with the last known right value. Implemented the basics of a legacy convertor which should convert render layers and render passes. + + +___ + +
+ + +
+TVPaint: Auto-detect render creation #4496 + +Create plugin which creates Render Layer and Render Pass instances based on information in the scene. + +Added a new creator that must be triggered by the artist. The create plugin first creates Render Layer instances if they were not created yet, using the color group name as variant. The creator has an option to rename color groups by a template defined in settings; the template may use the index of the group by its usage in the scene (from bottom to top). After the Render Layers it creates Render Passes: a Render Pass is created for each individual TVPaint layer in any group that has a Render Layer, and the layer name is used as variant (pass). + + +___ + +
+ + +
+TVPaint: Small enhancements #4501 + +Small enhancements in the TVPaint integration which did not make it into https://github.com/ynput/OpenPype/pull/4471. + +It was found that the `opacity` returned from `tv_layerinfo` is always empty, and it is dangerous to add it to the layer information. Added information about the "current" layer to the layers information. Review of Render Layer and Render Pass instances is disabled by default, as most productions use only the "scene review". Skipped usage of the `"enabled"` key from settings in automated layer/pass creation. + + +___ + +
+ + +
+Global: color v3 global oiio transcoder plugin #4291 + +Implements the possibility to use `oiiotool` to transcode image sequences from one color space to one or more others. + +Uses the collected `colorspaceData` information about source color spaces; this information needs to be collected beforehand in each DCC interested in color management. Uses profiles configured in Settings to create single or multiple new representations (and file extensions) with different color spaces. New representations might replace the existing one, and each new representation might contain different tags and custom tags to control its integration step. + + +___ + +
+ + +
+Deadline: Added support for multiple install dirs in Deadline #4451 + +SearchDirectoryList returns the FIRST existing directory, so with multiple OP install dirs it wouldn't search for the appropriate version in the later ones. + + +___ + +
+ + +
+Ftrack: Upload reviewables with original name #4483 + +Ftrack can integrate reviewables with their original filenames. + +As Ftrack has restrictions on component names, the only way to achieve this was to upload the same file twice: once with the required name and once with the original name. + + +___ + +
+ + +
+TVPaint: Ignore transparency in Render Pass #4499 + +It is possible to ignore layer transparency during Render Pass extraction. + +Render Pass extraction can now disregard the opacity of TVPaint layers set in the scene. This can be enabled/disabled in settings. + + +___ + +
+ + +
+Anatomy: Preparation for different root overrides #4521 + +Prepare Anatomy to handle only the 'studio' site override on its own. + +Changed how Anatomy fills root overrides based on the requested site name. The logic that decides which site is active was moved to the sync server addon, and the same goes for receiving root overrides of the local site. Anatomy resolves only studio site overrides; anything else is handled by the sync server. BaseAnatomy only expects a root-overrides value and does not need a site name. Validation of the site name happens in the sync server, the same as resolving whether a site name is local or not. + + +___ + +
+ + +
+Nuke | Global: colormanaged plugin in collection #4556 + +The colormanaged extractor has been changed to a Mixin class so it can be added to any stage of publishing rather than just to extracting. Nuke is now collecting colorspaceData into representations collected on already rendered images. + +The Mixin class can now be used as a secondary parent in publishing plugins. + + +___ + +
+ +### **๐Ÿ› Bug fixes** + + +
+look publishing and srgb colorspace in maya #4276 + +Check whether OCIO color management is enabled before linearizing the colorspace when converting texture maps into tx files. + + +___ + +
+ + +
+Maya: extract Thumbnail "No active model panel found" - OP-3849 #4421 + +Error when extracting a playblast with no model panel. + +If `project_settings/maya/publish/ExtractPlayblast/capture_preset/Viewport Options/override_viewport_options` was off and publishing happened without any model panel shown, the extraction would fail. + + +___ + +
+ + +
+Maya: Fix setting scene fps with float input #4488 + +Setting fps with a float input on integer values would keep the value as float. + +This PR fixes the case of switching between integer fps values, for example 24 > 25. The issue was that when setting the scene fps the original float value was used, which made it unpredictable whether the value was float or integer when mapping the fps values. + + +___ + +
+ + +
+Maya: Multipart fix #4497 + +Fix multipart logic in render products. + +Each renderer has a different way of defining whether the output images are multipart, so we need to define it per renderer. Also, the `multipart` class variable was previously defined in several places, which made it tricky to debug where it came from. Now it is created on initialization and referenced as `self.multipart`. + + +___ + +
+ + +
+Maya: Set pool on tile assembly - OP-2012 #4520 + +Set pool on tile assembly. + +Publishing and tiling jobs need to use the pool from the settings (`project_settings/deadline/publish/ProcessSubmittedJobOnFarm/deadline_pool`), else fall back on the primary pool (`project_settings/deadline/publish/CollectDeadlinePools/primary_pool`). + + +___ + +
+ + +
+Maya: Extract review with handles #4527 + +Review was not extracting properly with/without handles. + +Review instance was not created properly resulting in the frame range on the instance including handles. + + +___ + +
+ + +
+Maya: Fix broken lib. #4529 + +Fix broken lib. + +This commit from this PR broke the Maya lib module. + + +___ + +
+ + +
+Maya: Validate model name - OP-4983 #4539 + +Validate model name issues. + +Couple of issues with validate model name; +- missing platform extraction from settings +- map function should be list comprehension +- code cosmetics + + +___ + +
+ + +
+Maya: SkeletalMesh family loadable as reference #4573 + +In Maya, fix the SkeletalMesh family not loadable as reference. + + +___ + +
+ + +
+Unreal: fix loaders because of missing AssetContainer #4536 + +Fixing Unreal loaders, where changes in the OpenPype Unreal integration plugin deleted AssetContainer. + +`AssetContainer` and `AssetContainerFactory` are still used to mark loaded instances. Because of optimizations in the integration plugin we accidentally removed them, which broke the loader. + + +___ + +
+ + +
+3dsmax unable to delete loaded asset in the scene inventory #4507 + +Fix the bug of being unable to delete a loaded asset in the Scene Inventory. + + +___ + +
+ + +
+Hiero/Nuke: originalBasename editorial publishing and loading #4453 + +Publishing and loading with `originalBasename` works as expected. + +Frame ranges on the version document are now correctly defined to fit the original media frame range being published, which means the clip loader in Nuke now correctly identifies frame start and end. + + +___ + +
+ + +
+Nuke: Fix workfile template placeholder creation #4512 + +Template placeholder creation was erroring out in Nuke due to the Workfile template builder not being able to find any of the plugins for the Nuke host. + +Move `get_workfile_build_placeholder_plugins` function to NukeHost class as workfile template builder expects. + + +___ + +
+ + +
+Nuke: creator farm attributes from deadline submit plugin settings #4519 + +Defaults in farm attributes are sourced from settings. + +Settings for the Deadline Nuke submitter are now used by the Nuke render and prerender creator plugins. + + +___ + +
+ + +
+Nuke: fix clip sequence loading #4574 + +Nuke now correctly loads clips from image sequences created without the "{originalBasename}" token in the anatomy template. + + +___ + +
+ + +
+Fusion: Fix files collection and small bug-fixes #4423 + +Fixed Fusion review representation and small bug-fixes. + +This fixes the problem with review-file generation that stopped publishing on the second publish. The problem was that Fusion simply looked at all the files in the render folder instead of only gathering the frames needed for the review. Also includes a fix for getting the handle start/end, which previously threw an error if the data didn't exist (like from a Kitsu sync). + + +___ + +
+ + +
+Fusion: Updated render_local.py to not only process the first instance #4522 + +Previously only the first render node got its representations added. Moved the `__hasRun` check to `render_once()` so the check only happens with the rendering. Critical PR. + + +___ + +
+ + +
+Fusion: Load sequence fix filepath resolving from representation #4580 + +Resolves an issue mentioned on Discord by @movalex: the loader was incorrectly trying to find the file in the publish folder, which resulted in just picking 'any first file'. + +This gets the filepath from the representation instead of taking the first file listed in the publish folder. + + +___ + +
+ + +
+Fusion: Fix review burnin start and end frame #4590 + +Fix the burnin start and end frame for reviews. Without this the asset document's start and end handle would've been added to the _burnin_ frame range even though that would've been incorrect since the handles are based on the comp saver's render range instead. + + +___ + +
+ + +
+Harmony: missing set of frame range when opening scene #4485 + +The frame range gets set from the DB every time a scene is opened. + +Also added a check for out-of-date loaded containers. + + +___ + +
+ + +
+Photoshop: context is not changed in publisher #4570 + +When PS is already open and an artist launches a new task, the already opened PS should stay open but change its context. + +The problem occurred in the Workfile app, where files from the old task were shown under the new task. This fixes that and opens the last workfile for the new context if one exists. + + +___ + +
+ + +
+hiero: fix effect item node class #4543 + +The collected effect name after renaming now saves the correct class name. + + +___ + +
+ + +
+Bugfix/OP-4616 vray multipart #4297 + +This fixes a bug where multipart vray renders would not make a review in Ftrack. + + +___ + +
+ + +
+Maya: Fix changed location of reset_frame_range #4491 + +The location in commands caused a cyclic import. + + +___ + +
+ + +
+global: source template fixed frame duplication #4503 + +Duplication no longer happens. + +The template uses `originalBasename`, which already assumes all necessary elements are part of the file name, so there was no need for additional optional name elements. + + +___ + +
+ + +
+Deadline: Hint to use Python 3 #4518 + +Added a shebang to hint to Deadline which Python should be used. + +Deadline has issues with Python 2 (especially with `os.scandir`). When a shebang is added to the file header, Deadline uses Python 3 mode instead of Python 2, which fixes the issue.
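+
+For illustration only, the kind of header hint described (the snippet below is a sketch, not the actual plugin file):
+
+```python
+#!/usr/bin/env python3
+# With this shebang Deadline evaluates the file in Python 3 mode,
+# where os.scandir exists (it does not in Python 2).
+import os
+
+entries = [entry.name for entry in os.scandir(".")]
+```
+
+
+___
+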
+ + +
+Publisher: Prevent access to create tab after publish start #4528 + +Prevent access to the create tab after publish start. + +Disable the create button in the instance view on publish start and enable it again on reset. On top of that, make sure it is not possible to go to the create tab while the tab is disabled. + + +___ + +
+ + +
+Color Transcoding: store target_colorspace as new colorspace #4544 + +When transcoding into a new colorspace, the representation must carry this information instead of the original color space. + + +___ + +
+ + +
+Deadline: fix submit_publish_job #4552 + +Fix submit_publish_job + +Resolves #4541 + + +___ + +
+ + +
+Kitsu: Fix task iteration in update-op-with-zou #4577 + +In the last PR (https://github.com/ynput/OpenPype/pull/4425) a last-second comment commit messed up the code and resulted in two lines being the same, crashing the script. This PR fixes that. +___ + +
+ + +
+AttrDefs: Fix type for PySide6 #4584 + +Use the right type in the signal emit for value changes of attribute definitions. + +Changed the `UUID` type to `str`. This is not an issue with PySide2, but it is with PySide6.
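+
+A minimal sketch of the signature change, assuming a qtpy-style signal declaration (the class name is illustrative):
+
+```python
+from qtpy import QtCore
+
+
+class AttrDefValueEmitter(QtCore.QObject):
+    # PySide6 is stricter about signal argument types, so the instance id
+    # is emitted as 'str' instead of 'uuid.UUID'.
+    value_changed = QtCore.Signal(str, object)  # was: Signal(UUID, object)
+
+    def emit_change(self, instance_id, value):
+        self.value_changed.emit(str(instance_id), value)
+```
+
+
+___
+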
+ +### **๐Ÿ”€ Refactored code** + + +
+Scene Inventory: Avoid using ObjectId #4524 + +Avoid using conversion to ObjectId type in scene inventory tool. + +Preparation for AYON compatibility where ObjectId won't be used for ids. Representation ids from loaded containers are not converted to ObjectId but kept as strings which also required some changes when working with representation documents. + + +___ + +
+ +### **Merged pull requests** + + +
+SiteSync: host dirmap is not working properly #4563 + +If an artist used SiteSync with a real remote drive (gdrive, dropbox, sftp), Local Settings threw the error `string indices must be integers`. + +The logic was reworked to provide only `local_drive` values to be overridden by Local Settings. If the remote site is `gdrive` etc., a mapping to `studio` is provided, as it is expected that workfiles will have been imported from the `studio` location and not from the `gdrive` folder. Also the Nuke dirmap was reworked to be less verbose and much faster. + + +___ + +
+ + +
+General: Input representation ids are not ObjectIds #4576 + +Don't use `ObjectId` as representation ids during publishing. + +Representation ids are kept as strings during publishing instead of converting them to `ObjectId`. This change is a prerequisite for the AYON connection. Inputs are used for integration of links and for farm publishing (or at least it looks that way). + + +___ + +
+ + +
+Shotgrid: Fixes on Deadline submissions #4498 + +A few other bug fixes to get Nuke submissions to Deadline working smoothly with the Shotgrid integration. + +Continuing the work done in the other PR, this fixes a few other bugs I came across in further tests. + + +___ + +
+ + +
+Fusion: New Publisher #3892 + +This converts the old publishing system to the new one. It implements Fusion as a new host addon. + + +- Create button removed in OpenPype menu in favor of the new Publisher +- Draft refactor validations to raise `PublishValidationError` +- Implement Creator for New Publisher +- Implement Fusion as Host addon + + +___ + +
+ + +
+Make Kitsu work with Tray Publisher, added kitsureview tag, fixed sync-problems. #4425 + +Make Kitsu work with Tray Publisher, add a kitsureview tag, fix sync problems. + +This PR updates the way the module gathers info for the current publish so it now works with Tray Publisher. It fixes the data that gets synced from Kitsu to OP so all needed data gets registered even if it doesn't exist on Kitsu's side. It also adds the tag "Add review to Kitsu" and adds it to Burn In, so previews get generated to Kitsu by default. + + +___ + +
+ + +
+Maya: V-Ray Set Image Format from settings #4566 + +Resolves #4565 + +Set V-Ray Image Format using settings. + + +___ + +
+ + + + +## [3.15.1](https://github.com/ynput/OpenPype/tree/3.15.1) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.15.0...3.15.1) + +### **๐Ÿ†• New features** + + + + +
+Maya: Xgen (3d / maya ) - #4256 + +___ + +#### Brief description + +Initial Xgen implementation. + + + +#### Description + +Client request of Xgen pipeline. + + + + +___ + +
+ + + +
+Data exchange cameras for 3d Studio Max (3d / 3dsmax ) - #4376 + +___ + +#### Brief description + +Add the Camera family to 3ds Max. + + + +#### Description + +Adds camera extractors (extract abc camera and extract fbx camera) and validators (for camera contents) to 3ds Max. Also adds the extractor for exporting a 3ds Max raw scene (which is also related to the 3ds Max Scene family) for the camera family. + + + + +___ + +
+ + +### **๐Ÿš€ Enhancements** + + + + +
+Adding path validator for non-maya nodes (3d / maya ) - #4271 + +___ + +#### Brief description + +Adding a path validator for filepaths from non-maya nodes, which are created by plugins such as Renderman, Yeti and abcImport. + + + +#### Description + +As the File Path Editor cannot catch wrong filepaths from non-maya nodes such as AlembicNodes, a new validator is necessary to ensure the existence of the filepaths from those nodes. + + + + +___ + +
+ + + +
+Deadline: Allow disabling strict error check in Maya submissions (3d / maya / deadline ) - #4420 + +___ + +#### Brief description + +DL by default has strict error checking, but some errors are not fatal. + + + +#### Description + +This allows setting a profile, based on Task and Subset values, to temporarily disable Strict Error Checks. Subset and task names support regular expressions (not wildcard notation though). + + + + +___ + +
+ + + +
+Houdini: New publisher code tweak (3d / houdini ) - #4374 + +___ + +#### Brief description + +This is cosmetics only - the previous code to me felt quite unreadable due to the lengthy strings being used. + + + +#### Description + +Code should do roughly the same, but just be reformatted. + + + + +___ + +
+ + + +
+3dsmax: enhance alembic loader update function (3d / 3dsmax ) - #4387 + +___ + +## Enhancement + + + +This PR adds update/switch ability to the pointcache/alembic loader in 3dsmax and fixes the wrong tool being shown when clicking the "Manage" item in the OpenPype menu, which is now correctly the Scene Inventory (but was the Subset Manager). + + + +Alembic update still has one caveat - it doesn't cope with a changed number of objects inside the alembic, since loading an alembic in max involves creating all those objects as first-class nodes. So it will keep the objects in the scene and just update the path to the alembic file on them. +___ + +
+ + + +
+Global: supporting `OPENPYPE_TMPDIR` in staging dir maker (editorial / hiero ) - #4398 + +___ + +#### Brief description + +Productions can use OPENPYPE_TMPDIR for the temporary staging publishing directory. + + + +#### Description + +Studios demanded to be able to configure their own shared storages as temporary staging directories. Template formatting is also supported, with optional keys formatting and the following anatomy keys: - root[work | ] - project[name | code] + + + + +___ + +
+ + + +
+General: Functions for current context (other ) - #4324 + +___ + +#### Brief description + +Defined more functions to receive current context information, and added the methods to the host integration so the host can affect the result. + + + +#### Description + +This is one of the steps to reduce usage of `legacy_io.Session`. This change defines how to receive current context information -> call functions instead of accessing `legacy_io.Session` or `os.environ` directly. Direct access to the session or environments is unfortunately not enough for some DCCs where multiple workfiles can be opened at one time, which can heavily affect the context, while the host integration sometimes can't affect that at all. `HostBase` already had `get_current_context` implemented; that was enhanced by adding the more specific methods `get_current_project_name`, `get_current_asset_name` and `get_current_task_name`. The same functions were added to `~/openpype/pipeline/context_tools.py`. The functions in context tools call the host integration methods (if available), otherwise they use environment variables as the default implementation does. Also added `get_current_host_name` to receive the host name from the registered host if available, or from the environment variable.
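+
+Illustrative usage of the new accessors in place of `legacy_io.Session`, following the module path given above:
+
+```python
+from openpype.pipeline.context_tools import (
+    get_current_project_name,
+    get_current_asset_name,
+    get_current_task_name,
+)
+
+# Host integrations can override these; the default implementation
+# falls back to environment variables.
+project_name = get_current_project_name()
+asset_name = get_current_asset_name()
+task_name = get_current_task_name()
+```
+
+
+
+
+___
+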
+ + + +
+Houdini: Do not visualize the hidden OpenPypeContext node (other / houdini ) - #4382 + +___ + +#### Brief description + +Using the new publisher UI would generate a visible 'null' locator at the origin. It's confusing to the user since it's supposed to be 'hidden'. + + + +#### Description + +Before this PR the user would see a locator/null at the origin, which was the 'hidden' `/obj/OpenPypeContext` node. This null would suddenly appear if the user had ever opened the Publisher UI once. After this PR it will not show. Nice and tidy. + + + + +___ + +
+ + + +
+Maya + Blender: Pyblish plugins removed unused `version` and `category` attributes (other ) - #4402 + +___ + +#### Brief description + +Once upon a time in a land far far away there lived a few plug-ins who felt like they didn't belong in generic boxes and felt they needed to be versioned well above others. They tried, but with no success. + + + +#### Description + +Even though they now lived in a universe with elaborate `version` and `category` attributes embedded into their tiny little plug-in DNA this particular deviation has been greatly unused. There is nothing special about the version, nothing special about the category.It does nothing. + + + + +___ + +
+ + + +
+General: Fix original basename frame issues (other ) - #4452 + +___ + +#### Brief description + +Treat `{originalBasename}` differently from standard file processing. In case the template uses `{originalBasename}`, the transfers use the files as they are, without any changes or handling of frames. + + + +#### Description + +Frame handling is problematic with original basename because its padding can't be defined to match the padding in source filenames. It also limits the usage of the functionality to "must have frame at end of filename". This is a proposal to solve that by simply ignoring frame handling and using filenames as they are on the representation. The first frame is still stored in the representation context but is not used in the formatting part. This way we don't have to care about frame padding at all. + + + + +___ + +
+ + + +
+Publisher: Report also crashed creators and convertors (other ) - #4473 + +___ + +#### Brief description + +Added crashes of creator and convertor discovery to the report (lazy solution). + + + +#### Description + +The report in the Publisher also contains information about files that crashed during creator plugin discovery and convertor plugin discovery. They're not separated into categories and there is no other information about them in the report, but this helps a lot during development. This change does not require changing the format/schema of the report nor the UI logic. + + + + +___ + +
+ + +### **๐Ÿ› Bug fixes** + + + + +
+Maya: Fix Validate Attributes plugin (3d / maya ) - #4401 + +___ + +#### Brief description + +Code was broken, so either the plug-in was unused or the breakage had gone unnoticed. + + + +#### Description + +Looking at the commit history of the plug-in itself, it seems this might have been broken for somewhere between two and three years. I think it's been broken for two years, since this commit. Should this plug-in be removed completely? @tokejepsen Is there still a use case where we should have this plug-in? (You created the original one.) + + + + +___ + +
+ + + +
+Maya: Ignore workfile lock in Untitled scene (3d / maya ) - #4414 + +___ + +#### Brief description + +Skip workfile lock check if current scene is 'Untitled'. + + + + +___ + +
+ + + +
+Maya: fps rounding - OP-2549 (3d / maya ) - #4424 + +___ + +#### Brief description + +When FPS is registered in for example Ftrack and rounded either down or up (floor/ceil), comparing it to Maya FPS can fail. Example: 23.97 (Ftrack/Mongo) != 23.976023976023978 (Maya). + + + +#### Description + +Since Maya only has a select number of supported framerates, I've taken the approach of converting any fps to a Maya-supported framerate. We validate the input fps to make sure it is supported in Maya in two ways: whole numbers are validated straight against the supported framerates in Maya; for decimal numbers we find the closest supported framerate in Maya, and if the difference to the closest supported framerate is more than 0.5 we throw an error. If Maya ever supports arbitrary framerates, then we might have a problem, but I'm not holding my breath...
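+
+A sketch of the closest-match idea under the stated 0.5 tolerance (the supported list is abbreviated and illustrative):
+
+```python
+MAYA_SUPPORTED_FPS = [23.976023976023978, 24.0, 25.0, 29.97002997002997, 30.0]
+
+
+def convert_to_maya_fps(fps, tolerance=0.5):
+    """Snap a decimal fps to the closest Maya-supported framerate."""
+    closest = min(MAYA_SUPPORTED_FPS, key=lambda candidate: abs(candidate - fps))
+    if abs(closest - fps) > tolerance:
+        raise ValueError("Unsupported fps: {}".format(fps))
+    return closest
+
+
+assert convert_to_maya_fps(23.97) == 23.976023976023978
+```
+
+
+
+
+___
+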
+ + + +
+Strict Error Checking Default (3d / maya ) - #4457 + +___ + +#### Brief description + +Provide default of strict error checking for instances created prior to PR. + + + + +___ + +
+ + + +
+Create: Enhance instance & context changes (3d / houdini,after effects,3dsmax ) - #4375 + +___ + +#### Brief description + +Changes of instances and context have a complex, hard-to-grasp structure. The structure did not change, but object data is now used instead of complex dictionaries. + + + +#### Description + +This is a proposal for improving the changes data for creators. Implemented `TrackChangesItem`, which handles the changes for us. The item creates changes based on old and new values and can provide information about changed keys or access to the full old or new value, on any "sub-dictionary". Used this new approach to fix changes in Houdini and 3ds Max, and also modified one After Effects plugin using changes. + + + + +___ + +
+ + + +
+Houdini: hotfix condition (3d / houdini ) - #4391 + +___ + +## Hotfix + + + +This fixes a bug introduced in #4374. +___ + +
+ + + +
+Houdini: Houdini shelf tools fixes (3d / houdini ) - #4428 + +___ + +#### Brief description + +Fix Houdini shelf tools. + + + +#### Description + +Use `label` as the mandatory key instead of `name`. Changed how shelves are created. If the script is empty, it is gracefully skipped instead of crashing. + + + + +___ + +
+ + + +
+3dsmax: startup fixes (3d / 3dsmax ) - #4412 + +___ + +#### Brief description + +This fixes various issues that can occur on some 3dsmax versions. + + + +#### Description + +On displays with 4K+ resolution the UI was broken, and some 3dsmax versions couldn't process `PYTHONPATH` correctly. This PR forces `sys.path` and disables `QT_AUTO_SCREEN_SCALE_FACTOR`. + + + + +___ + +
+ + + +
+Fix features for gizmo menu (2d / nuke ) - #4280 + +___ + +#### Brief description + +Fix features for the Gizmo Menu project settings (shortcut for python type of usage and file type of usage functionality) + + + + +___ + +
+ + + +
+Photoshop: fix missing legacy io for legacy instances (2d / photoshop,after effects ) - #4467 + +___ + +#### Brief description + +`legacy_io` import was removed, but usage stayed. + + + +#### Description + +Usage of `legacy_io` should be eradicated, in creators it should be replaced by `self.create_context.get_current_project_name/asset_name/task_name`. + + + + +___ + +
+ + + +
+Fix - addSite loader handles hero version (other / sitesync ) - #4359 + +___ + +#### Brief description + +When adding a site to a representation, the presence of a hero version is checked; if found, the hero version is marked to be downloaded too. Replaces https://github.com/ynput/OpenPype/pull/4191 + + + + +___ + +
+ + + +
+Remove OIIO build for macos (other ) - #4381 + +___ + +## Fix + + + +Since we are not able to provide OpenImageIO tools binaries for macOS, we should remove the item from the `pyproject.toml`. This PR takes care of it. + + + +It also changes the way the `fetch_thirdparty_libs` script works: it no longer crashes when a lib cannot be processed, it only issues a warning. + + + + + +Resolves #3858 +___ + +
+ + + +
+General: Attribute definitions fixes (other ) - #4392 + +___ + +#### Brief description + +Fix possible issues with attribute definitions in the publisher if there is an unknown attribute on an instance. + + + +#### Description + +The source of the issue is that attribute definitions from a creator plugin could be "expanded" during `CreatedInstance` initialization, which would affect all other instances using the same list of attributes -> literally the same list object. If the same list object is used in a "BaseClass" for other creators, it would affect all instances (because of one instance). Other changes had to be implemented to fix the issue and keep the behavior the same. An object of `CreatedInstance` can be created without a reference to a creator object. `CreatedInstance` is responsible for giving the UI attribute definitions (technically it is prepared for cases when each instance may have different attribute definitions -> not yet). Attribute definitions have more conditions in the `__eq__` method and an implemented `__ne__` method (which is required for Py 2 compatibility). Renamed `AbtractAttrDef` to `AbstractAttrDef` (typo fix). + + + + +___ + +
+ + + +
+Ftrack: Don't force ftrackapp endpoint (other / ftrack ) - #4411 + +___ + +#### Brief description + +Auto-fill of the ftrack url no longer breaks custom urls. Custom urls couldn't be used, as `ftrackapp.com` was appended if it was not in the url. + + + +#### Description + +The code was changed so that auto-fill is still supported, but before `ftrackapp` is appended it tries to use the url as is. If the connection works as is, it is used. + + + + +___ + +
+ + + +
+Fix: DL on MacOS (other ) - #4418 + +___ + +#### Brief description + +This works if the DL OpenPype plugin Installation Directories setting is set to the level of the app bundle (e.g. '/Applications/OpenPype 3.15.0.app'). + + + + +___ + +
+ + + +
+Photoshop: make usage of layer name in subset name more controllable (other ) - #4432 + +___ + +#### Brief description + +The layer name was previously used in the subset name only if multiple instances were created in a single step. This adds an explicit toggle. + + + +#### Description + +Toggling this button allows using the layer name in the created subset name even if a single instance is being created. This follows the AE implementation more closely. + + + + +___ + +
+ + + +
+SiteSync: fix dirmap (other ) - #4436 + +___ + +#### Brief description + +Fixed an issue in dirmap in Maya and Nuke. + + + +#### Description + +Loads of errors were thrown in the Nuke console about a dictionary value: `AttributeError: 'dict' object has no attribute 'lower'` + + + + +___ + +
+ + + +
+General: Ignore decode error of stdout/stderr in run_subprocess (other ) - #4446 + +___ + +#### Brief description + +Ignore decode errors and replace any invalid character (byte) with an escaped byte character. + + + +#### Description + +Calling `run_subprocess` could crash if the output contained a unicode character that couldn't be decoded (for example a Polish name of an encoder handler).
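+
+A tiny sketch of the decode policy described (the byte value is illustrative):
+
+```python
+raw_output = b"Rendering done \xb3"  # invalid UTF-8 continuation byte
+
+# 'backslashreplace' keeps the escaped byte instead of raising UnicodeDecodeError.
+text = raw_output.decode("utf-8", errors="backslashreplace")
+print(text)  # Rendering done \xb3
+```
+
+
+
+
+___
+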
+ + + +
+Publisher: Fix reopen bug (other ) - #4463 + +___ + +#### Brief description + +Use the right name of the constant: 'ActiveWindow' -> 'WindowActive'. + + + + +___ + +
+ + + +
+Publisher: Fix compatibility of QAction in Publisher (other ) - #4474 + +___ + +#### Brief description + +Fix `QAction` for older versions of Qt bindings where QAction requires a parent on initialization. + + + +#### Description + +This bug was discovered in Nuke 11. Fixed by creating the QAction when the QMenu is already available and can be used as a parent. + + + + +___ + +
+ + +### **๐Ÿ”€ Refactored code** + + + + +
+General: Remove 'openpype.api' (other ) - #4413 + +___ + +#### Brief description + +This PR removes the `openpype/api.py` file, which has been causing a lot of trouble and cross-imports. + + + +#### Description + +I wanted to remove the file slowly, function by function, but it always reappears somewhere in the codebase, even though most of the functionality imported from there triggers deprecation warnings. This is a small change which may have a huge impact. There shouldn't be anything in the openpype codebase using `openpype.api` anymore, so the only possible issues are in customized repositories or custom addons. + + + + +___ + +
+ + +### **๐Ÿ“ƒ Documentation** + + + + +
+docs-user-Getting Started adjustments (other ) - #4365 + +___ + +#### Brief description + +Small typo fixes here and there, additional info on install/ running OP. + + + + +___ + +
+ + +### **Merged pull requests** + + + + +
+Renderman support for sample and display filters (3d / maya ) - #4003 + +___ + +#### Brief description + +Users can set up both sample and display filters in OpenPype settings if they are using Renderman as the renderer. + + + +#### Description + +You can preset the sample and display filters for Renderman, including the cryptomatte renderpass, in OpenPype settings. Once you select which filters to include in OpenPype settings and then create a render instance for your camera in Maya, the system automatically generates your selected filters in the render settings. The place to set up the filters: _Maya > Render Settings > Renderman Renderer > Display Filters / Sample Filters_ + + + + +___ + +
+ + + +
+Maya: Create Arnold options on repair. (3d / maya ) - #4448 + +___ + +#### Brief description + +When validating/repairing we previously required users to open render settings to create the Arnold options. This is done through code now. + + + + +___ + +
+ + + +
+Update Asset field of creator Instances in Maya Template Builder (3d / maya ) - #4470 + +___ + +#### Brief description + +When we build a template with Maya Template Builder, it will update the asset field of the sets (creator instances) that are imported from the template. + + + +#### Description + +When building a template, we also want to define the publishable content in advance: create an instance of a model, or look, etc., to speed up the workflow and reduce the number of questions we are asked. After building a work file from a saved template that contains pre-created instances, the template builder should update the asset field to the current asset. + + + + +___ + +
+ + + +
+Blender: fix import workfile all families (3d / blender ) - #4405 + +___ + +#### Brief description + +Having this feature related to workfile available for any family is absurd. + + + + +___ + +
+ + + +
+Nuke: update rendered frames in latest version (2d / nuke ) - #4362 + +___ + +#### Brief description + +Introduced a new field to specify frame(s) to re-render only. + + + +#### Description + +Rendering is expensive; sometimes it is helpful to re-render only changed frames and reuse the existing ones. Artists can fill in the Publisher which frame(s) should be re-rendered. If there is already a published version of the currently publishing subset, all representation files are collected (currently for the `render` family only), and when Nuke renders (locally only for now), the old published files are copied into a temporary render folder where only the frames explicitly set in the new field are rewritten. That way the review/burnin process can also reuse old files and recreate reviews/burnins. A new version is produced during this process! + + + + +___ + +
+ + + +
+Feature: Keep synced hero representations up-to-date. (other ) - #4343 + +___ + +#### Brief description + +Keep previously synchronized sites up-to-date by comparing old and new sites and adding old sites if missing in the new ones. Fix #4331 + + + + +___ + +
+ + + +
+Maya: Fix template builder bug where assets are not put in the right hierarchy (other ) - #4367 + +___ + +#### Brief description + +When building a scene from a template, the assets loaded from the placeholders were not put in the hierarchy. Plus, the assets were loaded twice. + + + + +___ + +
+ + + +
+Bump ua-parser-js from 0.7.31 to 0.7.33 in /website (other ) - #4371 + +___ + +Bumps [ua-parser-js](https://github.com/faisalman/ua-parser-js) from 0.7.31 to 0.7.33. +
+Changelog
+
+Sourced from ua-parser-js's changelog.
+
+Version 0.7.31 / 1.0.2
+
+- Fix OPPO Reno A5 incorrect detection
+- Fix TypeError Bug
+- Use AST to extract regexes and verify them with safe-regex
+
+Version 0.7.32 / 1.0.32
+
+- Add new browsers: DuckDuckGo, Huawei Browser, LinkedIn
+- Add new OS: HarmonyOS
+- Add some Huawei models
+- Add Sharp Aquos TV
+- Improve detection of Xiaomi Mi CC9
+- Fix Sony Xperia 1 III misidentified as Acer tablet
+- Fix: detect Sony BRAVIA as SmartTV
+- Fix: detect Xiaomi Mi TV as SmartTV
+- Fix: detect Galaxy Tab S8 as tablet
+- Fix WeGame mistakenly identified as WeChat
+- Fix included commas in Safari / Mobile Safari version
+- Increase UA_MAX_LENGTH to 350
+
+Version 0.7.33 / 1.0.33
+
+- Add new browser: Cobalt
+- Identify Macintosh as an Apple device
+- Fix ReDoS vulnerability
+
+Version 0.8
+
+Version 0.8 was created by accident. This version is now deprecated and no longer maintained, please update to version 0.7 / 1.0.
+
+[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=ua-parser-js&package-manager=npm_and_yarn&previous-version=0.7.31&new-version=0.7.33)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
+
+___
+
+Docs: Question about renaming in Kitsu (other) - #4384
+
+___
+
+#### Brief description
+
+To keep a record of this discussion: https://discord.com/channels/517362899170230292/563751989075378201/1068112668491255818
+
+___
+New Publisher: Fix Creator error typo (other) - #4396
+
+___
+
+#### Brief description
+
+Fixes typo in error message.
+
+___
+Chore: pyproject.toml version because of Poetry (other) - #4408
+
+___
+
+#### Brief description
+
+Automation injects a wrongly formatted version string.
+
+___
+Fix - remove minor part in toml (other) - #4437
+
+___
+
+#### Brief description
+
+Causes an issue in create_env and the new Poetry.
+
+___
+General: Add project code to anatomy (other) - #4445
+
+___
+
+#### Brief description
+
+Added attribute `project_code` to the `Anatomy` object.
+
+#### Description
+
+Anatomy already has access to almost all attributes from the project anatomy except the project code. This PR changes that. Technically, `Anatomy` now carries everything needed to build the fill data of a project:
+
+```
+{
+    "project": {
+        "name": anatomy.project_name,
+        "code": anatomy.project_code
+    }
+}
+```
+
+___
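A self-contained sketch of the idea above follows; `DummyAnatomy` is a stand-in invented for this example only, not the real OpenPype class (which is constructed from a project and exposes many more attributes).

```python
# Stand-in for OpenPype's Anatomy, invented for illustration only.
class DummyAnatomy:
    def __init__(self, project_name, project_code):
        self.project_name = project_name
        self.project_code = project_code


def build_project_fill_data(anatomy):
    # Mirrors the snippet from the PR description above.
    return {
        "project": {
            "name": anatomy.project_name,
            "code": anatomy.project_code,
        }
    }


print(build_project_fill_data(DummyAnatomy("demo_project", "demo")))
```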
+Maya: Arnold Scene Source overhaul - OP-4865 (other / maya) - #4449
+
+___
+
+#### Brief description
+
+General overhaul of the Arnold Scene Source (ASS) workflow.
+
+#### Description
+
+This originally set out to support publishing of static (non-sequential) ASS files, but digging deeper, the whole workflow needed an update to get ready for further issues. During this overhaul the following changes were made:
+
+- Generalized the Arnold Standin workflow to a single loader.
+- Support multiple nodes as proxies.
+- Support proxies for the `pointcache` family.
+- Generalized approach to proxies as resources, so they can be the same file format as the original. This workflow should allow further expansion to utilize operators and eventually USD.
+
+___
+
+## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0)
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0)
+
+**Deprecated:**
+
+- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245)
+
+### 📖 Documentation
+
+- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342)
+- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301)
+- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979)
+
+**🆕 New features**
+
+- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284)
+- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195)
+- Nuke: refactor to latest publisher workflow [\#4006](https://github.com/ynput/OpenPype/pull/4006)
+- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546)
+
+**🚀 Enhancements**
+
+- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356)
+- General: Loader and Creator plugins can be disabled [\#4310](https://github.com/ynput/OpenPype/pull/4310)
+- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306)
+- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295)
+- Git: add pre-commit hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289)
+- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263)
+- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255)
+- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208)
+- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158)
+- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149)
+- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108)
+- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092)
+- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056)
+- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051)
+- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046)
+- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989)
+- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906)
+- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667)
+
+**🐛 Bug fixes**
+
+- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354)
+- PushToProject: Fix hierarchy of project change [\#4350](https://github.com/ynput/OpenPype/pull/4350)
+- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347)
+- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341)
+- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339)
+- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334)
+- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333)
+- General: Use class constants of QMessageBox [\#4332](https://github.com/ynput/OpenPype/pull/4332)
+- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328)
+- Extract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325)
+- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322)
+- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320)
+- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318)
+- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315)
+- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312)
+- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308)
+- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303)
+- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296)
+- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247)
+- Bug: Fixing version detection and filtering in Igniter [\#3914](https://github.com/ynput/OpenPype/pull/3914)
+- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903)
+
+**🔀 Refactored code**
+
+- Remove redundant export\_alembic method. [\#4293](https://github.com/ynput/OpenPype/pull/4293)
+- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237)
+
+**Merged pull requests:**
+
+- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346)
+- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336)
+- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437)
+
 ## [3.14.10](https://github.com/ynput/OpenPype/tree/HEAD)
 
-[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...HEAD)
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10)
 
 **🆕 New features**
 
diff --git a/Dockerfile b/Dockerfile
index 7232223c3c..46dd9e5c0a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
 # Build Pype docker image
 FROM ubuntu:focal AS builder
-ARG OPENPYPE_PYTHON_VERSION=3.7.12
+ARG OPENPYPE_PYTHON_VERSION=3.9.12
 ARG BUILD_DATE
 ARG VERSION
diff --git a/Dockerfile.centos7 b/Dockerfile.centos7
index be3db58b62..ce1a624a4f 100644
--- a/Dockerfile.centos7
+++ b/Dockerfile.centos7
@@ -1,6 +1,6 @@
 # Build Pype docker image
 FROM centos:7 AS builder
-ARG OPENPYPE_PYTHON_VERSION=3.7.12
+ARG OPENPYPE_PYTHON_VERSION=3.9.12
 
 LABEL org.opencontainers.image.name="pypeclub/openpype"
 LABEL org.opencontainers.image.title="OpenPype Docker Image"
@@ -52,7 +52,7 @@ RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.n
 # we need to build our own patchelf
 WORKDIR /temp-patchelf
-RUN git clone https://github.com/NixOS/patchelf.git . \
+RUN git clone -b 0.17.0 --single-branch https://github.com/NixOS/patchelf.git . \
     && source scl_source enable devtoolset-7 \
     && ./bootstrap.sh \
     && ./configure \
@@ -96,11 +96,11 @@ RUN source $HOME/.bashrc \
 RUN source $HOME/.bashrc \
     && bash ./tools/build.sh
 
-RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.7/lib \
-    && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.7/vendor/python/PySide2/Qt/lib
+RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.9/lib \
+    && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.9/vendor/python/PySide2/Qt/lib
 
 RUN cd /opt/openpype \
     rm -rf ./vendor/bin
diff --git a/Dockerfile.debian b/Dockerfile.debian
new file mode 100644
index 0000000000..a53b5aa769
--- /dev/null
+++ b/Dockerfile.debian
@@ -0,0 +1,81 @@
+# Build Pype docker image
+FROM debian:bullseye AS builder
+ARG OPENPYPE_PYTHON_VERSION=3.9.12
+ARG BUILD_DATE
+ARG VERSION
+
+LABEL maintainer="info@openpype.io"
+LABEL description="Docker Image to build and run OpenPype under Debian 11 (Bullseye)"
+LABEL org.opencontainers.image.name="pypeclub/openpype"
+LABEL org.opencontainers.image.title="OpenPype Docker Image"
+LABEL org.opencontainers.image.url="https://openpype.io/"
+LABEL org.opencontainers.image.source="https://github.com/pypeclub/OpenPype"
+LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction"
+LABEL org.opencontainers.image.created=$BUILD_DATE
+LABEL org.opencontainers.image.version=$VERSION
+
+USER root
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+# update base
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    ca-certificates \
+    bash \
+    git \
+    cmake \
+    make \
+    curl \
+    wget \
+    build-essential \
+    libssl-dev \
+    zlib1g-dev \
+    libbz2-dev \
+    libreadline-dev \
+    libsqlite3-dev \
+    llvm \
+    libncursesw5-dev \
+    xz-utils \
+    tk-dev \
+    libxml2-dev \
+    libxmlsec1-dev \
+    libffi-dev \
+    liblzma-dev \
+    patchelf
+
+SHELL ["/bin/bash", "-c"]
+
+
+RUN mkdir /opt/openpype
+
+# download and install pyenv
+RUN curl https://pyenv.run | bash \
+    && echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/init_pyenv.sh \
+    && echo 'eval "$(pyenv init -)"' >> $HOME/init_pyenv.sh \
+    && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/init_pyenv.sh \
+    && echo 'eval "$(pyenv init --path)"' >> $HOME/init_pyenv.sh
+
+# install python with pyenv
+RUN source $HOME/init_pyenv.sh \
+    && pyenv install ${OPENPYPE_PYTHON_VERSION}
+
+COPY . /opt/openpype/
+
+RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh
+
+WORKDIR /opt/openpype
+
+# set local python version
+RUN cd /opt/openpype \
+    && source $HOME/init_pyenv.sh \
+    && pyenv local ${OPENPYPE_PYTHON_VERSION}
+
+# fetch third party tools/libraries
+RUN source $HOME/init_pyenv.sh \
+    && ./tools/create_env.sh \
+    && ./tools/fetch_thirdparty_libs.sh
+
+# build openpype
+RUN source $HOME/init_pyenv.sh \
+    && bash ./tools/build.sh
diff --git a/HISTORY.md b/HISTORY.md
index 88b50c67dd..543cf11513 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,84 @@
 # Changelog
 
-## [3.14.10](https://github.com/ynput/OpenPype/tree/HEAD)
+## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0)
+
+[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0)
+
+**Deprecated:**
+
+- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245)
+
+### 📖 Documentation
+
+- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342)
+- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301)
+- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979)
+
+**🆕 New features**
+
+- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284)
+- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195)
+- Nuke: refactor to latest publisher workflow [\#4006](https://github.com/ynput/OpenPype/pull/4006)
+- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546)
+
+**🚀 Enhancements**
+
+- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356)
+- General: Loader and Creator plugins can be disabled [\#4310](https://github.com/ynput/OpenPype/pull/4310)
+- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306)
+- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295)
+- Git: add pre-commit hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289)
+- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263)
+- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255)
+- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208)
+- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158)
+- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149)
+- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108)
+- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092)
+- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056)
+- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051)
+- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046)
+- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989)
+- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906)
+- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667)
+
+**🐛 Bug fixes**
+
+- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354)
+- PushToProject: Fix hierarchy of project change [\#4350](https://github.com/ynput/OpenPype/pull/4350)
+- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347)
+- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341)
+- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339)
+- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334)
+- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333)
+- General: Use class constants of QMessageBox [\#4332](https://github.com/ynput/OpenPype/pull/4332)
+- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328)
+- Extract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325)
+- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322)
+- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320)
+- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318)
+- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315)
+- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312)
+- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308)
+- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303)
+- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296)
+- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247)
+- Bug: Fixing version detection and filtering in Igniter [\#3914](https://github.com/ynput/OpenPype/pull/3914)
+- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903)
+
+**🔀 Refactored code**
+
+- Remove redundant export\_alembic method. [\#4293](https://github.com/ynput/OpenPype/pull/4293)
+- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237)
+
+**Merged pull requests:**
+
+- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346)
+- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336)
+- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437)
+
+
+## [3.14.10](https://github.com/ynput/OpenPype/tree/3.14.10)
 
 [Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10)
 
diff --git a/README.md b/README.md
index a3d3cf1dbb..8757e3db92 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,11 @@
-[![All Contributors](https://img.shields.io/badge/all_contributors-27-orange.svg?style=flat-square)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/all_contributors-28-orange.svg?style=flat-square)](#contributors-)
 
 OpenPype
 ====
 
-[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2021-lightgrey?labelColor=303846)
-
+[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846)
 
 Introduction
 
@@ -31,7 +30,7 @@ The main things you will need to run and build OpenPype are:
 - **Terminal** in your OS
     - PowerShell 5.0+ (Windows)
     - Bash (Linux)
-- [**Python 3.7.8**](#python) or higher
+- [**Python 3.9.6**](#python) or higher
 - [**MongoDB**](#database) (needed only for local development)
 
@@ -50,13 +49,14 @@ For more details on requirements visit [requirements documentation](https://open
 Building OpenPype
 -------------
 
-To build OpenPype you currently need [Python 3.7](https://www.python.org/downloads/) as we are following
+To build OpenPype you currently need [Python 3.9](https://www.python.org/downloads/) as we are following
 [vfx platform](https://vfxplatform.com). Because some Linux distros come with a newer Python version
-already, you need to install the **3.7** version and make use of it. You can use [pyenv](https://github.com/pyenv/pyenv) for this on Linux.
+already, you need to install the **3.9** version and make use of it. You can use [pyenv](https://github.com/pyenv/pyenv) for this on Linux.
+**Note**: We do not support 3.9.0 because of [this bug](https://github.com/python/cpython/pull/22670). Please use a higher 3.9.x version.
 
 ### Windows
 
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads).
+You will need [Python >= 3.9.1](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads).
 More tools might be needed for installing dependencies (for example for **OpenTimelineIO**) - mostly
 development tools like [CMake](https://cmake.org/) and [Visual Studio](https://visualstudio.microsoft.com/cs/downloads/)
 
@@ -82,7 +82,7 @@ OpenPype is built using [CX_Freeze](https://cx-freeze.readthedocs.io/en/latest)
 
 ### macOS
 
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll need also other tools to build
+You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need other tools to build
 some OpenPype dependencies like [CMake](https://cmake.org/) and **XCode Command Line Tools** (or some other build system).
 
 An easy way to install everything necessary is to use [Homebrew](https://brew.sh):
 
@@ -106,19 +106,19 @@ exec "$SHELL"
 PATH=$(pyenv root)/shims:$PATH
 ```
 
-4) Pull in required Python version 3.7.x
+4) Pull in required Python version 3.9.x
 ```sh
 # install Python build dependences
 brew install openssl readline sqlite3 xz zlib
 
-# replace with up-to-date 3.7.x version
-pyenv install 3.7.9
+# replace with up-to-date 3.9.x version
+pyenv install 3.9.6
 ```
 
 5) Set local Python version
 ```sh
 # switch to OpenPype source directory
-pyenv local 3.7.9
+pyenv local 3.9.6
 ```
 
 #### To build OpenPype:
 
@@ -145,7 +145,7 @@ sudo ./tools/docker_build.sh centos7
 
 If all is successful, you'll find built OpenPype in `./build/` folder.
 
 #### Manual build
-You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled.
+You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that don't have it preinstalled.
 
 To build Python related stuff, you need Python header files installed (`python3-dev` on Ubuntu for example).
 
@@ -222,14 +222,14 @@ eval "$(pyenv virtualenv-init -)"
 # reload shell
 exec $SHELL
 
-# install Python 3.7.9
-pyenv install -v 3.7.9
+# install Python 3.9.x
+pyenv install -v 3.9.6
 
 # change path to OpenPype 3
 cd /path/to/openpype-3
 
 # set local python version
-pyenv local 3.7.9
+pyenv local 3.9.6
 ```
 
@@ -303,41 +303,44 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d

 [All-contributors table regenerated here: the 27 existing entries
 (Milan Kolar, Jakub Ježek, Ondřej Samohel, Jakub Trllo, Petr Kalis, 64qam,
 Roy Nieterau, Toke Jepsen, Jiri Sindelar, Simone Barbieri, karimmozilla,
 Allan I. A., murphy, Wijnand Koreman, Bo Zhou, Clément Hector, David Lai,
 Derek, Gábor Marinov, icyvapor, Jérôme LORRAIN, David Morris-Oliveros,
 BenoitConnan, Malthaldar, Sven Neve, zafrs and Félix David) keep their
 contribution emoji; one new entry is added: Alexey Bogomolov (💻).]
@@ -345,4 +348,4 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d -This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! diff --git a/common/openpype_common/distribution/file_handler.py b/common/openpype_common/distribution/file_handler.py index f585c77632..e649f143e9 100644 --- a/common/openpype_common/distribution/file_handler.py +++ b/common/openpype_common/distribution/file_handler.py @@ -1,4 +1,3 @@ -import enlighten import os import re import urllib @@ -252,6 +251,11 @@ class RemoteFileHandler: if key.startswith('download_warning'): return value + # handle antivirus warning for big zips + found = re.search("(confirm=)([^&.+])", response.text) + if found: + return found.groups()[1] + return None @staticmethod @@ -259,15 +263,9 @@ class RemoteFileHandler: response_gen, destination, ): with open(destination, "wb") as f: - pbar = enlighten.Counter( - total=None, desc="Save content", units="%", color="green") - progress = 0 for chunk in response_gen: if chunk: # filter out keep-alive new chunks f.write(chunk) - progress += len(chunk) - - pbar.close() @staticmethod def _quota_exceeded(first_chunk): diff --git a/igniter/__init__.py b/igniter/__init__.py index 02cba6a483..aa1b1d209e 100644 --- a/igniter/__init__.py +++ b/igniter/__init__.py @@ -24,7 +24,7 @@ def open_dialog(): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .install_dialog import InstallDialog scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) @@ -47,7 +47,7 @@ def open_update_window(openpype_version): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .update_window import UpdateWindow scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) @@ -71,7 +71,7 @@ def show_message_dialog(title, message): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .message_dialog import MessageDialog scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) diff --git a/igniter/__main__.py b/igniter/__main__.py index b453d29d5f..9783b20f49 100644 --- a/igniter/__main__.py +++ b/igniter/__main__.py @@ -2,8 +2,7 @@ """Open install dialog.""" import sys -from Qt import QtWidgets # noqa -from Qt.QtCore import Signal # noqa +from qtpy import QtWidgets from .install_dialog import InstallDialog diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index 077f56d769..6c7c834062 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -57,11 +57,9 @@ class OpenPypeVersion(semver.VersionInfo): """Class for storing information about OpenPype version. 
Attributes: - staging (bool): True if it is staging version path (str): path to OpenPype """ - staging = False path = None # this should match any string complying with https://semver.org/ _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P[a-zA-Z\d\-.]*))?(?:\+(?P[a-zA-Z\d\-.]*))?") # noqa: E501 @@ -83,12 +81,10 @@ class OpenPypeVersion(semver.VersionInfo): build (str): an optional build string version (str): if set, it will be parsed and will override parameters like `major`, `minor` and so on. - staging (bool): set to True if version is staging. path (Path): path to version location. """ self.path = None - self.staging = False if "version" in kwargs.keys(): if not kwargs.get("version"): @@ -113,29 +109,8 @@ class OpenPypeVersion(semver.VersionInfo): if "path" in kwargs.keys(): kwargs.pop("path") - if kwargs.get("staging"): - self.staging = kwargs.get("staging", False) - kwargs.pop("staging") - - if "staging" in kwargs.keys(): - kwargs.pop("staging") - - if self.staging: - if kwargs.get("build"): - if "staging" not in kwargs.get("build"): - kwargs["build"] = f"{kwargs.get('build')}-staging" - else: - kwargs["build"] = "staging" - - if kwargs.get("build") and "staging" in kwargs.get("build", ""): - self.staging = True - super().__init__(*args, **kwargs) - def __eq__(self, other): - result = super().__eq__(other) - return bool(result and self.staging == other.staging) - def __repr__(self): return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" @@ -150,43 +125,11 @@ class OpenPypeVersion(semver.VersionInfo): return True if self.finalize_version() == other.finalize_version() and \ - self.prerelease == other.prerelease and \ - self.is_staging() and not other.is_staging(): + self.prerelease == other.prerelease: return True return result - def set_staging(self) -> OpenPypeVersion: - """Set version as staging and return it. - - This will preserve current one. - - Returns: - OpenPypeVersion: Set as staging. - - """ - if self.staging: - return self - return self.replace(parts={"build": f"{self.build}-staging"}) - - def set_production(self) -> OpenPypeVersion: - """Set version as production and return it. - - This will preserve current one. - - Returns: - OpenPypeVersion: Set as production. - - """ - if not self.staging: - return self - return self.replace( - parts={"build": self.build.replace("-staging", "")}) - - def is_staging(self) -> bool: - """Test if current version is staging one.""" - return self.staging - def get_main_version(self) -> str: """Return main version component. @@ -218,21 +161,8 @@ class OpenPypeVersion(semver.VersionInfo): if not m: return None version = OpenPypeVersion.parse(string[m.start():m.end()]) - if "staging" in string[m.start():m.end()]: - version.staging = True return version - @classmethod - def parse(cls, version): - """Extends parse to handle ta handle staging variant.""" - v = super().parse(version) - openpype_version = cls(major=v.major, minor=v.minor, - patch=v.patch, prerelease=v.prerelease, - build=v.build) - if v.build and "staging" in v.build: - openpype_version.staging = True - return openpype_version - def __hash__(self): return hash(self.path) if self.path else hash(str(self)) @@ -382,80 +312,28 @@ class OpenPypeVersion(semver.VersionInfo): return False @classmethod - def get_local_versions( - cls, production: bool = None, - staging: bool = None - ) -> List: + def get_local_versions(cls) -> List: """Get all versions available on this machine. - Arguments give ability to specify if filtering is needed. 
If both - arguments are set to None all found versions are returned. - - Args: - production (bool): Return production versions. - staging (bool): Return staging versions. - Returns: list: of compatible versions available on the machine. """ - # Return all local versions if arguments are set to None - if production is None and staging is None: - production = True - staging = True - - elif production is None and not staging: - production = True - - elif staging is None and not production: - staging = True - - # Just return empty output if both are disabled - if not production and not staging: - return [] - # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) versions = OpenPypeVersion.get_versions_from_directory(dir_to_search) - filtered_versions = [] - for version in versions: - if version.is_staging(): - if staging: - filtered_versions.append(version) - elif production: - filtered_versions.append(version) - return list(sorted(set(filtered_versions))) + return list(sorted(set(versions))) @classmethod - def get_remote_versions( - cls, production: bool = None, - staging: bool = None - ) -> List: + def get_remote_versions(cls) -> List: """Get all versions available in OpenPype Path. - Arguments give ability to specify if filtering is needed. If both - arguments are set to None all found versions are returned. - - Args: - production (bool): Return production versions. - staging (bool): Return staging versions. + Returns: + list of OpenPypeVersions: Versions found in OpenPype path. """ # Return all local versions if arguments are set to None - if production is None and staging is None: - production = True - staging = True - - elif production is None and not staging: - production = True - - elif staging is None and not production: - staging = True - - # Just return empty output if both are disabled - if not production and not staging: - return [] dir_to_search = None if cls.openpype_path_is_accessible(): @@ -476,14 +354,7 @@ class OpenPypeVersion(semver.VersionInfo): versions = cls.get_versions_from_directory(dir_to_search) - filtered_versions = [] - for version in versions: - if version.is_staging(): - if staging: - filtered_versions.append(version) - elif production: - filtered_versions.append(version) - return list(sorted(set(filtered_versions))) + return list(sorted(set(versions))) @staticmethod def get_versions_from_directory( @@ -562,7 +433,6 @@ class OpenPypeVersion(semver.VersionInfo): @staticmethod def get_latest_version( - staging: bool = False, local: bool = None, remote: bool = None ) -> Union[OpenPypeVersion, None]: @@ -571,7 +441,6 @@ class OpenPypeVersion(semver.VersionInfo): The version does not contain information about path and source. This is utility version to get the latest version from all found. - Build version is not listed if staging is enabled. Arguments 'local' and 'remote' define if local and remote repository versions are used. All versions are used if both are not set (or set @@ -580,7 +449,6 @@ class OpenPypeVersion(semver.VersionInfo): 'False' in that case only build version can be used. Args: - staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. 
@@ -599,22 +467,9 @@ class OpenPypeVersion(semver.VersionInfo): remote = True installed_version = OpenPypeVersion.get_installed_version() - local_versions = [] - remote_versions = [] - if local: - local_versions = OpenPypeVersion.get_local_versions( - staging=staging - ) - if remote: - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging - ) - all_versions = local_versions + remote_versions - if not staging: - all_versions.append(installed_version) - - if not all_versions: - return None + local_versions = OpenPypeVersion.get_local_versions() if local else [] + remote_versions = OpenPypeVersion.get_remote_versions() if remote else [] # noqa: E501 + all_versions = local_versions + remote_versions + [installed_version] all_versions.sort() return all_versions[-1] @@ -705,7 +560,7 @@ class BootstrapRepos: """Get path for specific version in list of OpenPype versions. Args: - version (str): Version string to look for (1.2.4+staging) + version (str): Version string to look for (1.2.4-nightly.1+test) version_list (list of OpenPypeVersion): list of version to search. Returns: @@ -807,6 +662,8 @@ class BootstrapRepos: """ version = OpenPypeVersion.version_in_str(zip_file.name) destination_dir = self.data_dir / f"{version.major}.{version.minor}" + if not destination_dir.exists(): + destination_dir.mkdir(parents=True) destination = destination_dir / zip_file.name if destination.exists(): @@ -1131,14 +988,12 @@ class BootstrapRepos: @staticmethod def find_openpype_version( - version: Union[str, OpenPypeVersion], - staging: bool + version: Union[str, OpenPypeVersion] ) -> Union[OpenPypeVersion, None]: """Find location of specified OpenPype version. Args: version (Union[str, OpenPypeVersion): Version to find. - staging (bool): Filter staging versions. Returns: requested OpenPypeVersion. @@ -1151,9 +1006,7 @@ class BootstrapRepos: if installed_version == version: return installed_version - local_versions = OpenPypeVersion.get_local_versions( - staging=staging, production=not staging - ) + local_versions = OpenPypeVersion.get_local_versions() zip_version = None for local_version in local_versions: if local_version == version: @@ -1165,37 +1018,25 @@ class BootstrapRepos: if zip_version is not None: return zip_version - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging, production=not staging - ) - for remote_version in remote_versions: - if remote_version == version: - return remote_version - return None + remote_versions = OpenPypeVersion.get_remote_versions() + return next( + ( + remote_version for remote_version in remote_versions + if remote_version == version + ), None) @staticmethod - def find_latest_openpype_version( - staging: bool - ) -> Union[OpenPypeVersion, None]: + def find_latest_openpype_version() -> Union[OpenPypeVersion, None]: """Find the latest available OpenPype version in all location. - Args: - staging (bool): True to look for staging versions. - Returns: Latest OpenPype version on None if nothing was found. 
""" installed_version = OpenPypeVersion.get_installed_version() - local_versions = OpenPypeVersion.get_local_versions( - staging=staging - ) - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging - ) - all_versions = local_versions + remote_versions - if not staging: - all_versions.append(installed_version) + local_versions = OpenPypeVersion.get_local_versions() + remote_versions = OpenPypeVersion.get_remote_versions() + all_versions = local_versions + remote_versions + [installed_version] if not all_versions: return None @@ -1215,7 +1056,6 @@ class BootstrapRepos: def find_openpype( self, openpype_path: Union[Path, str] = None, - staging: bool = False, include_zips: bool = False ) -> Union[List[OpenPypeVersion], None]: """Get ordered dict of detected OpenPype version. @@ -1229,8 +1069,6 @@ class BootstrapRepos: Args: openpype_path (Path or str, optional): Try to find OpenPype on the given path or url. - staging (bool, optional): Filter only staging version, skip them - otherwise. include_zips (bool, optional): If set True it will try to find OpenPype in zip files in given directory. @@ -1278,7 +1116,7 @@ class BootstrapRepos: for dir_to_search in dirs_to_search: try: openpype_versions += self.get_openpype_versions( - dir_to_search, staging) + dir_to_search) except ValueError: # location is invalid, skip it pass @@ -1643,15 +1481,11 @@ class BootstrapRepos: return False return True - def get_openpype_versions( - self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions(self, openpype_dir: Path) -> list: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. - staging (bool, optional): Find staging versions if True. Returns: list of OpenPypeVersion @@ -1669,8 +1503,7 @@ class BootstrapRepos: for item in openpype_dir.iterdir(): # if the item is directory with major.minor version, dive deeper if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): - _versions = self.get_openpype_versions( - item, staging=staging) + _versions = self.get_openpype_versions(item) if _versions: openpype_versions += _versions @@ -1693,11 +1526,7 @@ class BootstrapRepos: continue detected_version.path = item - if staging and detected_version.is_staging(): - openpype_versions.append(detected_version) - - if not staging and not detected_version.is_staging(): - openpype_versions.append(detected_version) + openpype_versions.append(detected_version) return sorted(openpype_versions) diff --git a/igniter/install_dialog.py b/igniter/install_dialog.py index 65ddd58735..551e2da918 100644 --- a/igniter/install_dialog.py +++ b/igniter/install_dialog.py @@ -5,9 +5,7 @@ import sys import re import collections -from Qt import QtCore, QtGui, QtWidgets # noqa -from Qt.QtGui import QValidator # noqa -from Qt.QtCore import QTimer # noqa +from qtpy import QtCore, QtGui, QtWidgets from .install_thread import InstallThread from .tools import ( diff --git a/igniter/install_thread.py b/igniter/install_thread.py index 0cccf664e7..4723e6adfb 100644 --- a/igniter/install_thread.py +++ b/igniter/install_thread.py @@ -4,7 +4,7 @@ import os import sys from pathlib import Path -from Qt.QtCore import QThread, Signal, QObject # noqa +from qtpy import QtCore from .bootstrap_repos import ( BootstrapRepos, @@ -17,7 +17,7 @@ from .bootstrap_repos import ( from .tools import validate_mongo_connection -class InstallThread(QThread): +class InstallThread(QtCore.QThread): """Install Worker thread. 
This class takes care of finding OpenPype version on user entered path @@ -28,15 +28,14 @@ class InstallThread(QThread): user data dir. """ - progress = Signal(int) - message = Signal((str, bool)) + progress = QtCore.Signal(int) + message = QtCore.Signal((str, bool)) def __init__(self, parent=None,): self._mongo = None - self._path = None self._result = None - QThread.__init__(self, parent) + super().__init__(parent) def result(self): """Result of finished installation.""" @@ -62,143 +61,117 @@ class InstallThread(QThread): progress_callback=self.set_progress, message=self.message) local_version = OpenPypeVersion.get_installed_version_str() - # if user did enter nothing, we install OpenPype from local version. - # zip content of `repos`, copy it to user data dir and append - # version to it. - if not self._path: - # user did not entered url - if not self._mongo: - # it not set in environment - if not os.getenv("OPENPYPE_MONGO"): - # try to get it from settings registry - try: - self._mongo = bs.secure_registry.get_item( - "openPypeMongo") - except ValueError: - self.message.emit( - "!!! We need MongoDB URL to proceed.", True) - self._set_result(-1) - return - else: - self._mongo = os.getenv("OPENPYPE_MONGO") - else: - self.message.emit("Saving mongo connection string ...", False) - bs.secure_registry.set_item("openPypeMongo", self._mongo) - - os.environ["OPENPYPE_MONGO"] = self._mongo - - self.message.emit( - f"Detecting installed OpenPype versions in {bs.data_dir}", - False) - detected = bs.find_openpype(include_zips=True) - - if detected: - if not OpenPypeVersion.get_installed_version().is_compatible( - detected[-1]): - self.message.emit(( - f"Latest detected version {detected[-1]} " - "is not compatible with the currently running " - f"{local_version}" - ), True) - self.message.emit(( - "Filtering detected versions to compatible ones..." - ), False) - - detected = [ - version for version in detected - if version.is_compatible( - OpenPypeVersion.get_installed_version()) - ] - - if OpenPypeVersion( - version=local_version, path=Path()) < detected[-1]: - self.message.emit(( - f"Latest installed version {detected[-1]} is newer " - f"then currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - if detected[-1].path.suffix.lower() == ".zip": - bs.extract_openpype(detected[-1]) - self._set_result(0) - return - - if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa - self.message.emit(( - f"Latest installed version is the same as " - f"currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - self._set_result(0) - return - - self.message.emit(( - "All installed versions are older then " - f"currently running one {local_version}" - ), False) - else: - if getattr(sys, 'frozen', False): - self.message.emit("None detected.", True) - self.message.emit(("We will use OpenPype coming with " - "installer."), False) - openpype_version = bs.create_version_from_frozen_code() - if not openpype_version: - self.message.emit( - f"!!! 
Install failed - {openpype_version}", True) - self._set_result(-1) - return - self.message.emit(f"Using: {openpype_version}", False) - bs.install_version(openpype_version) - self.message.emit(f"Installed as {openpype_version}", False) - self.progress.emit(100) - self._set_result(1) - return - else: - self.message.emit("None detected.", False) - - self.message.emit( - f"We will use local OpenPype version {local_version}", False) - - local_openpype = bs.create_version_from_live_code() - if not local_openpype: - self.message.emit( - f"!!! Install failed - {local_openpype}", True) - self._set_result(-1) - return + # user did not entered url + if self._mongo: + self.message.emit("Saving mongo connection string ...", False) + bs.secure_registry.set_item("openPypeMongo", self._mongo) + elif os.getenv("OPENPYPE_MONGO"): + self._mongo = os.getenv("OPENPYPE_MONGO") + else: + # try to get it from settings registry try: - bs.install_version(local_openpype) - except (OpenPypeVersionExists, - OpenPypeVersionInvalid, - OpenPypeVersionIOError) as e: - self.message.emit(f"Installed failed: ", True) - self.message.emit(str(e), True) + self._mongo = bs.secure_registry.get_item( + "openPypeMongo") + except ValueError: + self.message.emit( + "!!! We need MongoDB URL to proceed.", True) self._set_result(-1) return + os.environ["OPENPYPE_MONGO"] = self._mongo - self.message.emit(f"Installed as {local_openpype}", False) + self.message.emit( + f"Detecting installed OpenPype versions in {bs.data_dir}", + False) + detected = bs.find_openpype(include_zips=True) + if not detected and getattr(sys, 'frozen', False): + self.message.emit("None detected.", True) + self.message.emit(("We will use OpenPype coming with " + "installer."), False) + openpype_version = bs.create_version_from_frozen_code() + if not openpype_version: + self.message.emit( + f"!!! Install failed - {openpype_version}", True) + self._set_result(-1) + return + self.message.emit(f"Using: {openpype_version}", False) + bs.install_version(openpype_version) + self.message.emit(f"Installed as {openpype_version}", False) self.progress.emit(100) self._set_result(1) return - else: - # if we have mongo connection string, validate it, set it to - # user settings and get OPENPYPE_PATH from there. - if self._mongo: - if not validate_mongo_connection(self._mongo): - self.message.emit( - f"!!! invalid mongo url {self._mongo}", True) - self._set_result(-1) - return - bs.secure_registry.set_item("openPypeMongo", self._mongo) - os.environ["OPENPYPE_MONGO"] = self._mongo - self.message.emit(f"processing {self._path}", True) - repo_file = bs.process_entered_location(self._path) + if detected and not OpenPypeVersion.get_installed_version().is_compatible(detected[-1]): # noqa: E501 + self.message.emit(( + f"Latest detected version {detected[-1]} " + "is not compatible with the currently running " + f"{local_version}" + ), True) + self.message.emit(( + "Filtering detected versions to compatible ones..." + ), False) - if not repo_file: - self.message.emit("!!! 
Cannot install", True) - self._set_result(-1) + # filter results to get only compatible versions + detected = [ + version for version in detected + if version.is_compatible( + OpenPypeVersion.get_installed_version()) + ] + + if detected: + if OpenPypeVersion( + version=local_version, path=Path()) < detected[-1]: + self.message.emit(( + f"Latest installed version {detected[-1]} is newer " + f"then currently running {local_version}" + ), False) + self.message.emit("Skipping OpenPype install ...", False) + if detected[-1].path.suffix.lower() == ".zip": + bs.extract_openpype(detected[-1]) + self._set_result(0) return + if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa: E501 + self.message.emit(( + f"Latest installed version is the same as " + f"currently running {local_version}" + ), False) + self.message.emit("Skipping OpenPype install ...", False) + self._set_result(0) + return + + self.message.emit(( + "All installed versions are older then " + f"currently running one {local_version}" + ), False) + + self.message.emit("None detected.", False) + + self.message.emit( + f"We will use local OpenPype version {local_version}", False) + + local_openpype = bs.create_version_from_live_code() + if not local_openpype: + self.message.emit( + f"!!! Install failed - {local_openpype}", True) + self._set_result(-1) + return + + try: + bs.install_version(local_openpype) + except (OpenPypeVersionExists, + OpenPypeVersionInvalid, + OpenPypeVersionIOError) as e: + self.message.emit(f"Installed failed: ", True) + self.message.emit(str(e), True) + self._set_result(-1) + return + + self.message.emit(f"Installed as {local_openpype}", False) + self.progress.emit(100) + self._set_result(1) + return + self.progress.emit(100) self._set_result(1) return diff --git a/igniter/message_dialog.py b/igniter/message_dialog.py index c8e875cc37..a2a8bce3a2 100644 --- a/igniter/message_dialog.py +++ b/igniter/message_dialog.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets, QtGui +from qtpy import QtWidgets, QtGui from .tools import ( load_stylesheet, diff --git a/igniter/nice_progress_bar.py b/igniter/nice_progress_bar.py index 47d695a101..ee16d108d4 100644 --- a/igniter/nice_progress_bar.py +++ b/igniter/nice_progress_bar.py @@ -1,4 +1,4 @@ -from Qt import QtCore, QtGui, QtWidgets # noqa +from qtpy import QtWidgets class NiceProgressBar(QtWidgets.QProgressBar): diff --git a/igniter/tools.py b/igniter/tools.py index a9d592acf0..79235b2329 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -153,7 +153,8 @@ def get_openpype_global_settings(url: str) -> dict: # Create mongo connection client = MongoClient(url, **kwargs) # Access settings collection - col = client["openpype"]["settings"] + openpype_db = os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype" + col = client[openpype_db]["settings"] # Query global settings global_settings = col.find_one({"type": "global_settings"}) or {} # Close Mongo connection @@ -184,11 +185,7 @@ def get_openpype_path_from_settings(settings: dict) -> Union[str, None]: if paths and isinstance(paths, str): paths = [paths] - # Loop over paths and return only existing - for path in paths: - if os.path.exists(path): - return path - return None + return next((path for path in paths if os.path.exists(path)), None) def get_expected_studio_version_str( @@ -206,10 +203,7 @@ def get_expected_studio_version_str( mongo_url = os.environ.get("OPENPYPE_MONGO") if global_settings is None: global_settings = get_openpype_global_settings(mongo_url) - if staging: - 
key = "staging_version" - else: - key = "production_version" + key = "staging_version" if staging else "production_version" return global_settings.get(key) or "" diff --git a/igniter/update_thread.py b/igniter/update_thread.py index f4fc729faf..e98c95f892 100644 --- a/igniter/update_thread.py +++ b/igniter/update_thread.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Working thread for update.""" -from Qt.QtCore import QThread, Signal, QObject # noqa +from qtpy import QtCore from .bootstrap_repos import ( BootstrapRepos, @@ -8,7 +8,7 @@ from .bootstrap_repos import ( ) -class UpdateThread(QThread): +class UpdateThread(QtCore.QThread): """Install Worker thread. This class takes care of finding OpenPype version on user entered path @@ -19,13 +19,13 @@ class UpdateThread(QThread): user data dir. """ - progress = Signal(int) - message = Signal((str, bool)) + progress = QtCore.Signal(int) + message = QtCore.Signal((str, bool)) def __init__(self, parent=None): self._result = None self._openpype_version = None - QThread.__init__(self, parent) + super().__init__(parent) def set_version(self, openpype_version: OpenPypeVersion): self._openpype_version = openpype_version diff --git a/igniter/update_window.py b/igniter/update_window.py index d7908c240b..d51ae18cd0 100644 --- a/igniter/update_window.py +++ b/igniter/update_window.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- """Progress window to show when OpenPype is updating/installing locally.""" import os + +from qtpy import QtCore, QtGui, QtWidgets + from .update_thread import UpdateThread -from Qt import QtCore, QtGui, QtWidgets # noqa from .bootstrap_repos import OpenPypeVersion from .nice_progress_bar import NiceProgressBar from .tools import load_stylesheet @@ -47,7 +49,6 @@ class UpdateWindow(QtWidgets.QDialog): self._update_thread = None - self.resize(QtCore.QSize(self._width, self._height)) self._init_ui() # Set stylesheet @@ -79,6 +80,16 @@ class UpdateWindow(QtWidgets.QDialog): self._progress_bar = progress_bar + def showEvent(self, event): + super().showEvent(event) + current_size = self.size() + new_size = QtCore.QSize( + max(current_size.width(), self._width), + max(current_size.height(), self._height) + ) + if current_size != new_size: + self.resize(new_size) + def _run_update(self): """Start install process. 
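For reference, the `igniter/tools.py` hunk above makes the settings database name configurable through an environment variable. A condensed, hedged sketch of that lookup follows; the connection URL is a placeholder and the extra connection kwargs of the real function are omitted:

```python
import os

from pymongo import MongoClient


def get_openpype_global_settings(url):
    """Fetch global settings, honoring OPENPYPE_DATABASE_NAME if set."""
    client = MongoClient(url)
    # Falls back to the historical default database name "openpype".
    openpype_db = os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype"
    global_settings = client[openpype_db]["settings"].find_one(
        {"type": "global_settings"}) or {}
    client.close()
    return global_settings
```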
diff --git a/inno_setup.iss b/inno_setup.iss index fa050ef1d6..418bedbd4d 100644 --- a/inno_setup.iss +++ b/inno_setup.iss @@ -14,10 +14,10 @@ AppId={{B9E9DF6A-5BDA-42DD-9F35-C09D564C4D93} AppName={#MyAppName} AppVersion={#AppVer} AppVerName={#MyAppName} version {#AppVer} -AppPublisher=Orbi Tools s.r.o -AppPublisherURL=http://pype.club -AppSupportURL=http://pype.club -AppUpdatesURL=http://pype.club +AppPublisher=Ynput s.r.o +AppPublisherURL=https://ynput.io +AppSupportURL=https://ynput.io +AppUpdatesURL=https://ynput.io DefaultDirName={autopf}\{#MyAppName}\{#AppVer} UsePreviousAppDir=no DisableProgramGroupPage=yes @@ -48,8 +48,8 @@ Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdir ; NOTE: Don't use "Flags: ignoreversion" on any shared system files [Icons] -Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe" -Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon +Name: "{autoprograms}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe" +Name: "{autodesktop}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon [Run] Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent diff --git a/openpype/addons/README.md b/openpype/addons/README.md new file mode 100644 index 0000000000..92b8b8c07c --- /dev/null +++ b/openpype/addons/README.md @@ -0,0 +1,3 @@ +This directory is for storing external addons that needs to be included in the pipeline when distributed. + +The directory is ignored by Git, but included in the zip and installation files. diff --git a/openpype/api.py b/openpype/api.py deleted file mode 100644 index b60cd21d2b..0000000000 --- a/openpype/api.py +++ /dev/null @@ -1,112 +0,0 @@ -from .settings import ( - get_system_settings, - get_project_settings, - get_current_project_settings, - get_anatomy_settings, - - SystemSettings, - ProjectSettings -) -from .lib import ( - PypeLogger, - Logger, - Anatomy, - execute, - run_subprocess, - version_up, - get_asset, - get_workdir_data, - get_version_from_path, - get_last_version_from_path, - get_app_environments_for_context, - source_hash, - get_latest_version, - get_local_site_id, - change_openpype_mongo_url, - create_project_folders, - get_project_basic_paths -) - -from .lib.mongo import ( - get_default_components -) - -from .lib.applications import ( - ApplicationManager -) - -from .lib.avalon_context import ( - BuildWorkfile -) - -from . 
import resources - -from .plugin import ( - Extractor, - - ValidatePipelineOrder, - ValidateContentsOrder, - ValidateSceneOrder, - ValidateMeshOrder, -) - -# temporary fix, might -from .action import ( - get_errored_instances_from_context, - RepairAction, - RepairContextAction -) - - -__all__ = [ - "get_system_settings", - "get_project_settings", - "get_current_project_settings", - "get_anatomy_settings", - "get_project_basic_paths", - - "SystemSettings", - "ProjectSettings", - - "PypeLogger", - "Logger", - "Anatomy", - "execute", - "get_default_components", - "ApplicationManager", - "BuildWorkfile", - - # Resources - "resources", - - # plugin classes - "Extractor", - # ordering - "ValidatePipelineOrder", - "ValidateContentsOrder", - "ValidateSceneOrder", - "ValidateMeshOrder", - # action - "get_errored_instances_from_context", - "RepairAction", - "RepairContextAction", - - # get contextual data - "version_up", - "get_asset", - "get_workdir_data", - "get_version_from_path", - "get_last_version_from_path", - "get_app_environments_for_context", - "source_hash", - - "run_subprocess", - "get_latest_version", - - "get_local_site_id", - "change_openpype_mongo_url", - - "get_project_basic_paths", - "create_project_folders" - -] diff --git a/openpype/cli.py b/openpype/cli.py index 897106c35f..54af42920d 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -16,14 +16,15 @@ from .pype_commands import PypeCommands @click.option("--use-staging", is_flag=True, expose_value=False, help="use staging variants") @click.option("--list-versions", is_flag=True, expose_value=False, - help=("list all detected versions. Use With `--use-staging " - "to list staging versions.")) + help="list all detected versions.") @click.option("--validate-version", expose_value=False, help="validate given version integrity") @click.option("--debug", is_flag=True, expose_value=False, - help=("Enable debug")) + help="Enable debug") @click.option("--verbose", expose_value=False, help=("Change OpenPype log level (debug - critical or 0-50)")) +@click.option("--automatic-tests", is_flag=True, expose_value=False, + help=("Run in automatic tests mode")) def main(ctx): """Pype is main command serving as entry point to pipeline system. 
@@ -366,11 +367,15 @@ def run(script): "--timeout", help="Provide specific timeout value for test case", default=None) +@click.option("-so", + "--setup_only", + help="Only create databases, do not run tests", + default=None) def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant, - timeout): + timeout, setup_only): """Run all automatic tests after proper initialization via start.py""" PypeCommands().run_tests(folder, mark, pyargs, test_data_folder, - persist, app_variant, timeout) + persist, app_variant, timeout, setup_only) @main.command() @@ -410,11 +415,12 @@ def repack_version(directory): @main.command() @click.option("--project", help="Project name") @click.option( - "--dirpath", help="Directory where package is stored", default=None -) -def pack_project(project, dirpath): + "--dirpath", help="Directory where package is stored", default=None) +@click.option( + "--dbonly", help="Store only database data", default=False, is_flag=True) +def pack_project(project, dirpath, dbonly): """Create a package of project with all files and database dump.""" - PypeCommands().pack_project(project, dirpath) + PypeCommands().pack_project(project, dirpath, dbonly) @main.command() @@ -422,27 +428,27 @@ def pack_project(project, dirpath): @click.option( "--root", help="Replace root which was stored in project", default=None ) -def unpack_project(zipfile, root): +@click.option( + "--dbonly", help="Store only database data", default=False, is_flag=True) +def unpack_project(zipfile, root, dbonly): """Create a package of project with all files and database dump.""" - PypeCommands().unpack_project(zipfile, root) + PypeCommands().unpack_project(zipfile, root, dbonly) @main.command() def interactive(): - """Interative (Python like) console. + """Interactive (Python like) console. - Helpfull command not only for development to directly work with python + Helpful command not only for development to directly work with python interpreter. Warning: - Executable 'openpype_gui' on windows won't work. + Executable 'openpype_gui' on Windows won't work. """ from openpype.version import __version__ - banner = "OpenPype {}\nPython {} on {}".format( - __version__, sys.version, sys.platform - ) + banner = f"OpenPype {__version__}\nPython {sys.version} on {sys.platform}" code.interact(banner) diff --git a/openpype/client/entities.py b/openpype/client/entities.py index c415be8816..adbdd7a47c 100644 --- a/openpype/client/entities.py +++ b/openpype/client/entities.py @@ -3,7 +3,7 @@ Goal is that most of functions here are called on (or with) an object that has project name as a context (e.g. on 'ProjectEntity'?). -+ We will need more specific functions doing wery specific queires really fast. ++ We will need more specific functions doing very specific queries really fast. """ import re @@ -69,6 +69,19 @@ def convert_ids(in_ids): def get_projects(active=True, inactive=False, fields=None): + """Yield all project entity documents. + + Args: + active (Optional[bool]): Include active projects. Defaults to True. + inactive (Optional[bool]): Include inactive projects. + Defaults to False. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. + + Yields: + dict: Project entity data which can be reduced to specified 'fields'. 
+ """ mongodb = get_project_database() for project_name in mongodb.collection_names(): if project_name in ("system.indexes",): @@ -81,6 +94,20 @@ def get_projects(active=True, inactive=False, fields=None): def get_project(project_name, active=True, inactive=True, fields=None): + """Return project entity document by project name. + + Args: + project_name (str): Name of project. + active (Optional[bool]): Allow active project. Defaults to True. + inactive (Optional[bool]): Allow inactive project. Defaults to True. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. + + Returns: + Union[Dict, None]: Project entity data which can be reduced to + specified 'fields'. None is returned if project with specified + filters was not found. + """ # Skip if both are disabled if not active and not inactive: return None @@ -124,17 +151,18 @@ def get_whole_project(project_name): def get_asset_by_id(project_name, asset_id, fields=None): - """Receive asset data by it's id. + """Receive asset data by its id. Args: project_name (str): Name of project where to look for queried entities. asset_id (Union[str, ObjectId]): Asset's id. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - dict: Asset entity data. - None: Asset was not found by id. + Union[Dict, None]: Asset entity data which can be reduced to + specified 'fields'. None is returned if asset with specified + filters was not found. """ asset_id = convert_id(asset_id) @@ -147,17 +175,18 @@ def get_asset_by_id(project_name, asset_id, fields=None): def get_asset_by_name(project_name, asset_name, fields=None): - """Receive asset data by it's name. + """Receive asset data by its name. Args: project_name (str): Name of project where to look for queried entities. asset_name (str): Asset's name. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - dict: Asset entity data. - None: Asset was not found by name. + Union[Dict, None]: Asset entity data which can be reduced to + specified 'fields'. None is returned if asset with specified + filters was not found. """ if not asset_name: @@ -193,10 +222,10 @@ def _get_assets( be found. asset_names (Iterable[str]): Name assets that should be found. parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - standard (bool): Query standart assets (type 'asset'). + standard (bool): Query standard assets (type 'asset'). archived (bool): Query archived assets (type 'archived_asset'). - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Query cursor as iterable which returns asset documents matching @@ -261,8 +290,8 @@ def get_assets( asset_names (Iterable[str]): Name assets that should be found. parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. archived (bool): Add also archived assets. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. 
Returns: Cursor: Query cursor as iterable which returns asset documents matching @@ -300,8 +329,8 @@ def get_archived_assets( be found. asset_names (Iterable[str]): Name assets that should be found. parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Query cursor as iterable which returns asset documents matching @@ -356,17 +385,18 @@ def get_asset_ids_with_subsets(project_name, asset_ids=None): def get_subset_by_id(project_name, subset_id, fields=None): - """Single subset entity data by it's id. + """Single subset entity data by its id. Args: project_name (str): Name of project where to look for queried entities. subset_id (Union[str, ObjectId]): Id of subset which should be found. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If subset with specified filters was not found. - Dict: Subset document which can be reduced to specified 'fields'. + Union[Dict, None]: Subset entity data which can be reduced to + specified 'fields'. None is returned if subset with specified + filters was not found. """ subset_id = convert_id(subset_id) @@ -379,20 +409,19 @@ def get_subset_by_id(project_name, subset_id, fields=None): def get_subset_by_name(project_name, subset_name, asset_id, fields=None): - """Single subset entity data by it's name and it's version id. + """Single subset entity data by its name and parent asset id. Args: project_name (str): Name of project where to look for queried entities. subset_name (str): Name of subset. asset_id (Union[str, ObjectId]): Id of parent asset. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - Union[None, Dict[str, Any]]: None if subset with specified filters was - not found or dict subset document which can be reduced to - specified 'fields'. - + Union[Dict, None]: Subset entity data which can be reduced to + specified 'fields'. None is returned if subset with specified + filters was not found. """ if not subset_name: return None @@ -434,8 +463,8 @@ def get_subsets( names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering using asset ids and list of subset names under the asset. archived (bool): Look for archived subsets too. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Iterable cursor yielding all matching subsets. @@ -520,17 +549,18 @@ def get_subset_families(project_name, subset_ids=None): def get_version_by_id(project_name, version_id, fields=None): - """Single version entity data by it's id. + """Single version entity data by its id. Args: project_name (str): Name of project where to look for queried entities. version_id (Union[str, ObjectId]): Id of version which should be found. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. 
All + fields are returned if 'None' is passed. Returns: - None: If version with specified filters was not found. - Dict: Version document which can be reduced to specified 'fields'. + Union[Dict, None]: Version entity data which can be reduced to + specified 'fields'. None is returned if version with specified + filters was not found. """ version_id = convert_id(version_id) @@ -546,18 +576,19 @@ def get_version_by_id(project_name, version_id, fields=None): def get_version_by_name(project_name, version, subset_id, fields=None): - """Single version entity data by it's name and subset id. + """Single version entity data by its name and subset id. Args: project_name (str): Name of project where to look for queried entities. - version (int): name of version entity (it's version). + version (int): name of version entity (its version). subset_id (Union[str, ObjectId]): Id of version which should be found. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If version with specified filters was not found. - Dict: Version document which can be reduced to specified 'fields'. + Union[Dict, None]: Version entity data which can be reduced to + specified 'fields'. None is returned if version with specified + filters was not found. """ subset_id = convert_id(subset_id) @@ -574,7 +605,7 @@ def get_version_by_name(project_name, version, subset_id, fields=None): def version_is_latest(project_name, version_id): - """Is version the latest from it's subset. + """Is version the latest from its subset. Note: Hero versions are considered as latest. @@ -680,8 +711,8 @@ def get_versions( versions (Iterable[int]): Version names (as integers). Filter ignored if 'None' is passed. hero (bool): Look also for hero versions. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Iterable cursor yielding all matching versions. @@ -705,12 +736,13 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None): project_name (str): Name of project where to look for queried entities. subset_id (Union[str, ObjectId]): Subset id under which is hero version. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If hero version for passed subset id does not exists. - Dict: Hero version entity data. + Union[Dict, None]: Hero version entity data which can be reduced to + specified 'fields'. None is returned if hero version with specified + filters was not found. """ subset_id = convert_id(subset_id) @@ -730,17 +762,18 @@ def get_hero_version_by_subset_id(project_name, subset_id, fields=None): def get_hero_version_by_id(project_name, version_id, fields=None): - """Hero version by it's id. + """Hero version by its id. Args: project_name (str): Name of project where to look for queried entities. version_id (Union[str, ObjectId]): Hero version id. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. 
Returns: - None: If hero version with passed id was not found. - Dict: Hero version entity data. + Union[Dict, None]: Hero version entity data which can be reduced to + specified 'fields'. None is returned if hero version with specified + filters was not found. """ version_id = convert_id(version_id) @@ -773,8 +806,8 @@ def get_hero_versions( should look for hero versions. Filter ignored if 'None' is passed. version_ids (Iterable[Union[str, ObjectId]]): Hero version ids. Filter ignored if 'None' is passed. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor|list: Iterable yielding hero versions matching passed filters. @@ -801,8 +834,8 @@ def get_output_link_versions(project_name, version_id, fields=None): project_name (str): Name of project where to look for queried entities. version_id (Union[str, ObjectId]): Version id which can be used as input link for other versions. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Iterable: Iterable cursor yielding versions that are used as input @@ -822,14 +855,15 @@ def get_output_link_versions(project_name, version_id, fields=None): return conn.find(query_filter, _prepare_fields(fields)) -def get_last_versions(project_name, subset_ids, fields=None): +def get_last_versions(project_name, subset_ids, active=None, fields=None): """Latest versions for entered subset_ids. Args: project_name (str): Name of project where to look for queried entities. subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + active (Optional[bool]): If True only active versions are returned. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: dict[ObjectId, int]: Key is subset id and value is last version name. @@ -866,12 +900,21 @@ def get_last_versions(project_name, subset_ids, fields=None): if name_needed: group_item["name"] = {"$last": "$name"} + aggregate_filter = { + "type": "version", + "parent": {"$in": subset_ids} + } + if active is False: + aggregate_filter["data.active"] = active + elif active is True: + aggregate_filter["$or"] = [ + {"data.active": {"$exists": 0}}, + {"data.active": active}, + ] + aggregation_pipeline = [ # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, + {"$match": aggregate_filter}, # Sorting versions all together {"$sort": {"name": 1}}, # Group them by "parent", but only take the last @@ -913,12 +956,13 @@ def get_last_version_by_subset_id(project_name, subset_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. subset_id (Union[str, ObjectId]): Id of version which should be found. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If version with specified filters was not found. - Dict: Version document which can be reduced to specified 'fields'. 
+ Union[Dict, None]: Version entity data which can be reduced to + specified 'fields'. None is returned if version with specified + filters was not found. """ subset_id = convert_id(subset_id) @@ -945,12 +989,13 @@ def get_last_version_by_subset_name( asset_id (Union[str, ObjectId]): Asset id which is parent of passed subset name. asset_name (str): Asset name which is parent of passed subset name. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If version with specified filters was not found. - Dict: Version document which can be reduced to specified 'fields'. + Union[Dict, None]: Version entity data which can be reduced to + specified 'fields'. None is returned if version with specified + filters was not found. """ if not asset_id and not asset_name: @@ -972,18 +1017,18 @@ def get_last_version_by_subset_name( def get_representation_by_id(project_name, representation_id, fields=None): - """Representation entity data by it's id. + """Representation entity data by its id. Args: project_name (str): Name of project where to look for queried entities. representation_id (Union[str, ObjectId]): Representation id. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If representation with specified filters was not found. - Dict: Representation entity data which can be reduced - to specified 'fields'. + Union[Dict, None]: Representation entity data which can be reduced to + specified 'fields'. None is returned if representation with + specified filters was not found. """ if not representation_id: @@ -1004,19 +1049,19 @@ def get_representation_by_id(project_name, representation_id, fields=None): def get_representation_by_name( project_name, representation_name, version_id, fields=None ): - """Representation entity data by it's name and it's version id. + """Representation entity data by its name and its version id. Args: project_name (str): Name of project where to look for queried entities. representation_name (str): Representation name. version_id (Union[str, ObjectId]): Id of parent version entity. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If representation with specified filters was not found. - Dict: Representation entity data which can be reduced - to specified 'fields'. + Union[dict[str, Any], None]: Representation entity data which can be + reduced to specified 'fields'. None is returned if representation + with specified filters was not found. """ version_id = convert_id(version_id) @@ -1185,7 +1230,7 @@ def get_representations( standard=True, fields=None ): - """Representaion entities data from one project filtered by filters. + """Representation entities data from one project filtered by filters. Filters are additive (all conditions must pass to return subset). @@ -1202,8 +1247,8 @@ def get_representations( names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering using version ids and list of names under the version. archived (bool): Output will also contain archived representations. 
- fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Iterable cursor yielding all matching representations. @@ -1216,7 +1261,7 @@ def get_representations( version_ids=version_ids, context_filters=context_filters, names_by_version_ids=names_by_version_ids, - standard=True, + standard=standard, archived=archived, fields=fields ) @@ -1231,7 +1276,7 @@ def get_archived_representations( names_by_version_ids=None, fields=None ): - """Archived representaion entities data from project with applied filters. + """Archived representation entities data from project with applied filters. Filters are additive (all conditions must pass to return subset). @@ -1247,8 +1292,8 @@ def get_archived_representations( representation context fields. names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering using version ids and list of names under the version. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: Cursor: Iterable cursor yielding all matching representations. @@ -1377,8 +1422,8 @@ def get_thumbnail_id_from_source(project_name, src_type, src_id): src_id (Union[str, ObjectId]): Id of source entity. Returns: - ObjectId: Thumbnail id assigned to entity. - None: If Source entity does not have any thumbnail id assigned. + Union[ObjectId, None]: Thumbnail id assigned to entity. None is + returned if source entity does not have any thumbnail id assigned. """ if not src_type or not src_id: @@ -1397,14 +1442,14 @@ def get_thumbnails(project_name, thumbnail_ids, fields=None): """Receive thumbnails entity data. Thumbnail entity can be used to receive binary content of thumbnail based - on it's content and ThumbnailResolvers. + on its content and ThumbnailResolvers. Args: project_name (str): Name of project where to look for queried entities. thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail entities. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: cursor: Cursor of queried documents. @@ -1429,12 +1474,13 @@ def get_thumbnail(project_name, thumbnail_id, fields=None): Args: project_name (str): Name of project where to look for queried entities. thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. + fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. Returns: - None: If thumbnail with specified id was not found. - Dict: Thumbnail entity data which can be reduced to specified 'fields'. + Union[Dict, None]: Thumbnail entity data which can be reduced to + specified 'fields'. None is returned if thumbnail with specified + filters was not found. """ if not thumbnail_id: @@ -1458,8 +1504,13 @@ def get_workfile_info( project_name (str): Name of project where to look for queried entities. asset_id (Union[str, ObjectId]): Id of asset entity. task_name (str): Task name on asset. - fields (Iterable[str]): Fields that should be returned. All fields are - returned if 'None' is passed. 
+ fields (Optional[Iterable[str]]): Fields that should be returned. All + fields are returned if 'None' is passed. + + Returns: + Union[Dict, None]: Workfile entity data which can be reduced to + specified 'fields'. None is returned if workfile with specified + filters was not found. + """ if not asset_id or not task_name or not filename: diff --git a/openpype/client/entity_links.py b/openpype/client/entity_links.py index e42ac58aff..b74b4ce7f6 100644 --- a/openpype/client/entity_links.py +++ b/openpype/client/entity_links.py @@ -164,7 +164,6 @@ def get_linked_representation_id( # Recursive graph lookup for inputs {"$graphLookup": graph_lookup} ] - conn = get_project_connection(project_name) result = conn.aggregate(query_pipeline) referenced_version_ids = _process_referenced_pipeline_result( @@ -213,7 +212,7 @@ def _process_referenced_pipeline_result(result, link_type): for output in sorted(outputs_recursive, key=lambda o: o["depth"]): output_links = output.get("data", {}).get("inputLinks") - if not output_links: + if not output_links and output["type"] != "hero_version": continue # Leaf @@ -232,6 +231,9 @@ def _process_referenced_pipeline_result(result, link_type): def _filter_input_links(input_links, link_type, correctly_linked_ids): + if not input_links: # to handle hero versions + return + for input_link in input_links: if link_type and input_link["type"] != link_type: continue diff --git a/openpype/client/mongo.py b/openpype/client/mongo.py index 72acbc5476..251041c028 100644 --- a/openpype/client/mongo.py +++ b/openpype/client/mongo.py @@ -5,6 +5,12 @@ import logging import pymongo import certifi +from bson.json_util import ( + loads, + dumps, + CANONICAL_JSON_OPTIONS +) + if sys.version_info[0] == 2: from urlparse import urlparse, parse_qs else: @@ -15,6 +21,49 @@ class MongoEnvNotSet(Exception): pass +def documents_to_json(docs): + """Convert documents to json string. + + Args: + docs (Union[list[dict[str, Any]], dict[str, Any]]): Document/s to + convert to json string. + + Returns: + str: Json string with mongo documents. + """ + + return dumps(docs, json_options=CANONICAL_JSON_OPTIONS) + + +def load_json_file(filepath): + """Load mongo documents from a json file. + + Args: + filepath (str): Path to a json file. + + Returns: + Union[dict[str, Any], list[dict[str, Any]]]: Loaded content from a + json file. + """ + + if not os.path.exists(filepath): + raise ValueError("Path {} was not found".format(filepath)) + + with open(filepath, "r") as stream: + content = stream.read() + return loads("".join(content)) + + +def get_project_database_name(): + """Name of database where project collections are available. + + Returns: + str: Name of database where projects are. + """ + + return os.environ.get("AVALON_DB") or "avalon" + + def _decompose_url(url): """Decompose mongo url to basic components. @@ -210,12 +259,102 @@ class OpenPypeMongoConnection: return mongo_client -def get_project_database(): - db_name = os.environ.get("AVALON_DB") or "avalon" - return OpenPypeMongoConnection.get_mongo_client()[db_name] +# ------ Helper Mongo functions ------ +# Functions can be helpful with custom tools to backup/restore mongo state. +# Not meant as API functionality that should be used in production codebase! +def get_collection_documents(database_name, collection_name, as_json=False): + """Query all documents from a collection. + + Args: + database_name (str): Name of database where to look for collection. + collection_name (str): Name of collection to query. 
+ as_json (Optional[bool]): Output should be a json string. + Default: 'False' + + Returns: + Union[list[dict[str, Any]], str]: Queried documents. + """ + + client = OpenPypeMongoConnection.get_mongo_client() + output = list(client[database_name][collection_name].find({})) + if as_json: + output = documents_to_json(output) + return output -def get_project_connection(project_name): +def store_collection(filepath, database_name, collection_name): + """Store collection documents to a json file. + + Args: + filepath (str): Path to a json file where documents will be stored. + database_name (str): Name of database where to look for collection. + collection_name (str): Name of collection to store. + """ + + # Make sure directory for output file exists + dirpath = os.path.dirname(filepath) + if not os.path.isdir(dirpath): + os.makedirs(dirpath) + + content = get_collection_documents(database_name, collection_name, True) + with open(filepath, "w") as stream: + stream.write(content) + + +def replace_collection_documents(docs, database_name, collection_name): + """Replace all documents in a collection with passed documents. + + Warnings: + All existing documents in collection will be removed if there are any. + + Args: + docs (list[dict[str, Any]]): New documents. + database_name (str): Name of database where to look for collection. + collection_name (str): Name of collection where new documents are + uploaded. + """ + + client = OpenPypeMongoConnection.get_mongo_client() + database = client[database_name] + if collection_name in database.list_collection_names(): + database.drop_collection(collection_name) + col = database[collection_name] + col.insert_many(docs) + + +def restore_collection(filepath, database_name, collection_name): + """Restore/replace collection from a json filepath. + + Warnings: + All existing documents in collection will be removed if there are any. + + Args: + filepath (str): Path to a json with documents. + database_name (str): Name of database where to look for collection. + collection_name (str): Name of collection where new documents are + uploaded. + """ + + docs = load_json_file(filepath) + replace_collection_documents(docs, database_name, collection_name) + + +def get_project_database(database_name=None): + """Database object where project collections are. + + Args: + database_name (Optional[str]): Custom name of database. + + Returns: + pymongo.database.Database: Database where project collections are. + """ + + if not database_name: + database_name = get_project_database_name() + return OpenPypeMongoConnection.get_mongo_client()[database_name] + + +def get_project_connection(project_name, database_name=None): """Direct access to mongo collection. We're trying to avoid using direct access to mongo. This should be used @@ -223,13 +362,83 @@ api calls for that. Args: - project_name(str): Project name for which collection should be + project_name (str): Project name for which collection should be returned. + database_name (Optional[str]): Custom name of database. Returns: - pymongo.Collection: Collection realated to passed project. + pymongo.collection.Collection: Collection related to passed project. """ if not project_name: raise ValueError("Invalid project name {}".format(str(project_name))) - return get_project_database()[project_name] + return get_project_database(database_name)[project_name] + + +def get_project_documents(project_name, database_name=None): + """Query all documents from project collection. 
+ + Args: + project_name (str): Name of project. + database_name (Optional[str]): Name of mongo database where to look for + project. + + Returns: + list[dict[str, Any]]: Documents in project collection. + """ + + if not database_name: + database_name = get_project_database_name() + return get_collection_documents(database_name, project_name) + + +def store_project_documents(project_name, filepath, database_name=None): + """Store project documents to a file as json string. + + Args: + project_name (str): Name of project to store. + filepath (str): Path to a json file where output will be stored. + database_name (Optional[str]): Name of mongo database where to look for + project. + """ + + if not database_name: + database_name = get_project_database_name() + + store_collection(filepath, database_name, project_name) + + +def replace_project_documents(project_name, docs, database_name=None): + """Replace documents in mongo with passed documents. + + Warnings: + Existing project collection is removed if exists in mongo. + + Args: + project_name (str): Name of project. + docs (list[dict[str, Any]]): Documents to restore. + database_name (Optional[str]): Name of mongo database where project + collection will be created. + """ + + if not database_name: + database_name = get_project_database_name() + replace_collection_documents(docs, database_name, project_name) + + +def restore_project_documents(project_name, filepath, database_name=None): + """Replace documents in mongo with documents loaded from a json file. + + Warnings: + Existing project collection is removed if exists in mongo. + + Args: + project_name (str): Name of project. + filepath (str): Path to json file with project documents. + database_name (Optional[str]): Name of mongo database where project + collection will be created. + """ + + if not database_name: + database_name = get_project_database_name() + restore_collection(filepath, database_name, project_name) diff --git a/openpype/client/notes.md b/openpype/client/notes.md index a261b86eca..59743892eb 100644 --- a/openpype/client/notes.md +++ b/openpype/client/notes.md @@ -2,7 +2,7 @@ ## Reason Preparation for OpenPype v4 server. Goal is to remove direct mongo calls in code to prepare a little bit for different source of data for code before. To start think about database calls less as mongo calls but more universally. To do so was implemented simple wrapper around database calls to not use pymongo specific code. -Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tighly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state. +Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it as close as possible. Current implementation of OpenPype is too tightly connected to pymongo and its abilities so we're trying to get closer with long term changes that can be used even in current state. ## Queries Query functions don't use full potential of mongo queries like very specific queries based on subdictionaries or unknown structures. We try to avoid these calls as much as possible because they'll probably won't be available in future. If it's really necessary a new function can be added but only if it's reasonable for overall logic. All query functions were moved to `~/client/entities.py`. 
Each function has arguments with available filters and possible reduce of returned keys for each entity. @@ -14,7 +14,7 @@ Changes are a little bit complicated. Mongo has many options how update can happen. Create operations expect already prepared document data, for that are prepared functions creating skeletal structures of documents (do not fill all required data), except `_id` all data should be right. Existence of entity is not validated so if the same creation operation is send n times it will create the entity n times which can cause issues. ### Update -Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare__update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementaion. +Update operations require an entity id and the keys that should be changed; the update dictionary must have the form {"key": value}. If a value should be set in a nested dictionary, the key must contain all subkeys joined with a dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify building update dictionaries, helper functions were prepared which do that for you; their names follow the template `prepare_*_update_data` and they work by comparing the previous document with the new document (see the sketch after this diff). If a function is missing for a requested entity type, it is because we didn't need it yet and it still requires implementation. ### Delete Delete operation need entity id. Entity will be deleted from mongo. diff --git a/openpype/client/operations.py b/openpype/client/operations.py index fd639c34a7..ef48f2a1c4 100644 --- a/openpype/client/operations.py +++ b/openpype/client/operations.py @@ -368,7 +368,7 @@ def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): class AbstractOperation(object): """Base operation class. - Opration represent a call into database. The call can create, change or + Operation represents a call into database. The call can create, change or remove data. Args: @@ -409,7 +409,7 @@ pass def to_data(self): - """Convert opration to data that can be converted to json or others. + """Convert operation to data that can be converted to json or others. Warning: Current state returns ObjectId objects which cannot be parsed by @@ -428,7 +428,7 @@ class CreateOperation(AbstractOperation): - """Opeartion to create an entity. + """Operation to create an entity. Args: project_name (str): On which project operation will happen. @@ -485,7 +485,7 @@ class UpdateOperation(AbstractOperation): - """Opeartion to update an entity. + """Operation to update an entity. Args: project_name (str): On which project operation will happen. @@ -552,7 +552,7 @@ class DeleteOperation(AbstractOperation): - """Opeartion to delete an entity. + """Operation to delete an entity. Args: project_name (str): On which project operation will happen. 
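The Update section of `openpype/client/notes.md` above describes flattening nested keys into mongo dot-notation before an update call. As a rough illustration (the helper name `flatten_update_data` is hypothetical; the real `prepare_*_update_data` helpers in `openpype/client/operations.py` additionally compare the old and new documents), the flattening itself can be sketched like this:

```python
def flatten_update_data(update_data, _prefix=""):
    """Convert nested dicts to mongo dot-notation update keys.

    e.g. {"data": {"fps": 25}} -> {"data.fps": 25}
    """
    output = {}
    for key, value in update_data.items():
        full_key = "{}.{}".format(_prefix, key) if _prefix else key
        if isinstance(value, dict) and value:
            # Recurse into non-empty nested dictionaries.
            output.update(flatten_update_data(value, full_key))
        else:
            output[full_key] = value
    return output


print(flatten_update_data({"data": {"fps": 25, "pixelAspect": 1.0}}))
# {'data.fps': 25, 'data.pixelAspect': 1.0}
```

A dictionary built this way can be passed directly as the `"$set"` value of pymongo's `update_one`, which is the shape of update data the `UpdateOperation` above works with.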
diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py index 3609620917..c54acbc203 100644 --- a/openpype/hooks/pre_add_last_workfile_arg.py +++ b/openpype/hooks/pre_add_last_workfile_arg.py @@ -1,4 +1,5 @@ import os + from openpype.lib import PreLaunchHook @@ -13,6 +14,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): # Execute after workfile template copy order = 10 app_groups = [ + "3dsmax", "maya", "nuke", "nukex", @@ -23,6 +25,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): "blender", "photoshop", "tvpaint", + "substancepainter", "aftereffects" ] diff --git a/openpype/hooks/pre_create_extra_workdir_folders.py b/openpype/hooks/pre_create_extra_workdir_folders.py index c5af620c87..8856281120 100644 --- a/openpype/hooks/pre_create_extra_workdir_folders.py +++ b/openpype/hooks/pre_create_extra_workdir_folders.py @@ -3,10 +3,13 @@ from openpype.lib import PreLaunchHook from openpype.pipeline.workfile import create_workdir_extra_folders -class AddLastWorkfileToLaunchArgs(PreLaunchHook): - """Add last workfile path to launch arguments. +class CreateWorkdirExtraFolders(PreLaunchHook): + """Create extra folders for the work directory. + + Based on the setting + `project_settings/global/tools/Workfiles/extra_folders`, profile + filtering decides whether extra folders need to be created in the work + directory. - This is not possible to do for all applications the same way. """ # Execute after workfile template copy diff --git a/openpype/hooks/pre_foundry_apps.py b/openpype/hooks/pre_foundry_apps.py index 85f68c6b60..21ec8e7881 100644 --- a/openpype/hooks/pre_foundry_apps.py +++ b/openpype/hooks/pre_foundry_apps.py @@ -7,18 +7,18 @@ class LaunchFoundryAppsWindows(PreLaunchHook): Nuke is executed "like" python process so it is required to pass `CREATE_NEW_CONSOLE` flag on windows to trigger creation of new console. - At the same time the newly created console won't create it's own stdout + At the same time the newly created console won't create its own stdout and stderr handlers so they should not be redirected to DEVNULL. 
""" # Should be as last hook because must change launch arguments to string order = 1000 - app_groups = ["nuke", "nukex", "hiero", "nukestudio"] + app_groups = ["nuke", "nukeassist", "nukex", "hiero", "nukestudio"] platforms = ["windows"] def execute(self): # Change `creationflags` to CREATE_NEW_CONSOLE - # - on Windows will nuke create new window using it's console + # - on Windows nuke will create new window using its console # Set `stdout` and `stderr` to None so new created console does not # have redirected output to DEVNULL in build self.launch_context.kwargs.update({ diff --git a/openpype/hooks/pre_ocio_hook.py b/openpype/hooks/pre_ocio_hook.py new file mode 100644 index 0000000000..8f462665bc --- /dev/null +++ b/openpype/hooks/pre_ocio_hook.py @@ -0,0 +1,53 @@ +from openpype.lib import PreLaunchHook + +from openpype.pipeline.colorspace import ( + get_imageio_config +) +from openpype.pipeline.template_data import get_template_data_with_names + + +class OCIOEnvHook(PreLaunchHook): + """Set OCIO environment variable for hosts that use OpenColorIO.""" + + order = 0 + hosts = [ + "substancepainter", + "fusion", + "blender", + "aftereffects", + "max", + "houdini", + "maya", + "nuke", + "hiero", + "resolve" + ] + + def execute(self): + """Hook entry method.""" + + template_data = get_template_data_with_names( + project_name=self.data["project_name"], + asset_name=self.data["asset_name"], + task_name=self.data["task_name"], + host_name=self.host_name, + system_settings=self.data["system_settings"] + ) + + config_data = get_imageio_config( + project_name=self.data["project_name"], + host_name=self.host_name, + project_settings=self.data["project_settings"], + anatomy_data=template_data, + anatomy=self.data["anatomy"] + ) + + if config_data: + ocio_path = config_data["path"] + + self.log.info( + f"Setting OCIO environment to config path: {ocio_path}") + + self.launch_context.env["OCIO"] = ocio_path + else: + self.log.debug("OCIO not set or enabled") diff --git a/openpype/host/dirmap.py b/openpype/host/dirmap.py index 88d68f27bf..42bf80ecec 100644 --- a/openpype/host/dirmap.py +++ b/openpype/host/dirmap.py @@ -2,12 +2,13 @@ Idea for current dirmap implementation was used from Maya where is possible to enter source and destination roots and maya will try each found source -in referenced file replace with each destionation paths. First path which +in referenced file replace with each destination paths. First path which exists is used. """ import os from abc import ABCMeta, abstractmethod +import platform import six @@ -38,7 +39,6 @@ class HostDirmap(object): self._project_settings = project_settings self._sync_module = sync_module # to limit reinit of Modules self._log = None - self._mapping = None # cache mapping @property def sync_module(self): @@ -69,29 +69,28 @@ class HostDirmap(object): """Run host dependent remapping from source_path to destination_path""" pass - def process_dirmap(self): + def process_dirmap(self, mapping=None): # type: (dict) -> None """Go through all paths in Settings and set them using `dirmap`. If artists has Site Sync enabled, take dirmap mapping directly from Local Settings when artist is syncing workfile locally. - Args: - project_settings (dict): Settings for current project. 
""" - if not self._mapping: - self._mapping = self.get_mappings(self.project_settings) - if not self._mapping: + if not mapping: + mapping = self.get_mappings() + if not mapping: return - self.log.info("Processing directory mapping ...") self.on_enable_dirmap() - self.log.info("mapping:: {}".format(self._mapping)) - for k, sp in enumerate(self._mapping["source-path"]): - dst = self._mapping["destination-path"][k] + for k, sp in enumerate(mapping["source-path"]): + dst = mapping["destination-path"][k] try: + # add trailing slash if missing + sp = os.path.join(sp, '') + dst = os.path.join(dst, '') print("{} -> {}".format(sp, dst)) self.dirmap_routine(sp, dst) except IndexError: @@ -109,28 +108,24 @@ class HostDirmap(object): ) continue - def get_mappings(self, project_settings): + def get_mappings(self): """Get translation from source-path to destination-path. It checks if Site Sync is enabled and user chose to use local site, in that case configuration in Local Settings takes precedence """ - local_mapping = self._get_local_sync_dirmap(project_settings) dirmap_label = "{}-dirmap".format(self.host_name) - if ( - not self.project_settings[self.host_name].get(dirmap_label) - and not local_mapping - ): - return {} - mapping_settings = self.project_settings[self.host_name][dirmap_label] - mapping_enabled = mapping_settings["enabled"] or bool(local_mapping) + mapping_sett = self.project_settings[self.host_name].get(dirmap_label, + {}) + local_mapping = self._get_local_sync_dirmap() + mapping_enabled = mapping_sett.get("enabled") or bool(local_mapping) if not mapping_enabled: return {} mapping = ( local_mapping - or mapping_settings["paths"] + or mapping_sett["paths"] or {} ) @@ -140,28 +135,27 @@ class HostDirmap(object): or not mapping.get("source-path") ): return {} + self.log.info("Processing directory mapping ...") + self.log.info("mapping:: {}".format(mapping)) return mapping - def _get_local_sync_dirmap(self, project_settings): + def _get_local_sync_dirmap(self): """ Returns dirmap if synch to local project is enabled. Only valid mapping is from roots of remote site to local site set in Local Settings. 
- Args: - project_settings (dict) Returns: dict : { "source-path": [XXX], "destination-path": [YYYY]} """ + project_name = os.getenv("AVALON_PROJECT") mapping = {} - - if not project_settings["global"]["sync_server"]["enabled"]: + if (not self.sync_module.enabled or + project_name not in self.sync_module.get_enabled_projects()): return mapping - project_name = os.getenv("AVALON_PROJECT") - active_site = self.sync_module.get_local_normalized_site( self.sync_module.get_active_site(project_name)) remote_site = self.sync_module.get_local_normalized_site( @@ -170,11 +164,7 @@ class HostDirmap(object): "active {} - remote {}".format(active_site, remote_site) ) - if ( - active_site == "local" - and project_name in self.sync_module.get_enabled_projects() - and active_site != remote_site - ): + if active_site == "local" and active_site != remote_site: sync_settings = self.sync_module.get_sync_project_setting( project_name, exclude_locals=False, @@ -187,11 +177,27 @@ class HostDirmap(object): self.log.debug("local overrides {}".format(active_overrides)) self.log.debug("remote overrides {}".format(remote_overrides)) + + current_platform = platform.system().lower() + remote_provider = self.sync_module.get_provider_for_site( + project_name, remote_site + ) + # dirmap makes sense only with a regular disk provider; the workfile + # won't have roots pointing to a cloud or sftp provider + if remote_provider != "local_drive": + remote_site = "studio" for root_name, active_site_dir in active_overrides.items(): remote_site_dir = ( remote_overrides.get(root_name) or sync_settings["sites"][remote_site]["root"][root_name] ) + + if isinstance(remote_site_dir, dict): + remote_site_dir = remote_site_dir.get(current_platform) + + if not remote_site_dir: + continue + if os.path.isdir(active_site_dir): if "destination-path" not in mapping: mapping["destination-path"] = [] diff --git a/openpype/host/host.py b/openpype/host/host.py index 94416bb39a..630fb873a8 100644 --- a/openpype/host/host.py +++ b/openpype/host/host.py @@ -1,3 +1,4 @@ +import os import logging import contextlib from abc import ABCMeta, abstractproperty @@ -17,7 +18,7 @@ class HostBase(object): Compared to 'avalon' concept: What was before considered as functions in host implementation folder. The host implementation should primarily care about adding ability of creation - (mark subsets to be published) and optionaly about referencing published + (mark subsets to be published) and optionally about referencing published representations as containers. Host may need extend some functionality like working with workfiles @@ -100,30 +101,49 @@ class HostBase(object): pass + def get_current_project_name(self): + """ + Returns: + Union[str, None]: Current project name. + """ + + return os.environ.get("AVALON_PROJECT") + + def get_current_asset_name(self): + """ + Returns: + Union[str, None]: Current asset name. + """ + + return os.environ.get("AVALON_ASSET") + + def get_current_task_name(self): + """ + Returns: + Union[str, None]: Current task name. + """ + + return os.environ.get("AVALON_TASK") + def get_current_context(self): """Get current context information. This method should be used to get current context of host. Usage of - this method can be crutial for host implementations in DCCs where + this method can be crucial for host implementations in DCCs where can be opened multiple workfiles at one moment and change of context - can't be catched properly. + can't be caught properly. Default implementation returns values from 'legacy_io.Session'. 
Returns: - dict: Context with 3 keys 'project_name', 'asset_name' and - 'task_name'. All of them can be 'None'. + Dict[str, Union[str, None]]: Context with 3 keys 'project_name', + 'asset_name' and 'task_name'. All of them can be 'None'. """ - from openpype.pipeline import legacy_io - - if legacy_io.is_installed(): - legacy_io.install() - return { - "project_name": legacy_io.Session["AVALON_PROJECT"], - "asset_name": legacy_io.Session["AVALON_ASSET"], - "task_name": legacy_io.Session["AVALON_TASK"] + "project_name": self.get_current_project_name(), + "asset_name": self.get_current_asset_name(), + "task_name": self.get_current_task_name() } def get_context_title(self): diff --git a/openpype/host/interfaces.py b/openpype/host/interfaces.py index 999aefd254..7c6057acf0 100644 --- a/openpype/host/interfaces.py +++ b/openpype/host/interfaces.py @@ -81,7 +81,7 @@ class ILoadHost: @abstractmethod def get_containers(self): - """Retreive referenced containers from scene. + """Retrieve referenced containers from scene. This can be implemented in hosts where referencing can be used. @@ -191,7 +191,7 @@ class IWorkfileHost: @abstractmethod def get_current_workfile(self): - """Retreive path to current opened file. + """Retrieve path to current opened file. Returns: str: Path to file which is currently opened. @@ -220,8 +220,8 @@ class IWorkfileHost: Default implementation keeps workdir untouched. Warnings: - We must handle this modification with more sofisticated way because - this can't be called out of DCC so opening of last workfile + We must handle this modification in a more sophisticated way + because this can't be called out of DCC so opening of last workfile (calculated before DCC is launched) is complicated. Also breaking defined work template is not a good idea. Only place where it's really used and can make sense is Maya. There @@ -302,7 +302,7 @@ class IPublishHost: required methods. Returns: - list[str]: Missing method implementations for new publsher + list[str]: Missing method implementations for new publisher workflow. """ diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py index 2ad1255d27..28062cc35d 100644 --- a/openpype/hosts/aftereffects/api/__init__.py +++ b/openpype/hosts/aftereffects/api/__init__.py @@ -4,36 +4,21 @@ Anything that isn't defined here is INTERNAL and unreliable for external use. 
""" -from .launch_logic import ( +from .ws_stub import ( get_stub, - stub, ) from .pipeline import ( + AfterEffectsHost, ls, - get_asset_settings, - install, - uninstall, - list_instances, - remove_instance, - containerise, - get_context_data, - update_context_data, - get_context_title -) - -from .workio import ( - file_extensions, - has_unsaved_changes, - save_file, - open_file, - current_file, - work_root, + containerise ) from .lib import ( maintained_selection, - get_extension_manifest_path + get_extension_manifest_path, + get_asset_settings, + set_settings ) from .plugin import ( @@ -42,32 +27,18 @@ from .plugin import ( __all__ = [ - # launch_logic + # ws_stub "get_stub", - "stub", # pipeline "ls", - "get_asset_settings", - "install", - "uninstall", - "list_instances", - "remove_instance", "containerise", - "get_context_data", - "update_context_data", - "get_context_title", - - "file_extensions", - "has_unsaved_changes", - "save_file", - "open_file", - "current_file", - "work_root", # lib "maintained_selection", "get_extension_manifest_path", + "get_asset_settings", + "set_settings", # plugin "AfterEffectsLoader" diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp index 0ed799991e..50fda416f8 100644 Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ diff --git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml index a39f5781bb..9f65720ef0 100644 --- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml @@ -1,6 +1,6 @@ - + diff --git a/openpype/hosts/aftereffects/api/extension/index.html b/openpype/hosts/aftereffects/api/extension/index.html index 9e39bf1acc..291965559f 100644 --- a/openpype/hosts/aftereffects/api/extension/index.html +++ b/openpype/hosts/aftereffects/api/extension/index.html @@ -2,7 +2,7 @@ - + @@ -25,11 +25,11 @@ - + - - - + - + - + - + - + $(function() { + $("a#setresolution-button").bind("click", function() { + RPC.call('AfterEffects.setresolution_route').then(function (data) { + }, function (error) { + alert(error); + }); + }); + }); + + + + + + - - + + + - - + @@ -131,6 +143,6 @@ - + - \ No newline at end of file + diff --git a/openpype/hosts/aftereffects/api/extension/js/main.js b/openpype/hosts/aftereffects/api/extension/js/main.js index 2105ea82dc..ffc41f0937 100644 --- a/openpype/hosts/aftereffects/api/extension/js/main.js +++ b/openpype/hosts/aftereffects/api/extension/js/main.js @@ -4,7 +4,7 @@ indent: 4, maxerr: 50 */ var csInterface = new CSInterface(); - + log.warn("script start"); WSRPC.DEBUG = false; @@ -14,7 +14,7 @@ WSRPC.TRACE = false; async function startUp(url){ promis = runEvalScript("getEnv('" + url + "')"); - var res = await promis; + var res = await promis; log.warn("res: " + res); promis = runEvalScript("getEnv('OPENPYPE_DEBUG')"); @@ -56,7 +56,7 @@ function get_extension_version(){ } function main(websocket_url){ - // creates connection to 'websocket_url', registers routes + // creates connection to 'websocket_url', registers routes var default_url = 'ws://localhost:8099/ws/'; if (websocket_url == ''){ @@ -66,7 +66,7 @@ function main(websocket_url){ RPC.connect(); - log.warn("connected"); + log.warn("connected"); RPC.addRoute('AfterEffects.open', function (data) { log.warn('Server called client route "open":', data); @@ -88,7 +88,7 @@ function main(websocket_url){ }); 
RPC.addRoute('AfterEffects.get_active_document_name', function (data) { - log.warn('Server called client route ' + + log.warn('Server called client route ' + '"get_active_document_name":', data); return runEvalScript("getActiveDocumentName()") .then(function(result){ @@ -98,7 +98,7 @@ function main(websocket_url){ }); RPC.addRoute('AfterEffects.get_active_document_full_name', function (data){ - log.warn('Server called client route ' + + log.warn('Server called client route ' + '"get_active_document_full_name":', data); return runEvalScript("getActiveDocumentFullName()") .then(function(result){ @@ -118,7 +118,7 @@ function main(websocket_url){ }); }); - + RPC.addRoute('AfterEffects.get_selected_items', function (data) { log.warn('Server called client route "get_selected_items":', data); return runEvalScript("getSelectedItems(" + data.comps + "," + @@ -194,23 +194,25 @@ function main(websocket_url){ }); }); - RPC.addRoute('AfterEffects.get_work_area', function (data) { - log.warn('Server called client route "get_work_area":', data); - return runEvalScript("getWorkArea(" + data.item_id + ")") + RPC.addRoute('AfterEffects.get_comp_properties', function (data) { + log.warn('Server called client route "get_comp_properties":', data); + return runEvalScript("getCompProperties(" + data.item_id + ")") .then(function(result){ - log.warn("getWorkArea: " + result); + log.warn("get_comp_properties: " + result); return result; }); }); - RPC.addRoute('AfterEffects.set_work_area', function (data) { + RPC.addRoute('AfterEffects.set_comp_properties', function (data) { log.warn('Server called client route "set_work_area":', data); - return runEvalScript("setWorkArea(" + data.item_id + ',' + + return runEvalScript("setCompProperties(" + data.item_id + ',' + data.start + ',' + data.duration + ',' + - data.frame_rate + ")") + data.frame_rate + ',' + + data.width + ',' + + data.height + ")") .then(function(result){ - log.warn("getWorkArea: " + result); + log.warn("set_comp_properties: " + result); return result; }); }); @@ -237,7 +239,7 @@ function main(websocket_url){ RPC.addRoute('AfterEffects.get_render_info', function (data) { log.warn('Server called client route "get_render_info":', data); - return runEvalScript("getRenderInfo()") + return runEvalScript("getRenderInfo(" + data.comp_id +")") .then(function(result){ log.warn("get_render_info: " + result); return result; @@ -255,7 +257,7 @@ function main(websocket_url){ RPC.addRoute('AfterEffects.import_background', function (data) { log.warn('Server called client route "import_background":', data); - return runEvalScript("importBackground(" + data.comp_id + ", " + + return runEvalScript("importBackground(" + data.comp_id + ", " + "'" + data.comp_name + "', " + JSON.stringify(data.files) + ")") .then(function(result){ @@ -266,7 +268,7 @@ function main(websocket_url){ RPC.addRoute('AfterEffects.reload_background', function (data) { log.warn('Server called client route "reload_background":', data); - return runEvalScript("reloadBackground(" + data.comp_id + ", " + + return runEvalScript("reloadBackground(" + data.comp_id + ", " + "'" + data.comp_name + "', " + JSON.stringify(data.files) + ")") .then(function(result){ @@ -289,7 +291,7 @@ function main(websocket_url){ RPC.addRoute('AfterEffects.render', function (data) { log.warn('Server called client route "render":', data); var escapedPath = EscapeStringForJSX(data.folder_url); - return runEvalScript("render('" + escapedPath +"')") + return runEvalScript("render('" + escapedPath +"', " + data.comp_id + ")") 
.then(function(result){ log.warn("render: " + result); return result; @@ -314,6 +316,16 @@ function main(websocket_url){ log.warn('Server called client route "close":', data); return runEvalScript("close()"); }); + + RPC.addRoute('AfterEffects.print_msg', function (data) { + log.warn('Server called client route "print_msg":', data); + var escaped_msg = EscapeStringForJSX(data.msg); + return runEvalScript("printMsg('" + escaped_msg +"')") + .then(function(result){ + log.warn("print_msg: " + result); + return result; + }); + }); } /** main entry point **/ @@ -323,17 +335,17 @@ startUp("WEBSOCKET_URL"); 'use strict'; var csInterface = new CSInterface(); - - + + function init() { - + themeManager.init(); - + $("#btn_test").click(function () { csInterface.evalScript('sayHello()'); }); } - + init(); }()); diff --git a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx index 91df433908..7d0b20bbb4 100644 --- a/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx +++ b/openpype/hosts/aftereffects/api/extension/jsx/hostscript.jsx @@ -1,7 +1,7 @@ /*jslint vars: true, plusplus: true, devel: true, nomen: true, regexp: true, indent: 4, maxerr: 50 */ /*global $, Folder*/ -#include "../js/libs/json.js"; +//@include "../js/libs/json.js" /* All public API function should return JSON! */ @@ -29,13 +29,13 @@ function getEnv(variable){ function getMetadata(){ /** * Returns payload in 'Label' field of project's metadata - * + * **/ if (ExternalObject.AdobeXMPScript === undefined){ ExternalObject.AdobeXMPScript = new ExternalObject('lib:AdobeXMPScript'); } - + var proj = app.project; var meta = new XMPMeta(app.project.xmpPacket); var schemaNS = XMPMeta.getNamespaceURI("xmp"); @@ -53,7 +53,7 @@ function getMetadata(){ function imprint(payload){ /** * Stores payload in 'Label' field of project's metadata - * + * * Args: * payload (string): json content */ @@ -61,14 +61,14 @@ function imprint(payload){ ExternalObject.AdobeXMPScript = new ExternalObject('lib:AdobeXMPScript'); } - + var proj = app.project; var meta = new XMPMeta(app.project.xmpPacket); var schemaNS = XMPMeta.getNamespaceURI("xmp"); var label = "xmp:Label"; meta.setProperty(schemaNS, label, payload); - + app.project.xmpPacket = meta.serialize(); } @@ -116,14 +116,14 @@ function getItems(comps, folders, footages){ /** * Returns JSON representation of compositions and * if 'collectLayers' then layers in comps too. 
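getMetadata()/imprint() above persist all OpenPype containers and created instances as a single JSON list in the project's XMP "Label" property, so the data travels with the .aep file itself. A pure-Python sketch of that payload shape and an upsert-style update; the real merge logic lives in AfterEffectsServerStub.imprint, and the keys here are examples only:

```python
import json

# Example of the JSON list stored in the XMP "Label" property.
layers_meta = [
    {"id": "pyblish.avalon.instance", "instance_id": "uuid-1",
     "subset": "renderCompositingMain", "members": ["123"]},
    {"id": "publish_context", "variant": "Main"},
]


def upsert(meta, key, data):
    """Replace the entry matching 'key' (instance_id or id), else append."""
    for idx, item in enumerate(meta):
        if key in (item.get("instance_id"), item.get("id")):
            meta[idx] = data
            return meta
    meta.append(data)
    return meta


payload = json.dumps(
    upsert(layers_meta, "publish_context",
           {"id": "publish_context", "variant": "Mobile"}))
# 'payload' is what imprint() would serialize back into the Label field.
```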
- * + * * Args: * comps (bool): return selected compositions * folders (bool): return folders * footages (bool): return FootageItem * Returns: * (list) of JSON items - */ + */ var items = [] for (i = 1; i <= app.project.items.length; ++i){ var item = app.project.items[i]; @@ -142,14 +142,14 @@ function getItems(comps, folders, footages){ function getSelectedItems(comps, folders, footages){ /** * Returns list of selected items from Project menu - * + * * Args: * comps (bool): return selected compositions * folders (bool): return folders * footages (bool): return FootageItem * Returns: * (list) of JSON items - */ + */ var items = [] for (i = 0; i < app.project.selection.length; ++i){ var item = app.project.selection[i]; @@ -166,9 +166,9 @@ function getSelectedItems(comps, folders, footages){ function _getItem(item, comps, folders, footages){ /** - * Auxiliary function as project items and selections + * Auxiliary function as project items and selections * are indexed in different way :/ - * Refactor + * Refactor */ var item_type = ''; if (item instanceof FolderItem){ @@ -189,7 +189,7 @@ function _getItem(item, comps, folders, footages){ return "{}"; } } - + var item = {"name": item.name, "id": item.id, "type": item_type}; @@ -200,7 +200,7 @@ function importFile(path, item_name, import_options){ /** * Imports file (image tested for now) as a FootageItem. * Creates new composition - * + * * Args: * path (string): absolute path to image file * item_name (string): label for composition @@ -218,7 +218,7 @@ function importFile(path, item_name, import_options){ app.beginUndoGroup("Import File"); fp = new File(path); if (fp.exists){ - try { + try { im_opt = new ImportOptions(fp); importAsType = import_options["ImportAsType"]; @@ -234,18 +234,18 @@ function importFile(path, item_name, import_options){ } if (importAsType.indexOf('PROJECT') > 0){ im_opt.importAs = ImportAsType.PROJECT; - } - + } + } if ('sequence' in import_options){ im_opt.sequence = true; } - + comp = app.project.importFile(im_opt); if (app.project.selection.length == 2 && app.project.selection[0] instanceof FolderItem){ - comp.parentFolder = app.project.selection[0] + comp.parentFolder = app.project.selection[0] } } catch (error) { return _prepareError(error.toString() + importOptions.file.fsName); @@ -283,14 +283,14 @@ function setLabelColor(comp_id, color_idx){ function replaceItem(comp_id, path, item_name){ /** * Replaces loaded file with new file and updates name - * + * * Args: * comp_id (int): id of composition, not a index! 
* path (string): absolute path to new file * item_name (string): new composition name */ app.beginUndoGroup("Replace File"); - + fp = new File(path); if (!fp.exists){ return _prepareError("File " + path + " not found."); @@ -303,7 +303,7 @@ function replaceItem(comp_id, path, item_name){ }else{ item.replace(fp); } - + item.name = item_name; } catch (error) { return _prepareError(error.toString() + path); @@ -319,7 +319,7 @@ function replaceItem(comp_id, path, item_name){ function renameItem(item_id, new_name){ /** * Renames item with 'item_id' to 'new_name' - * + * * Args: * item_id (int): id to search item * new_name (str) @@ -335,7 +335,7 @@ function renameItem(item_id, new_name){ function deleteItem(item_id){ /** * Delete any 'item_id' - * + * * Not restricted only to comp, it could delete * any item with 'id' */ @@ -347,38 +347,76 @@ function deleteItem(item_id){ } } -function getWorkArea(comp_id){ +function getCompProperties(comp_id){ /** - * Returns information about workarea - are that will be - * rendered. All calculation will be done in OpenPype, - * easier to modify without redeploy of extension. - * + * Returns information about composition - are that will be + * rendered. + * * Returns * (dict) */ - var item = app.project.itemByID(comp_id); - if (item){ - return JSON.stringify({ - "workAreaStart": item.displayStartFrame, - "workAreaDuration": item.duration, - "frameRate": item.frameRate}); - }else{ + var comp = app.project.itemByID(comp_id); + if (!comp){ return _prepareError("There is no composition with "+ comp_id); } + + return JSON.stringify({ + "id": comp.id, + "name": comp.name, + "frameStart": comp.displayStartFrame, + "framesDuration": comp.duration * comp.frameRate, + "frameRate": comp.frameRate, + "width": comp.width, + "height": comp.height}); } -function setWorkArea(comp_id, workAreaStart, workAreaDuration, frameRate){ +function setCompProperties(comp_id, frameStart, framesCount, frameRate, + width, height){ /** * Sets work area info from outside (from Ftrack via OpenPype) */ - var item = app.project.itemByID(comp_id); - if (item){ - item.displayStartTime = workAreaStart; - item.duration = workAreaDuration; - item.frameRate = frameRate; - }else{ + var comp = app.project.itemByID(comp_id); + if (!comp){ return _prepareError("There is no composition with "+ comp_id); } + + app.beginUndoGroup('change comp properties'); + if (frameStart && framesCount && frameRate){ + comp.displayStartFrame = frameStart; + comp.duration = framesCount / frameRate; + comp.frameRate = frameRate; + } + if (width && height){ + var widthOld = comp.width; + var widthNew = width; + var widthDelta = widthNew - widthOld; + + var heightOld = comp.height; + var heightNew = height; + var heightDelta = heightNew - heightOld; + + var offset = [widthDelta / 2, heightDelta / 2]; + + comp.width = widthNew; + comp.height = heightNew; + + for (var i = 1, il = comp.numLayers; i <= il; i++) { + var layer = comp.layer(i); + var positionProperty = layer.property('ADBE Transform Group').property('ADBE Position'); + + if (positionProperty.numKeys > 0) { + for (var j = 1, jl = positionProperty.numKeys; j <= jl; j++) { + var keyValue = positionProperty.keyValue(j); + positionProperty.setValueAtKey(j, keyValue + offset); + } + } else { + var positionValue = positionProperty.value; + positionProperty.setValue(positionValue + offset); + } + } + } + + app.endUndoGroup(); } function save(){ @@ -395,41 +433,84 @@ function saveAs(path){ app.project.save(fp = new File(path)); } -function getRenderInfo(){ +function 
getRenderInfo(comp_id){ /*** Get info from render queue. - Currently pulls only file name to parse extension and + Currently pulls only file name to parse extension and if it is sequence in Python + Args: + comp_id (int): id of composition + Return: + (list) [{file_name:"xx.png", width:00, height:00}] **/ + var item = app.project.itemByID(comp_id); + if (!item){ + return _prepareError("Composition with '" + comp_id + "' wasn't found! Recreate publishable instance(s)") + } + + var comp_name = item.name; + var output_metadata = [] try{ - var render_item = app.project.renderQueue.item(1); - if (render_item.status == RQItemStatus.DONE){ - render_item.duplicate(); // create new, cannot change status if DONE - render_item.remove(); // remove existing to limit duplications - render_item = app.project.renderQueue.item(1); + // render_item.duplicate() should create new item on renderQueue + // BUT it works only sometimes, there are some weird synchronization issue + // this method will be called always before render, so prepare items here + // for render to spare the hassle + for (i = 1; i <= app.project.renderQueue.numItems; ++i){ + var render_item = app.project.renderQueue.item(i); + if (render_item.comp.id != comp_id){ + continue; + } + + if (render_item.status == RQItemStatus.DONE){ + render_item.duplicate(); // create new, cannot change status if DONE + render_item.remove(); // remove existing to limit duplications + continue; + } } - render_item.render = true; // always set render queue to render - var item = render_item.outputModule(1); + // properly validate as `numItems` won't change magically + var comp_id_count = 0; + for (i = 1; i <= app.project.renderQueue.numItems; ++i){ + var render_item = app.project.renderQueue.item(i); + if (render_item.comp.id != comp_id){ + continue; + } + comp_id_count += 1; + var item = render_item.outputModule(1); + + for (j = 1; j<= render_item.numOutputModules; ++j){ + var file_url = item.file.toString(); + output_metadata.push( + JSON.stringify({ + "file_name": file_url, + "width": render_item.comp.width, + "height": render_item.comp.height + }) + ); + } + } } catch (error) { return _prepareError("There is no render queue, create one"); } - var file_url = item.file.toString(); - return JSON.stringify({ - "file_name": file_url, - "width": render_item.comp.width, - "height": render_item.comp.height - }) + if (comp_id_count > 1){ + return _prepareError("There cannot be more items in Render Queue for '" + comp_name + "'!") + } + + if (comp_id_count == 0){ + return _prepareError("There is no item in Render Queue for '" + comp_name + "'! Add composition to Render Queue.") + } + + return '[' + output_metadata.join() + ']'; } function getAudioUrlForComp(comp_id){ /** * Searches composition for audio layer - * + * * Only single AVLayer is expected! * Used for collecting Audio - * + * * Args: * comp_id (int): id of composition * Return: @@ -457,7 +538,7 @@ function addItemAsLayerToComp(comp_id, item_id, found_comp){ /** * Adds already imported FootageItem ('item_id') as a new * layer to composition ('comp_id'). - * + * * Args: * comp_id (int): id of target composition * item_id (int): FootageItem.id @@ -480,17 +561,17 @@ function addItemAsLayerToComp(comp_id, item_id, found_comp){ function importBackground(comp_id, composition_name, files_to_import){ /** * Imports backgrounds images to existing or new composition. - * + * * If comp_id is not provided, new composition is created, basic * values (width, heights, frameRatio) takes from first imported * image. 
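getRenderInfo above now answers with a JSON list (one entry per output module of the matching queue item) instead of a single object, and turns the zero- and multiple-queue-item cases into explicit errors. A small sketch of consuming that contract; the real parsing goes through _handle_return/_to_records in ws_stub.py, whose docstring confirms the 'error' key convention used here:

```python
import json


def parse_render_info(raw):
    """Parse the JSON list returned by getRenderInfo().

    Mirrors the stub behaviour of raising when the extension replied
    with an error payload instead of data.
    """
    records = json.loads(raw)
    if isinstance(records, dict) and records.get("error"):
        raise ValueError(records["error"])
    return records


items = parse_render_info(
    '[{"file_name": "main_v001.[####].png", "width": 1920, "height": 1080}]'
)
assert items[0]["width"] == 1920
```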
- * + * * Args: * comp_id (int): id of existing composition (null if new) - * composition_name (str): used when new composition + * composition_name (str): used when new composition * files_to_import (list): list of absolute paths to import and * add as layers - * + * * Returns: * (str): json representation (id, name, members) */ @@ -512,7 +593,7 @@ function importBackground(comp_id, composition_name, files_to_import){ } } } - + if (files_to_import){ for (i = 0; i < files_to_import.length; ++i){ item = _importItem(files_to_import[i]); @@ -524,8 +605,8 @@ function importBackground(comp_id, composition_name, files_to_import){ if (!comp){ folder = app.project.items.addFolder(composition_name); imported_ids.push(folder.id); - comp = app.project.items.addComp(composition_name, item.width, - item.height, item.pixelAspect, + comp = app.project.items.addComp(composition_name, item.width, + item.height, item.pixelAspect, 1, 26.7); // hardcode defaults imported_ids.push(comp.id); comp.parentFolder = folder; @@ -534,7 +615,7 @@ function importBackground(comp_id, composition_name, files_to_import){ item.parentFolder = folder; addItemAsLayerToComp(comp.id, item.id, comp); - } + } } var item = {"name": comp.name, "id": folder.id, @@ -545,19 +626,19 @@ function importBackground(comp_id, composition_name, files_to_import){ function reloadBackground(comp_id, composition_name, files_to_import){ /** * Reloads existing composition. - * + * * It deletes complete composition with encompassing folder, recreates * from scratch via 'importBackground' functionality. - * + * * Args: * comp_id (int): id of existing composition (null if new) - * composition_name (str): used when new composition + * composition_name (str): used when new composition * files_to_import (list): list of absolute paths to import and * add as layers - * + * * Returns: * (str): json representation (id, name, members) - * + * */ var imported_ids = []; // keep track of members of composition comp = app.project.itemByID(comp_id); @@ -620,7 +701,7 @@ function reloadBackground(comp_id, composition_name, files_to_import){ function _get_file_name(file_url){ /** * Returns file name without extension from 'file_url' - * + * * Args: * file_url (str): full absolute url * Returns: @@ -635,7 +716,7 @@ function _delete_obsolete_items(folder, new_filenames){ /*** * Goes through 'folder' and removes layers not in new * background - * + * * Args: * folder (FolderItem) * new_filenames (array): list of layer names in new bg @@ -660,14 +741,14 @@ function _delete_obsolete_items(folder, new_filenames){ function _importItem(file_url){ /** * Imports 'file_url' as new FootageItem - * + * * Args: * file_url (str): file url with content * Returns: * (FootageItem) */ file_name = _get_file_name(file_url); - + //importFile prepared previously to return json item_json = importFile(file_url, file_name, JSON.stringify({"ImportAsType":"FOOTAGE"})); item_json = JSON.parse(item_json); @@ -689,30 +770,42 @@ function isFileSequence (item){ return false; } -function render(target_folder){ +function render(target_folder, comp_id){ var out_dir = new Folder(target_folder); var out_dir = out_dir.fsName; for (i = 1; i <= app.project.renderQueue.numItems; ++i){ var render_item = app.project.renderQueue.item(i); - var om1 = app.project.renderQueue.item(i).outputModule(1); - var file_name = File.decode( om1.file.name ).replace('โ„—', ''); // Name contains special character, space? 
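For context, importBackground/reloadBackground above reply with a JSON blob ({name, id, members}) so the loader can track, and later rebuild, everything it created. A hedged usage sketch against the stub; the method name and argument order are inferred from the 'AfterEffects.import_background' route in main.js, and a live panel connection is required:

```python
from openpype.hosts.aftereffects.api import get_stub

stub = get_stub()
# comp_id=None lets the extension create a new folder + comp named
# "bgMain"; the file paths are made-up examples.
result = stub.import_background(
    None, "bgMain",
    ["C:/plates/bg_sky.png", "C:/plates/bg_hills.png"])
# 'result' carries the folder id and all member ids, which is what a
# later reload_background() needs to rebuild the comp from scratch.
```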
- - var omItem1_settable_str = app.project.renderQueue.item(i).outputModule(1).getSettings( GetSettingsFormat.STRING_SETTABLE ); + var composition = render_item.comp; + if (composition.id == comp_id){ + if (render_item.status == RQItemStatus.DONE){ + var new_item = render_item.duplicate(); + render_item.remove(); + render_item = new_item; + } - if (render_item.status == RQItemStatus.DONE){ - render_item.duplicate(); - render_item.remove(); - continue; + render_item.render = true; + + var om1 = app.project.renderQueue.item(i).outputModule(1); + var file_name = File.decode( om1.file.name ).replace('โ„—', ''); // Name contains special character, space? + + var omItem1_settable_str = app.project.renderQueue.item(i).outputModule(1).getSettings( GetSettingsFormat.STRING_SETTABLE ); + + var targetFolder = new Folder(target_folder); + if (!targetFolder.exists) { + targetFolder.create(); + } + + om1.file = new File(targetFolder.fsName + '/' + file_name); + }else{ + if (render_item.status != RQItemStatus.DONE){ + render_item.render = false; + } } - var targetFolder = new Folder(target_folder); - if (!targetFolder.exists) { - targetFolder.create(); - } - - om1.file = new File(targetFolder.fsName + '/' + file_name); } + app.beginSuppressDialogs(); app.project.renderQueue.render(); + app.endSuppressDialogs(false); } function close(){ @@ -724,6 +817,10 @@ function getAppVersion(){ return _prepareSingleValue(app.version); } +function printMsg(msg){ + alert(msg); +} + function _prepareSingleValue(value){ return JSON.stringify({"result": value}) } diff --git a/openpype/hosts/aftereffects/api/launch_logic.py b/openpype/hosts/aftereffects/api/launch_logic.py index 50675c8482..77c2b0b6ca 100644 --- a/openpype/hosts/aftereffects/api/launch_logic.py +++ b/openpype/hosts/aftereffects/api/launch_logic.py @@ -1,49 +1,77 @@ import os +import sys import subprocess import collections import logging import asyncio import functools +import traceback + from wsrpc_aiohttp import ( WebSocketRoute, WebSocketAsync ) -from qtpy import QtCore +from qtpy import QtCore, QtWidgets from openpype.lib import Logger -from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools +from openpype.tests.lib import is_in_tests +from openpype.pipeline import install_host, legacy_io +from openpype.modules import ModulesManager from openpype.tools.adobe_webserver.app import WebServerTool -from .ws_stub import AfterEffectsServerStub +from .ws_stub import get_stub +from .lib import set_settings log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -class ConnectionNotEstablishedYet(Exception): - pass +def safe_excepthook(*args): + traceback.print_exception(*args) -def get_stub(): - """ - Convenience function to get server RPC stub to call methods directed - for host (Photoshop). - It expects already created connection, started from client. 
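main() above never touches Qt widgets from RPC callbacks directly; both headless publishing and the Workfiles tool are queued through ProcessLauncher.execute_in_main_thread. A simplified, self-contained stand-in for that pattern (the real launcher drains its deque periodically on the Qt GUI thread):

```python
import collections
import functools


class MainThreadDispatcher:
    """Simplified stand-in for ProcessLauncher's callback queue."""

    _callbacks = collections.deque()

    @classmethod
    def execute_in_main_thread(cls, callback):
        cls._callbacks.append(callback)

    @classmethod
    def process(cls):
        # The real launcher runs this from a timer on the main thread.
        while cls._callbacks:
            cls._callbacks.popleft()()


MainThreadDispatcher.execute_in_main_thread(
    functools.partial(print, "show workfiles tool"))
MainThreadDispatcher.process()
```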
- Currently created when panel is opened (PS: Window>Extensions>Avalon) - :return: where functions could be called from - """ - ae_stub = AfterEffectsServerStub() - if not ae_stub.client: - raise ConnectionNotEstablishedYet("Connection is not created yet") +def main(*subprocess_args): + """Main entrypoint to AE launching, called from pre hook.""" + sys.excepthook = safe_excepthook - return ae_stub + from openpype.hosts.aftereffects.api import AfterEffectsHost + host = AfterEffectsHost() + install_host(host) -def stub(): - return get_stub() + os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" + app = QtWidgets.QApplication([]) + app.setQuitOnLastWindowClosed(False) + + launcher = ProcessLauncher(subprocess_args) + launcher.start() + + if os.environ.get("HEADLESS_PUBLISH"): + manager = ModulesManager() + webpublisher_addon = manager["webpublisher"] + + launcher.execute_in_main_thread( + functools.partial( + webpublisher_addon.headless_publish, + log, + "CloseAE", + is_in_tests() + ) + ) + + elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): + save = False + if os.getenv("WORKFILES_SAVE_AS"): + save = True + + launcher.execute_in_main_thread( + lambda: host_tools.show_tool_by_name("workfiles", save=save) + ) + + sys.exit(app.exec_()) def show_tool_by_name(tool_name): @@ -55,6 +83,7 @@ def show_tool_by_name(tool_name): class ProcessLauncher(QtCore.QObject): + """Launches webserver, connects to it, runs main thread.""" route_name = "AfterEffects" _main_thread_callbacks = collections.deque() @@ -284,9 +313,6 @@ class AfterEffectsRoute(WebSocketRoute): return await self.socket.call('aftereffects.read') # panel routes for tools - async def creator_route(self): - self._tool_route("creator") - async def workfiles_route(self): self._tool_route("workfiles") @@ -294,13 +320,19 @@ class AfterEffectsRoute(WebSocketRoute): self._tool_route("loader") async def publish_route(self): - self._tool_route("publish") + self._tool_route("publisher") async def sceneinventory_route(self): self._tool_route("sceneinventory") - async def subsetmanager_route(self): - self._tool_route("subsetmanager") + async def setresolution_route(self): + self._settings_route(False, True) + + async def setframes_route(self): + self._settings_route(True, False) + + async def setall_route(self): + self._settings_route(True, True) async def experimental_tools_route(self): self._tool_route("experimental_tools") @@ -315,3 +347,13 @@ class AfterEffectsRoute(WebSocketRoute): # Required return statement. return "nothing" + + def _settings_route(self, frames, resolution): + partial_method = functools.partial(set_settings, + frames, + resolution) + + ProcessLauncher.execute_in_main_thread(partial_method) + + # Required return statement. 
+ return "nothing" diff --git a/openpype/hosts/aftereffects/api/lib.py b/openpype/hosts/aftereffects/api/lib.py index c738bcba2d..e8352c382b 100644 --- a/openpype/hosts/aftereffects/api/lib.py +++ b/openpype/hosts/aftereffects/api/lib.py @@ -1,67 +1,17 @@ import os -import sys import re import json import contextlib -import traceback import logging -from functools import partial -from qtpy import QtWidgets - -from openpype.pipeline import install_host -from openpype.modules import ModulesManager - -from openpype.tools.utils import host_tools -from .launch_logic import ProcessLauncher, get_stub +from openpype.pipeline.context_tools import get_current_context +from openpype.client import get_asset_by_name +from .ws_stub import get_stub log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) -def safe_excepthook(*args): - traceback.print_exception(*args) - - -def main(*subprocess_args): - sys.excepthook = safe_excepthook - - from openpype.hosts.aftereffects import api - - install_host(api) - - os.environ["OPENPYPE_LOG_NO_COLORS"] = "False" - app = QtWidgets.QApplication([]) - app.setQuitOnLastWindowClosed(False) - - launcher = ProcessLauncher(subprocess_args) - launcher.start() - - if os.environ.get("HEADLESS_PUBLISH"): - manager = ModulesManager() - webpublisher_addon = manager["webpublisher"] - - launcher.execute_in_main_thread( - partial( - webpublisher_addon.headless_publish, - log, - "CloseAE", - os.environ.get("IS_TEST") - ) - ) - - elif os.environ.get("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", True): - save = False - if os.getenv("WORKFILES_SAVE_AS"): - save = True - - launcher.execute_in_main_thread( - lambda: host_tools.show_tool_by_name("workfiles", save=save) - ) - - sys.exit(app.exec_()) - - @contextlib.contextmanager def maintained_selection(): """Maintain selection during context.""" @@ -133,3 +83,78 @@ def get_background_layers(file_url): layer.get("filename")). replace("\\", "/")) return layers + + +def get_asset_settings(asset_doc): + """Get settings on current asset from database. + + Returns: + dict: Scene data. + + """ + asset_data = asset_doc["data"] + fps = asset_data.get("fps", 0) + frame_start = asset_data.get("frameStart", 0) + frame_end = asset_data.get("frameEnd", 0) + handle_start = asset_data.get("handleStart", 0) + handle_end = asset_data.get("handleEnd", 0) + resolution_width = asset_data.get("resolutionWidth", 0) + resolution_height = asset_data.get("resolutionHeight", 0) + duration = (frame_end - frame_start + 1) + handle_start + handle_end + + return { + "fps": fps, + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "duration": duration + } + + +def set_settings(frames, resolution, comp_ids=None, print_msg=True): + """Sets number of frames and resolution to selected comps. 
+ + Args: + frames (bool): True if set frame info + resolution (bool): True if set resolution + comp_ids (list): specific composition ids, if empty + it tries to look for currently selected + print_msg (bool): True throw JS alert with msg + """ + frame_start = frames_duration = fps = width = height = None + current_context = get_current_context() + + asset_doc = get_asset_by_name(current_context["project_name"], + current_context["asset_name"]) + settings = get_asset_settings(asset_doc) + + msg = '' + if frames: + frame_start = settings["frameStart"] - settings["handleStart"] + frames_duration = settings["duration"] + fps = settings["fps"] + msg += f"frame start:{frame_start}, duration:{frames_duration}, "\ + f"fps:{fps}" + if resolution: + width = settings["resolutionWidth"] + height = settings["resolutionHeight"] + msg += f"width:{width} and height:{height}" + + stub = get_stub() + if not comp_ids: + comps = stub.get_selected_items(True, False, False) + comp_ids = [comp.id for comp in comps] + if not comp_ids: + stub.print_msg("Select at least one composition to apply settings.") + return + + for comp_id in comp_ids: + msg = f"Setting for comp {comp_id} " + msg + log.debug(msg) + stub.set_comp_properties(comp_id, frame_start, frames_duration, + fps, width, height) + if print_msg: + stub.print_msg(msg) diff --git a/openpype/hosts/aftereffects/api/pipeline.py b/openpype/hosts/aftereffects/api/pipeline.py index 68a00e30b7..27aee8c7ce 100644 --- a/openpype/hosts/aftereffects/api/pipeline.py +++ b/openpype/hosts/aftereffects/api/pipeline.py @@ -8,15 +8,20 @@ from openpype.lib import Logger, register_event_callback from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, AVALON_CONTAINER_ID, - legacy_io, ) from openpype.pipeline.load import any_outdated_containers import openpype.hosts.aftereffects -from .launch_logic import get_stub, ConnectionNotEstablishedYet +from openpype.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost +) + +from .launch_logic import get_stub +from .ws_stub import ConnectionNotEstablishedYet log = Logger.get_logger(__name__) @@ -30,27 +35,139 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") -def install(): - print("Installing Pype config...") +class AfterEffectsHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "aftereffects" - pyblish.api.register_host("aftereffects") - pyblish.api.register_plugin_path(PUBLISH_PATH) + def __init__(self): + self._stub = None + super(AfterEffectsHost, self).__init__() - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - log.info(PUBLISH_PATH) + @property + def stub(self): + """ + Handle pulling stub from PS to run operations on host + Returns: + (AEServerStub) or None + """ + if self._stub: + return self._stub - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) + try: + stub = get_stub() # only after Photoshop is up + except ConnectionNotEstablishedYet: + print("Not connected yet, ignoring") + return - register_event_callback("application.launched", application_launch) + self._stub = stub + return self._stub + def install(self): + print("Installing Pype config...") -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) + pyblish.api.register_host("aftereffects") + 
pyblish.api.register_plugin_path(PUBLISH_PATH) + + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + log.info(PUBLISH_PATH) + + pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled + ) + + register_event_callback("application.launched", application_launch) + + def get_workfile_extensions(self): + return [".aep"] + + def save_workfile(self, dst_path=None): + self.stub.saveAs(dst_path, True) + + def open_workfile(self, filepath): + self.stub.open(filepath) + + return True + + def get_current_workfile(self): + try: + full_name = get_stub().get_active_document_full_name() + if full_name and full_name != "null": + return os.path.normpath(full_name).replace("\\", "/") + except ValueError: + print("Nothing opened") + pass + + return None + + def get_containers(self): + return ls() + + def get_context_data(self): + meta = self.stub.get_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + def update_context_data(self, data, changes): + item = data + item["id"] = "publish_context" + self.stub.imprint(item["id"], item) + + # created instances section + def list_instances(self): + """List all created instances from current workfile which + will be published. + + Pulls from File > File Info + + For SubsetManager + + Returns: + (list) of dictionaries matching instances format + """ + stub = self.stub + if not stub: + return [] + + instances = [] + layers_meta = stub.get_metadata() + + for instance in layers_meta: + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) + return instances + + def remove_instance(self, instance): + """Remove instance from current workfile metadata. + + Updates metadata of current file in File > File Info and removes + icon highlight on group layer. + + For SubsetManager + + Args: + instance (dict): instance representation from subsetmanager model + """ + stub = self.stub + + if not stub: + return + + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_item(instance["members"][0]) + if item: + stub.rename_item(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) def application_launch(): @@ -63,35 +180,6 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance[0].Visible = new_value -def get_asset_settings(asset_doc): - """Get settings on current asset from database. - - Returns: - dict: Scene data. - - """ - asset_data = asset_doc["data"] - fps = asset_data.get("fps") - frame_start = asset_data.get("frameStart") - frame_end = asset_data.get("frameEnd") - handle_start = asset_data.get("handleStart") - handle_end = asset_data.get("handleEnd") - resolution_width = asset_data.get("resolutionWidth") - resolution_height = asset_data.get("resolutionHeight") - duration = (frame_end - frame_start + 1) + handle_start + handle_end - - return { - "fps": fps, - "frameStart": frame_start, - "frameEnd": frame_end, - "handleStart": handle_start, - "handleEnd": handle_end, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "duration": duration - } - - def ls(): """Yields containers from active AfterEffects document. 
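The net effect of this file: After Effects moves from module-level install()/list_instances() functions to an AfterEffectsHost object implementing the HostBase/IWorkfileHost/ILoadHost/IPublishHost interfaces. A hedged sketch of how calling code changes once install_host() has registered it:

```python
from openpype.pipeline import registered_host

host = registered_host()                # -> AfterEffectsHost instance
print(host.get_workfile_extensions())   # ['.aep'], was workio.file_extensions()
for instance in host.list_instances():  # was api.list_instances()
    print(instance.get("subset"))
```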
@@ -191,102 +279,17 @@ def containerise(name, return comp -# created instances section -def list_instances(): - """ - List all created instances from current workfile which - will be published. +def cache_and_get_instances(creator): + """Cache instances in shared data. - Pulls from File > File Info - - For SubsetManager - - Returns: - (list) of dictionaries matching instances format - """ - stub = _get_stub() - if not stub: - return [] - - instances = [] - layers_meta = stub.get_metadata() - - for instance in layers_meta: - if instance.get("id") == "pyblish.avalon.instance": - instances.append(instance) - return instances - - -def remove_instance(instance): - """ - Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. - - For SubsetManager - - Args: - instance (dict): instance representation from subsetmanager model - """ - stub = _get_stub() - - if not stub: - return - - inst_id = instance.get("instance_id") or instance.get("uuid") # legacy - if not inst_id: - log.warning("No instance identifier for {}".format(instance)) - return - - stub.remove_instance(inst_id) - - if instance.get("members"): - item = stub.get_item(instance["members"][0]) - if item: - stub.rename_item(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) - - -# new publisher section -def get_context_data(): - meta = _get_stub().get_metadata() - for item in meta: - if item.get("id") == "publish_context": - item.pop("id") - return item - - return {} - - -def update_context_data(data, changes): - item = data - item["id"] = "publish_context" - _get_stub().imprint(item["id"], item) - - -def get_context_title(): - """Returns title for Creator window""" - - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - return "{}/{}/{}".format(project_name, asset_name, task_name) - - -def _get_stub(): - """ - Handle pulling stub from PS to run operations on host + Storing all instances as a list as legacy instances might be still present. + Args: + creator (Creator): Plugin which would like to get instances from host. 
Returns: - (AEServerStub) or None + List[]: list of all instances stored in metadata """ - try: - stub = get_stub() # only after Photoshop is up - except ConnectionNotEstablishedYet: - print("Not connected yet, ignoring") - return - - if not stub.get_active_document_name(): - return - - return stub + shared_key = "openpype.photoshop.instances" + if shared_key not in creator.collection_shared_data: + creator.collection_shared_data[shared_key] = \ + creator.host.list_instances() + return creator.collection_shared_data[shared_key] diff --git a/openpype/hosts/aftereffects/api/workio.py b/openpype/hosts/aftereffects/api/workio.py deleted file mode 100644 index 18b40af5dc..0000000000 --- a/openpype/hosts/aftereffects/api/workio.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Host API required Work Files tool""" -import os - -from .launch_logic import get_stub - - -def file_extensions(): - return [".aep"] - - -def has_unsaved_changes(): - if _active_document(): - return not get_stub().is_saved() - - return False - - -def save_file(filepath): - get_stub().saveAs(filepath, True) - - -def open_file(filepath): - get_stub().open(filepath) - - return True - - -def current_file(): - try: - full_name = get_stub().get_active_document_full_name() - if full_name and full_name != "null": - return os.path.normpath(full_name).replace("\\", "/") - except ValueError: - print("Nothing opened") - pass - - return None - - -def work_root(session): - return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") - - -def _active_document(): - # TODO merge with current_file - even in extension - document_name = None - try: - document_name = get_stub().get_active_document_name() - except ValueError: - print("Nothing opened") - pass - - return document_name diff --git a/openpype/hosts/aftereffects/api/ws_stub.py b/openpype/hosts/aftereffects/api/ws_stub.py index 8719a8f46e..576c997f49 100644 --- a/openpype/hosts/aftereffects/api/ws_stub.py +++ b/openpype/hosts/aftereffects/api/ws_stub.py @@ -11,6 +11,10 @@ from wsrpc_aiohttp import WebSocketAsync from openpype.tools.adobe_webserver.app import WebServerTool +class ConnectionNotEstablishedYet(Exception): + pass + + @attr.s class AEItem(object): """ @@ -24,8 +28,8 @@ class AEItem(object): # all imported elements, single for # regular image, array for Backgrounds members = attr.ib(factory=list) - workAreaStart = attr.ib(default=None) - workAreaDuration = attr.ib(default=None) + frameStart = attr.ib(default=None) + framesDuration = attr.ib(default=None) frameRate = attr.ib(default=None) file_name = attr.ib(default=None) instance_id = attr.ib(default=None) # New Publisher @@ -80,7 +84,7 @@ class AfterEffectsServerStub(): Get complete stored JSON with metadata from AE.Metadata.Label field. - It contains containers loaded by any Loader OR instances creted + It contains containers loaded by any Loader OR instances created by Creator. Returns: @@ -355,42 +359,50 @@ class AfterEffectsServerStub(): return self._handle_return(res) - def get_work_area(self, item_id): - """ Get work are information for render purposes + def get_comp_properties(self, comp_id): + """ Get composition information for render purposes + + Returns startFrame, frameDuration, fps, width, height. 
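AEItem's seconds-based workAreaStart/workAreaDuration fields give way to frame-based frameStart/framesDuration; getCompProperties already multiplies duration by frameRate, so the collector can later derive the end frame without touching fps again. A worked check of that arithmetic as collect_render.py below uses it:

```python
# Comp of 1.0 s at 25 fps, displayed from frame 1001.
frame_rate = 25.0
frame_start = 1001
frames_duration = 1.0 * frame_rate                # 25.0 frames
frame_end = round(frame_start + frames_duration) - 1
assert frame_end == 1025                          # frames 1001..1025
```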
+ Args: - item_id (int): + comp_id (int): Returns: (AEItem) """ res = self.websocketserver.call(self.client.call - ('AfterEffects.get_work_area', - item_id=item_id + ('AfterEffects.get_comp_properties', + item_id=comp_id )) records = self._to_records(self._handle_return(res)) if records: return records.pop() - def set_work_area(self, item, start, duration, frame_rate): + def set_comp_properties(self, comp_id, start, duration, frame_rate, + width, height): """ Set work area to predefined values (from Ftrack). Work area directs what gets rendered. Beware of rounding, AE expects seconds, not frames directly. Args: - item (dict): - start (float): workAreaStart in seconds - duration (float): in seconds + comp_id (int): + start (int): workAreaStart in frames + duration (int): in frames frame_rate (float): frames in seconds + width (int): resolution width + height (int): resolution height """ res = self.websocketserver.call(self.client.call - ('AfterEffects.set_work_area', - item_id=item.id, + ('AfterEffects.set_comp_properties', + item_id=comp_id, start=start, duration=duration, - frame_rate=frame_rate)) + frame_rate=frame_rate, + width=width, + height=height)) return self._handle_return(res) def save(self): @@ -418,18 +430,18 @@ class AfterEffectsServerStub(): return self._handle_return(res) - def get_render_info(self): + def get_render_info(self, comp_id): """ Get render queue info for render purposes Returns: - (AEItem): with 'file_name' field + (list) of (AEItem): with 'file_name' field """ res = self.websocketserver.call(self.client.call - ('AfterEffects.get_render_info')) + ('AfterEffects.get_render_info', + comp_id=comp_id)) records = self._to_records(self._handle_return(res)) - if records: - return records.pop() + return records def get_audio_url(self, item_id): """ Get audio layer absolute url for comp @@ -522,7 +534,7 @@ class AfterEffectsServerStub(): if records: return records.pop() - def render(self, folder_url): + def render(self, folder_url, comp_id): """ Render all renderqueueitem to 'folder_url' Args: @@ -531,7 +543,8 @@ class AfterEffectsServerStub(): """ res = self.websocketserver.call(self.client.call ('AfterEffects.render', - folder_url=folder_url)) + folder_url=folder_url, + comp_id=comp_id)) return self._handle_return(res) def get_extension_version(self): @@ -553,6 +566,12 @@ class AfterEffectsServerStub(): return self._handle_return(res) + def print_msg(self, msg): + """Triggers Javascript alert dialog.""" + self.websocketserver.call(self.client.call + ('AfterEffects.print_msg', + msg=msg)) + def _handle_return(self, res): """Wraps return, throws ValueError if 'error' key is present.""" if res and isinstance(res, str) and res != "undefined": @@ -607,8 +626,8 @@ class AfterEffectsServerStub(): d.get('name'), d.get('type'), d.get('members'), - d.get('workAreaStart'), - d.get('workAreaDuration'), + d.get('frameStart'), + d.get('framesDuration'), d.get('frameRate'), d.get('file_name'), d.get("instance_id"), @@ -617,3 +636,18 @@ class AfterEffectsServerStub(): ret.append(item) return ret + + +def get_stub(): + """ + Convenience function to get server RPC stub to call methods directed + for host (Photoshop). + It expects already created connection, started from client. 
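get_stub() now lives here in ws_stub.py (breaking the old import cycle with launch_logic) and raises ConnectionNotEstablishedYet until the extension panel has opened its websocket. Callers such as AfterEffectsHost.stub treat that as "not ready yet" rather than a failure; a minimal sketch of the same guard:

```python
from openpype.hosts.aftereffects.api.ws_stub import (
    ConnectionNotEstablishedYet,
    get_stub,
)


def try_get_stub():
    """Return the stub, or None while the AE panel is still starting."""
    try:
        return get_stub()
    except ConnectionNotEstablishedYet:
        return None
```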
+ Currently created when panel is opened (PS: Window>Extensions>Avalon) + :return: where functions could be called from + """ + ae_stub = AfterEffectsServerStub() + if not ae_stub.client: + raise ConnectionNotEstablishedYet("Connection is not created yet") + + return ae_stub diff --git a/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py deleted file mode 100644 index 04413acbcf..0000000000 --- a/openpype/hosts/aftereffects/plugins/create/create_legacy_local_render.py +++ /dev/null @@ -1,13 +0,0 @@ -from openpype.hosts.aftereffects.plugins.create import create_legacy_render - - -class CreateLocalRender(create_legacy_render.CreateRender): - """ Creator to render locally. - - Created only after default render on farm. So family 'render.local' is - used for backward compatibility. - """ - - name = "renderDefault" - label = "Render Locally" - family = "renderLocal" diff --git a/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py b/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py deleted file mode 100644 index e4fbb47a33..0000000000 --- a/openpype/hosts/aftereffects/plugins/create/create_legacy_render.py +++ /dev/null @@ -1,62 +0,0 @@ -from openpype.pipeline import create -from openpype.pipeline import CreatorError -from openpype.hosts.aftereffects.api import ( - get_stub, - list_instances -) - - -class CreateRender(create.LegacyCreator): - """Render folder for publish. - - Creates subsets in format 'familyTaskSubsetname', - eg 'renderCompositingMain'. - - Create only single instance from composition at a time. - """ - - name = "renderDefault" - label = "Render on Farm" - family = "render" - defaults = ["Main"] - - def process(self): - stub = get_stub() # only after After Effects is up - items = [] - if (self.options or {}).get("useSelection"): - items = stub.get_selected_items( - comps=True, folders=False, footages=False - ) - if len(items) > 1: - raise CreatorError( - "Please select only single composition at time." - ) - - if not items: - raise CreatorError(( - "Nothing to create. Select composition " - "if 'useSelection' or create at least " - "one composition." 
- )) - - existing_subsets = [ - instance['subset'].lower() - for instance in list_instances() - ] - - item = items.pop() - if self.name.lower() in existing_subsets: - txt = "Instance with name \"{}\" already exists.".format(self.name) - raise CreatorError(txt) - - self.data["members"] = [item.id] - self.data["uuid"] = item.id # for SubsetManager - self.data["subset"] = ( - self.data["subset"] - .replace(stub.PUBLISH_ICON, '') - .replace(stub.LOADED_ICON, '') - ) - - stub.imprint(item, self.data) - stub.set_label_color(item.id, 14) # Cyan options 0 - 16 - stub.rename_item(item.id, stub.PUBLISH_ICON + self.data["subset"]) diff --git a/openpype/hosts/aftereffects/plugins/create/create_render.py b/openpype/hosts/aftereffects/plugins/create/create_render.py index 1019709dd6..fa79fac78f 100644 --- a/openpype/hosts/aftereffects/plugins/create/create_render.py +++ b/openpype/hosts/aftereffects/plugins/create/create_render.py @@ -1,15 +1,25 @@ +import re + from openpype import resources from openpype.lib import BoolDef, UISeparatorDef from openpype.hosts.aftereffects import api from openpype.pipeline import ( Creator, CreatedInstance, - CreatorError, - legacy_io, + CreatorError ) +from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances +from openpype.hosts.aftereffects.api.lib import set_settings +from openpype.lib import prepare_template_data +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS class RenderCreator(Creator): + """Creates 'render' instance for publishing. + + Result of 'render' instance is video or sequence of images for particular + composition based of configuration in its RenderQueue. + """ identifier = "render" label = "Render" family = "render" @@ -17,18 +27,110 @@ class RenderCreator(Creator): create_allow_context_change = True - def __init__(self, project_settings, *args, **kwargs): - super(RenderCreator, self).__init__(project_settings, *args, **kwargs) - self._default_variants = (project_settings["aftereffects"] - ["create"] - ["RenderCreator"] - ["defaults"]) + # Settings + default_variants = [] + mark_for_review = True + + def create(self, subset_name_from_ui, data, pre_create_data): + stub = api.get_stub() # only after After Effects is up + + try: + _ = stub.get_active_document_full_name() + except ValueError: + raise CreatorError( + "Please save workfile via Workfile app first!" + ) + + if pre_create_data.get("use_selection"): + comps = stub.get_selected_items( + comps=True, folders=False, footages=False + ) + else: + comps = stub.get_items(comps=True, folders=False, footages=False) + + if not comps: + raise CreatorError( + "Nothing to create. Select composition in Project Bin if " + "'Use selection' is toggled or create at least " + "one composition." 
+ ) + use_composition_name = (pre_create_data.get("use_composition_name") or + len(comps) > 1) + for comp in comps: + if use_composition_name: + if "{composition}" not in subset_name_from_ui.lower(): + subset_name_from_ui += "{Composition}" + + composition_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + comp.name + ) + + dynamic_fill = prepare_template_data({"composition": + composition_name}) + subset_name = subset_name_from_ui.format(**dynamic_fill) + data["composition_name"] = composition_name + else: + subset_name = subset_name_from_ui + subset_name = re.sub(r"\{composition\}", '', subset_name, + flags=re.IGNORECASE) + + for inst in self.create_context.instances: + if subset_name == inst.subset_name: + raise CreatorError("{} already exists".format( + inst.subset_name)) + + data["members"] = [comp.id] + new_instance = CreatedInstance(self.family, subset_name, data, + self) + if "farm" in pre_create_data: + use_farm = pre_create_data["farm"] + new_instance.creator_attributes["farm"] = use_farm + + review = pre_create_data["mark_for_review"] + new_instance.creator_attributes["mark_for_review"] = review + + api.get_stub().imprint(new_instance.id, + new_instance.data_to_store()) + self._add_instance_to_context(new_instance) + + stub.rename_item(comp.id, subset_name) + set_settings(True, True, [comp.id], print_msg=False) + + def get_pre_create_attr_defs(self): + output = [ + BoolDef("use_selection", + tooltip="Composition for publishable instance should be " + "selected by default.", + default=True, label="Use selection"), + BoolDef("use_composition_name", + label="Use composition name in subset"), + UISeparatorDef(), + BoolDef("farm", label="Render on farm"), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + return output + + def get_instance_attr_defs(self): + return [ + BoolDef("farm", label="Render on farm"), + BoolDef( + "mark_for_review", + label="Review", + default=False + ) + ] def get_icon(self): return resources.get_openpype_splash_filepath() def collect_instances(self): - for instance_data in api.list_instances(): + for instance_data in cache_and_get_instances(self): # legacy instances have family=='render' or 'renderLocal', use them creator_id = (instance_data.get("creator_identifier") or instance_data.get("family", '').replace("Local", '')) @@ -43,63 +145,72 @@ class RenderCreator(Creator): for created_inst, _changes in update_list: api.get_stub().imprint(created_inst.get("instance_id"), created_inst.data_to_store()) + subset_change = _changes.get("subset") + if subset_change: + api.get_stub().rename_item(created_inst.data["members"][0], + subset_change.new_value) def remove_instances(self, instances): for instance in instances: - api.remove_instance(instance) self._remove_instance_from_context(instance) + self.host.remove_instance(instance) - def create(self, subset_name, data, pre_create_data): - stub = api.get_stub() # only after After Effects is up - if pre_create_data.get("use_selection"): - items = stub.get_selected_items( - comps=True, folders=False, footages=False - ) - else: - items = stub.get_items(comps=True, folders=False, footages=False) + subset = instance.data["subset"] + comp_id = instance.data["members"][0] + comp = api.get_stub().get_item(comp_id) + if comp: + new_comp_name = comp.name.replace(subset, '') + if not new_comp_name: + new_comp_name = "dummyCompName" + api.get_stub().rename_item(comp_id, + new_comp_name) - if len(items) > 1: - raise CreatorError( - "Please select only single composition 
at time." - ) - if not items: - raise CreatorError(( - "Nothing to create. Select composition " - "if 'useSelection' or create at least " - "one composition." - )) + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["aftereffects"]["create"]["RenderCreator"] + ) - for inst in self.create_context.instances: - if subset_name == inst.subset_name: - raise CreatorError("{} already exists".format( - inst.subset_name)) - - data["members"] = [items[0].id] - new_instance = CreatedInstance(self.family, subset_name, data, self) - if "farm" in pre_create_data: - use_farm = pre_create_data["farm"] - new_instance.creator_attributes["farm"] = use_farm - - api.get_stub().imprint(new_instance.id, - new_instance.data_to_store()) - self._add_instance_to_context(new_instance) - - def get_default_variants(self): - return self._default_variants - - def get_instance_attr_defs(self): - return [BoolDef("farm", label="Render on farm")] - - def get_pre_create_attr_defs(self): - output = [ - BoolDef("use_selection", default=True, label="Use selection"), - UISeparatorDef(), - BoolDef("farm", label="Render on farm") - ] - return output + self.mark_for_review = plugin_settings["mark_for_review"] def get_detail_description(self): - return """Creator for Render instances""" + return """Creator for Render instances + + Main publishable item in AfterEffects will be of `render` family. + Result of this item (instance) is picture sequence or video that could + be a final delivery product or loaded and used in another DCCs. + + Select single composition and create instance of 'render' family or + turn off 'Use selection' to create instance for all compositions. + + 'Use composition name in subset' allows to explicitly add composition + name into created subset name. + + Position of composition name could be set in + `project_settings/global/tools/creator/subset_name_profiles` with some + form of '{composition}' placeholder. + + Composition name will be used implicitly if multiple composition should + be handled at same time. + + If {composition} placeholder is not us 'subset_name_profiles' + composition name will be capitalized and set at the end of subset name + if necessary. + + If composition name should be used, it will be cleaned up of characters + that would cause an issue in published file names. 
+ """ + + def get_dynamic_data(self, variant, task_name, asset_doc, + project_name, host_name, instance): + dynamic_data = {} + if instance is not None: + composition_name = instance.get("composition_name") + if composition_name: + dynamic_data["composition"] = composition_name + else: + dynamic_data["composition"] = "{composition}" + + return dynamic_data def _handle_legacy(self, instance_data): """Converts old instances to new format.""" @@ -112,11 +223,14 @@ class RenderCreator(Creator): instance_data.pop("uuid") if not instance_data.get("task"): - instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + instance_data["task"] = self.create_context.get_current_task_name() if not instance_data.get("creator_attributes"): is_old_farm = instance_data["family"] != "renderLocal" instance_data["creator_attributes"] = {"farm": is_old_farm} instance_data["family"] = self.family + if instance_data["creator_attributes"].get("mark_for_review") is None: + instance_data["creator_attributes"]["mark_for_review"] = True + return instance_data diff --git a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py index f82d15b3c9..2e7b9d4a7e 100644 --- a/openpype/hosts/aftereffects/plugins/create/workfile_creator.py +++ b/openpype/hosts/aftereffects/plugins/create/workfile_creator.py @@ -2,9 +2,9 @@ import openpype.hosts.aftereffects.api as api from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, - legacy_io, + CreatedInstance ) +from openpype.hosts.aftereffects.api.pipeline import cache_and_get_instances class AEWorkfileCreator(AutoCreator): @@ -17,7 +17,7 @@ class AEWorkfileCreator(AutoCreator): return [] def collect_instances(self): - for instance_data in api.list_instances(): + for instance_data in cache_and_get_instances(self): creator_id = instance_data.get("creator_identifier") if creator_id == self.identifier: subset_name = instance_data["subset"] @@ -37,10 +37,11 @@ class AEWorkfileCreator(AutoCreator): existing_instance = instance break - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: asset_doc = get_asset_by_name(project_name, asset_name) @@ -55,7 +56,7 @@ class AEWorkfileCreator(AutoCreator): } data.update(self.get_dynamic_data( self.default_variant, task_name, asset_doc, - project_name, host_name + project_name, host_name, None )) new_instance = CreatedInstance( diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_render.py b/openpype/hosts/aftereffects/plugins/publish/collect_render.py index d444ead6dc..aa46461915 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_render.py @@ -22,7 +22,7 @@ class AERenderInstance(RenderInstance): stagingDir = attr.ib(default=None) app_version = attr.ib(default=None) publish_attributes = attr.ib(default={}) - file_name = attr.ib(default=None) + file_names = attr.ib(default=[]) class CollectAERender(publish.AbstractCollectRender): @@ -64,34 +64,35 @@ class CollectAERender(publish.AbstractCollectRender): if family not in ["render", "renderLocal"]: # legacy 
continue - item_id = inst.data["members"][0] + comp_id = int(inst.data["members"][0]) - work_area_info = CollectAERender.get_stub().get_work_area( - int(item_id)) + comp_info = CollectAERender.get_stub().get_comp_properties( + comp_id) - if not work_area_info: + if not comp_info: self.log.warning("Orphaned instance, deleting metadata") - inst_id = inst.get("instance_id") or item_id + inst_id = inst.data.get("instance_id") or str(comp_id) CollectAERender.get_stub().remove_instance(inst_id) continue - frame_start = work_area_info.workAreaStart - frame_end = round(work_area_info.workAreaStart + - float(work_area_info.workAreaDuration) * - float(work_area_info.frameRate)) - 1 - fps = work_area_info.frameRate + frame_start = comp_info.frameStart + frame_end = round(comp_info.frameStart + + comp_info.framesDuration) - 1 + fps = comp_info.frameRate # TODO add resolution when supported by extension task_name = inst.data.get("task") # legacy - render_q = CollectAERender.get_stub().get_render_info() + render_q = CollectAERender.get_stub().get_render_info(comp_id) if not render_q: raise ValueError("No file extension set in Render Queue") + render_item = render_q[0] + instance_families = inst.data.get("families", []) subset_name = inst.data["subset"] instance = AERenderInstance( family="render", - families=inst.data.get("families", []), + families=instance_families, version=version, time="", source=current_file, @@ -103,28 +104,29 @@ class CollectAERender(publish.AbstractCollectRender): setMembers='', publish=True, name=subset_name, - resolutionWidth=render_q.width, - resolutionHeight=render_q.height, + resolutionWidth=render_item.width, + resolutionHeight=render_item.height, pixelAspect=1, tileRendering=False, tilesX=0, tilesY=0, + review="review" in instance_families, frameStart=frame_start, frameEnd=frame_end, frameStep=1, fps=fps, app_version=app_version, publish_attributes=inst.data.get("publish_attributes", {}), - file_name=render_q.file_name + file_names=[item.file_name for item in render_q] ) - comp = compositions_by_id.get(int(item_id)) + comp = compositions_by_id.get(comp_id) if not comp: raise ValueError("There is no composition for item {}". 
- format(item_id)) + format(comp_id)) instance.outputDir = self._get_output_dir(instance) instance.comp_name = comp.name - instance.comp_id = item_id + instance.comp_id = comp_id is_local = "renderLocal" in inst.data["family"] # legacy if inst.data.get("creator_attributes"): @@ -139,6 +141,9 @@ class CollectAERender(publish.AbstractCollectRender): instance.toBeRenderedOn = "deadline" instance.renderer = "aerender" instance.farm = True # to skip integrate + if "review" in instance.families: + # to skip ExtractReview locally + instance.families.remove("review") instances.append(instance) instances_to_remove.append(inst) @@ -163,28 +168,30 @@ class CollectAERender(publish.AbstractCollectRender): start = render_instance.frameStart end = render_instance.frameEnd - _, ext = os.path.splitext(os.path.basename(render_instance.file_name)) - base_dir = self._get_output_dir(render_instance) expected_files = [] - if "#" not in render_instance.file_name: # single frame (mov)W - path = os.path.join(base_dir, "{}_{}_{}.{}".format( - render_instance.asset, - render_instance.subset, - "v{:03d}".format(render_instance.version), - ext.replace('.', '') - )) - expected_files.append(path) - else: - for frame in range(start, end + 1): - path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format( + for file_name in render_instance.file_names: + _, ext = os.path.splitext(os.path.basename(file_name)) + ext = ext.replace('.', '') + version_str = "v{:03d}".format(render_instance.version) + if "#" not in file_name: # single frame (mov)W + path = os.path.join(base_dir, "{}_{}_{}.{}".format( render_instance.asset, render_instance.subset, - "v{:03d}".format(render_instance.version), - str(frame).zfill(self.padding_width), - ext.replace('.', '') + version_str, + ext )) expected_files.append(path) + else: + for frame in range(start, end + 1): + path = os.path.join(base_dir, "{}_{}_{}.{}.{}".format( + render_instance.asset, + render_instance.subset, + version_str, + str(frame).zfill(self.padding_width), + ext + )) + expected_files.append(path) return expected_files def _get_output_dir(self, render_instance): @@ -216,15 +223,4 @@ class CollectAERender(publish.AbstractCollectRender): if fam not in instance.families: instance.families.append(fam) - settings = get_project_settings(os.getenv("AVALON_PROJECT")) - reviewable_subset_filter = (settings["deadline"] - ["publish"] - ["ProcessSubmittedJobOnFarm"] - ["aov_filter"].get(self.hosts[0])) - for aov_pattern in reviewable_subset_filter: - if re.match(aov_pattern, instance.subset): - instance.families.append("review") - instance.review = True - break - return instance diff --git a/openpype/hosts/aftereffects/plugins/publish/collect_review.py b/openpype/hosts/aftereffects/plugins/publish/collect_review.py new file mode 100644 index 0000000000..a933b9fed2 --- /dev/null +++ b/openpype/hosts/aftereffects/plugins/publish/collect_review.py @@ -0,0 +1,25 @@ +""" +Requires: + None + +Provides: + instance -> family ("review") +""" +import pyblish.api + + +class CollectReview(pyblish.api.ContextPlugin): + """Add review to families if instance created with 'mark_for_review' flag + """ + label = "Collect Review" + hosts = ["aftereffects"] + order = pyblish.api.CollectorOrder + 0.1 + + def process(self, context): + for instance in context: + creator_attributes = instance.data.get("creator_attributes") or {} + if ( + creator_attributes.get("mark_for_review") + and "review" not in instance.data["families"] + ): + instance.data["families"].append("review") diff --git 
a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py index 3c5013b3bd..c21c3623c3 100644 --- a/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py +++ b/openpype/hosts/aftereffects/plugins/publish/collect_workfile.py @@ -53,10 +53,10 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "active": True, "asset": asset_entity["name"], "task": task, - "frameStart": asset_entity["data"]["frameStart"], - "frameEnd": asset_entity["data"]["frameEnd"], - "handleStart": asset_entity["data"]["handleStart"], - "handleEnd": asset_entity["data"]["handleEnd"], + "frameStart": context.data['frameStart'], + "frameEnd": context.data['frameEnd'], + "handleStart": context.data['handleStart'], + "handleEnd": context.data['handleEnd'], "fps": asset_entity["data"]["fps"], "resolutionWidth": asset_entity["data"].get( "resolutionWidth", diff --git a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py index dc65cee61d..c70aa41dbe 100644 --- a/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/extract_local_render.py @@ -21,64 +21,54 @@ class ExtractLocalRender(publish.Extractor): def process(self, instance): stub = get_stub() staging_dir = instance.data["stagingDir"] - self.log.info("staging_dir::{}".format(staging_dir)) + self.log.debug("staging_dir::{}".format(staging_dir)) - # pull file name from Render Queue Output module - render_q = stub.get_render_info() - stub.render(staging_dir) - if not render_q: + # use file names collected from the Render Queue Output module + if not instance.data["file_names"]: raise ValueError("No file extension set in Render Queue") - _, ext = os.path.splitext(os.path.basename(render_q.file_name)) - ext = ext[1:] - first_file_path = None - files = [] - self.log.info("files::{}".format(os.listdir(staging_dir))) - for file_name in os.listdir(staging_dir): - files.append(file_name) - if first_file_path is None: - first_file_path = os.path.join(staging_dir, - file_name) + comp_id = instance.data['comp_id'] + stub.render(staging_dir, comp_id) - resulting_files = files - if len(files) == 1: - resulting_files = files[0] + representations = [] + for file_name in instance.data["file_names"]: + _, ext = os.path.splitext(os.path.basename(file_name)) + ext = ext[1:] - repre_data = { - "frameStart": instance.data["frameStart"], - "frameEnd": instance.data["frameEnd"], - "name": ext, - "ext": ext, - "files": resulting_files, - "stagingDir": staging_dir - } - if instance.data["review"]: - repre_data["tags"] = ["review"] + first_file_path = None + files = [] + for found_file_name in os.listdir(staging_dir): + if not found_file_name.endswith(ext): + continue - instance.data["representations"] = [repre_data] + files.append(found_file_name) + if first_file_path is None: + first_file_path = os.path.join(staging_dir, + found_file_name) - ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - # Generate thumbnail. 
- thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") + if not files: + self.log.info("No rendered files found") + return - args = [ - ffmpeg_path, "-y", - "-i", first_file_path, - "-vf", "scale=300:-1", - "-vframes", "1", - thumbnail_path - ] - self.log.debug("Thumbnail args:: {}".format(args)) - try: - output = run_subprocess(args) - except TypeError: - self.log.warning("Error in creating thumbnail") - six.reraise(*sys.exc_info()) + # a single file must not be wrapped in a list + resulting_files = files + if len(files) == 1: + resulting_files = files[0] - instance.data["representations"].append({ - "name": "thumbnail", - "ext": "jpg", - "files": os.path.basename(thumbnail_path), - "stagingDir": staging_dir, - "tags": ["thumbnail"] - }) + repre_data = { + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "name": ext, + "ext": ext, + "files": resulting_files, + "stagingDir": staging_dir + } + first_repre = not representations + if instance.data["review"] and first_repre: + repre_data["tags"] = ["review"] + thumbnail_path = os.path.join(staging_dir, files[0]) + instance.data["thumbnailSource"] = thumbnail_path + + representations.append(repre_data) + + instance.data["representations"] = representations diff --git a/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml index 13f03a9b9a..edf62a5141 100644 --- a/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml +++ b/openpype/hosts/aftereffects/plugins/publish/help/validate_instance_asset.xml @@ -9,7 +9,7 @@ Context of the given subset doesn't match your current scene. ### How to repair? -You can fix this with "repair" button on the right. +You can fix this with the "repair" button on the right, then refresh Publish at the bottom right. 
### __Detailed Info__ (optional) diff --git a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py index 03ec184524..85a42830a4 100644 --- a/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py +++ b/openpype/hosts/aftereffects/plugins/publish/pre_collect_render.py @@ -1,6 +1,6 @@ import json import pyblish.api -from openpype.hosts.aftereffects.api import list_instances +from openpype.hosts.aftereffects.api import AfterEffectsHost class PreCollectRender(pyblish.api.ContextPlugin): @@ -25,7 +25,7 @@ class PreCollectRender(pyblish.api.ContextPlugin): self.log.debug("Not applicable for New Publisher, skip") return - for inst in list_instances(): + for inst in AfterEffectsHost().list_instances(): if inst.get("creator_attributes"): raise ValueError("Instance created in New publisher, " "cannot be published in Pyblish.\n" diff --git a/openpype/hosts/blender/api/__init__.py b/openpype/hosts/blender/api/__init__.py index e017d74d91..75a11affde 100644 --- a/openpype/hosts/blender/api/__init__.py +++ b/openpype/hosts/blender/api/__init__.py @@ -31,10 +31,13 @@ from .lib import ( lsattrs, read, maintained_selection, + maintained_time, get_selection, # unique_name, ) +from .capture import capture + __all__ = [ "install", @@ -56,9 +59,11 @@ __all__ = [ # Utility functions "maintained_selection", + "maintained_time", "lsattr", "lsattrs", "read", "get_selection", + "capture", # "unique_name", ] diff --git a/openpype/hosts/blender/api/capture.py b/openpype/hosts/blender/api/capture.py new file mode 100644 index 0000000000..849f8ee629 --- /dev/null +++ b/openpype/hosts/blender/api/capture.py @@ -0,0 +1,278 @@ + +"""Blender Capture +Playblasting with independent viewport, camera and display options +""" +import contextlib +import bpy + +from .lib import maintained_time +from .plugin import deselect_all, create_blender_context + + +def capture( + camera=None, + width=None, + height=None, + filename=None, + start_frame=None, + end_frame=None, + step_frame=None, + sound=None, + isolate=None, + maintain_aspect_ratio=True, + overwrite=False, + image_settings=None, + display_options=None +): + """Playblast in an independent window + Arguments: + camera (str, optional): Name of camera, defaults to "Camera" + width (int, optional): Width of output in pixels + height (int, optional): Height of output in pixels + filename (str, optional): Name of output file path. Defaults to current + render output path. + start_frame (int, optional): Defaults to current start frame. + end_frame (int, optional): Defaults to current end frame. + step_frame (int, optional): Defaults to 1. + sound (str, optional): Specify the sound node to be used during + playblast. When None (default) no sound will be used. + isolate (list): List of nodes to isolate upon capturing + maintain_aspect_ratio (bool, optional): Modify height in order to + maintain aspect ratio. + overwrite (bool, optional): Whether or not to overwrite if file + already exists. If disabled and the file exists, an error will be + raised. + image_settings (dict, optional): Supplied image settings for render, + using `ImageSettings` + display_options (dict, optional): Supplied display options for render + """ + + scene = bpy.context.scene + camera = camera or "Camera" + + # Ensure camera exists. + if camera not in scene.objects and camera != "AUTO": + raise RuntimeError("Camera does not exist: {0}".format(camera)) + + # Ensure resolution. 
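+ # Explicit width and height are used as-is; otherwise the scene render + # resolution is the fallback and, when maintain_aspect_ratio is set, the + # height is derived from the requested width and the scene aspect ratio.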
+ if width and height: + maintain_aspect_ratio = False + width = width or scene.render.resolution_x + height = height or scene.render.resolution_y + if maintain_aspect_ratio: + ratio = scene.render.resolution_x / scene.render.resolution_y + height = round(width / ratio) + + # Get frame range. + if start_frame is None: + start_frame = scene.frame_start + if end_frame is None: + end_frame = scene.frame_end + if step_frame is None: + step_frame = 1 + frame_range = (start_frame, end_frame, step_frame) + + if filename is None: + filename = scene.render.filepath + + render_options = { + "filepath": "{}.".format(filename.rstrip(".")), + "resolution_x": width, + "resolution_y": height, + "use_overwrite": overwrite, + } + + with _independent_window() as window: + + applied_view(window, camera, isolate, options=display_options) + + with contextlib.ExitStack() as stack: + stack.enter_context(maintain_camera(window, camera)) + stack.enter_context(applied_frame_range(window, *frame_range)) + stack.enter_context(applied_render_options(window, render_options)) + stack.enter_context(applied_image_settings(window, image_settings)) + stack.enter_context(maintained_time()) + + bpy.ops.render.opengl( + animation=True, + render_keyed_only=False, + sequencer=False, + write_still=False, + view_context=True + ) + + return filename + + +ImageSettings = { + "file_format": "FFMPEG", + "color_mode": "RGB", + "ffmpeg": { + "format": "QUICKTIME", + "use_autosplit": False, + "codec": "H264", + "constant_rate_factor": "MEDIUM", + "gopsize": 18, + "use_max_b_frames": False, + }, +} + + +def isolate_objects(window, objects): + """Isolate selection""" + deselect_all() + + for obj in objects: + obj.select_set(True) + + context = create_blender_context(selected=objects, window=window) + + bpy.ops.view3d.view_axis(context, type="FRONT") + bpy.ops.view3d.localview(context) + + deselect_all() + + +def _apply_options(entity, options): + for option, value in options.items(): + if isinstance(value, dict): + _apply_options(getattr(entity, option), value) + else: + setattr(entity, option, value) + + +def applied_view(window, camera, isolate=None, options=None): + """Apply view options to window.""" + area = window.screen.areas[0] + space = area.spaces[0] + + area.ui_type = "VIEW_3D" + + meshes = [obj for obj in window.scene.objects if obj.type == "MESH"] + + if camera == "AUTO": + space.region_3d.view_perspective = "ORTHO" + isolate_objects(window, isolate or meshes) + else: + isolate_objects(window, isolate or meshes) + space.camera = window.scene.objects.get(camera) + space.region_3d.view_perspective = "CAMERA" + + if isinstance(options, dict): + _apply_options(space, options) + else: + space.shading.type = "SOLID" + space.shading.color_type = "MATERIAL" + space.show_gizmo = False + space.overlay.show_overlays = False + + +@contextlib.contextmanager +def applied_frame_range(window, start, end, step): + """Context manager for setting frame range.""" + # Store current frame range + current_frame_start = window.scene.frame_start + current_frame_end = window.scene.frame_end + current_frame_step = window.scene.frame_step + # Apply frame range + window.scene.frame_start = start + window.scene.frame_end = end + window.scene.frame_step = step + try: + yield + finally: + # Restore frame range + window.scene.frame_start = current_frame_start + window.scene.frame_end = current_frame_end + window.scene.frame_step = current_frame_step + + +@contextlib.contextmanager +def applied_render_options(window, options): + """Context manager for setting 
render options.""" + render = window.scene.render + + # Store current settings + original = {} + for opt in options.copy(): + try: + original[opt] = getattr(render, opt) + except ValueError: + options.pop(opt) + + # Apply settings + _apply_options(render, options) + + try: + yield + finally: + # Restore previous settings + _apply_options(render, original) + + +@contextlib.contextmanager +def applied_image_settings(window, options): + """Context manager to override image settings.""" + + options = options or ImageSettings.copy() + ffmpeg = options.pop("ffmpeg", {}) + render = window.scene.render + + # Store current image settings + original = {} + for opt in options.copy(): + try: + original[opt] = getattr(render.image_settings, opt) + except ValueError: + options.pop(opt) + + # Store current ffmpeg settings + original_ffmpeg = {} + for opt in ffmpeg.copy(): + try: + original_ffmpeg[opt] = getattr(render.ffmpeg, opt) + except ValueError: + ffmpeg.pop(opt) + + # Apply image settings + for opt, value in options.items(): + setattr(render.image_settings, opt, value) + + # Apply ffmpeg settings + for opt, value in ffmpeg.items(): + setattr(render.ffmpeg, opt, value) + + try: + yield + finally: + # Restore previous settings + for opt, value in original.items(): + setattr(render.image_settings, opt, value) + for opt, value in original_ffmpeg.items(): + setattr(render.ffmpeg, opt, value) + + +@contextlib.contextmanager +def maintain_camera(window, camera): + """Context manager to override camera.""" + current_camera = window.scene.camera + if camera in window.scene.objects: + window.scene.camera = window.scene.objects.get(camera) + try: + yield + finally: + window.scene.camera = current_camera + + +@contextlib.contextmanager +def _independent_window(): + """Create capture-window context.""" + context = create_blender_context() + current_windows = set(bpy.context.window_manager.windows) + bpy.ops.wm.window_new(context) + window = list(set(bpy.context.window_manager.windows) - current_windows)[0] + context["window"] = window + try: + yield window + finally: + bpy.ops.wm.window_close(context) diff --git a/openpype/hosts/blender/api/lib.py b/openpype/hosts/blender/api/lib.py index 05912885f7..6526f1fb87 100644 --- a/openpype/hosts/blender/api/lib.py +++ b/openpype/hosts/blender/api/lib.py @@ -284,3 +284,13 @@ def maintained_selection(): # This could happen if the active node was deleted during the # context. log.exception("Failed to set active object.") + + +@contextlib.contextmanager +def maintained_time(): + """Maintain current frame during context.""" + current_time = bpy.context.scene.frame_current + try: + yield + finally: + bpy.context.scene.frame_current = current_time diff --git a/openpype/hosts/blender/api/ops.py b/openpype/hosts/blender/api/ops.py index 481c199db2..91cbfe524f 100644 --- a/openpype/hosts/blender/api/ops.py +++ b/openpype/hosts/blender/api/ops.py @@ -24,7 +24,7 @@ from .workio import OpenFileCacher PREVIEW_COLLECTIONS: Dict = dict() # This seems like a good value to keep the Qt app responsive and doesn't slow -# down Blender. At least on macOS I the interace of Blender gets very laggy if +# down Blender. At least on macOS I the interface of Blender gets very laggy if # you make it smaller. TIMER_INTERVAL: float = 0.01 if platform.system() == "Windows" else 0.1 @@ -84,11 +84,11 @@ class MainThreadItem: self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. 
Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. """ print("Executing process in main thread") if self.done: @@ -382,8 +382,8 @@ class TOPBAR_MT_avalon(bpy.types.Menu): layout.operator(LaunchLibrary.bl_idname, text="Library...") layout.separator() layout.operator(LaunchWorkFiles.bl_idname, text="Work Files...") - # TODO (jasper): maybe add 'Reload Pipeline', 'Reset Frame Range' and - # 'Reset Resolution'? + # TODO (jasper): maybe add 'Reload Pipeline', 'Set Frame Range' and + # 'Set Resolution'? def draw_avalon_menu(self, context): diff --git a/openpype/hosts/blender/api/pipeline.py b/openpype/hosts/blender/api/pipeline.py index c2aee1e653..9cc557c01a 100644 --- a/openpype/hosts/blender/api/pipeline.py +++ b/openpype/hosts/blender/api/pipeline.py @@ -26,6 +26,8 @@ from openpype.lib import ( emit_event ) import openpype.hosts.blender +from openpype.settings import get_project_settings + HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.blender.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") @@ -83,6 +85,31 @@ def uninstall(): ops.unregister() +def show_message(title, message): + from openpype.widgets.message_window import Window + from .ops import BlenderApplication + + BlenderApplication.get_app() + + Window( + parent=None, + title=title, + message=message, + level="warning") + + +def message_window(title, message): + from .ops import ( + MainThreadItem, + execute_in_main_thread, + _process_app_events + ) + + mti = MainThreadItem(show_message, title, message) + execute_in_main_thread(mti) + _process_app_events() + + def set_start_end_frames(): project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] @@ -125,10 +152,36 @@ def set_start_end_frames(): def on_new(): set_start_end_frames() + project = os.environ.get("AVALON_PROJECT") + settings = get_project_settings(project) + + unit_scale_settings = settings.get("blender").get("unit_scale_settings") + unit_scale_enabled = unit_scale_settings.get("enabled") + if unit_scale_enabled: + unit_scale = unit_scale_settings.get("base_file_unit_scale") + bpy.context.scene.unit_settings.scale_length = unit_scale + def on_open(): set_start_end_frames() + project = os.environ.get("AVALON_PROJECT") + settings = get_project_settings(project) + + unit_scale_settings = settings.get("blender").get("unit_scale_settings") + unit_scale_enabled = unit_scale_settings.get("enabled") + apply_on_opening = unit_scale_settings.get("apply_on_opening") + if unit_scale_enabled and apply_on_opening: + unit_scale = unit_scale_settings.get("base_file_unit_scale") + prev_unit_scale = bpy.context.scene.unit_settings.scale_length + + if unit_scale != prev_unit_scale: + bpy.context.scene.unit_settings.scale_length = unit_scale + + message_window( + "Base file unit scale changed", + "Base file unit scale changed to match the project settings.") + @bpy.app.handlers.persistent def _on_save_pre(*args): diff --git a/openpype/hosts/blender/api/plugin.py b/openpype/hosts/blender/api/plugin.py index c59be8d7ff..1274795c6b 100644 --- a/openpype/hosts/blender/api/plugin.py +++ b/openpype/hosts/blender/api/plugin.py @@ -62,7 +62,8 @@ def prepare_data(data, container_name=None): def create_blender_context(active: Optional[bpy.types.Object] = None, - selected: Optional[bpy.types.Object] = None,): + selected: Optional[bpy.types.Object] = None, + window: Optional[bpy.types.Window] = None): """Create a new Blender 
context. If an object is passed as parameter, it is set as selected and active. """ @@ -72,7 +73,9 @@ def create_blender_context(active: Optional[bpy.types.Object] = None, override_context = bpy.context.copy() - for win in bpy.context.window_manager.windows: + windows = [window] if window else bpy.context.window_manager.windows + + for win in windows: for area in win.screen.areas: if area.type == 'VIEW_3D': for region in area.regions: diff --git a/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py new file mode 100644 index 0000000000..559e9ae0ce --- /dev/null +++ b/openpype/hosts/blender/hooks/pre_add_run_python_script_arg.py @@ -0,0 +1,55 @@ +from pathlib import Path + +from openpype.lib import PreLaunchHook + + +class AddPythonScriptToLaunchArgs(PreLaunchHook): + """Add python script to be executed before Blender launch.""" + + # Append after file argument + order = 15 + app_groups = [ + "blender", + ] + + def execute(self): + if not self.launch_context.data.get("python_scripts"): + return + + # Add python script paths to the launch arguments + for python_script_path in self.launch_context.data["python_scripts"]: + self.log.info( + f"Adding python script {python_script_path} to launch" + ) + # Check that the script path exists + python_script_path = Path(python_script_path) + if not python_script_path.exists(): + self.log.warning( + f"Python script {python_script_path} doesn't exist. " + "Skipped..." + ) + continue + + if "--" in self.launch_context.launch_args: + # Insert before separator + separator_index = self.launch_context.launch_args.index("--") + self.launch_context.launch_args.insert( + separator_index, + "-P", + ) + self.launch_context.launch_args.insert( + separator_index + 1, + python_script_path.as_posix(), + ) + else: + self.launch_context.launch_args.extend( + ["-P", python_script_path.as_posix()] + ) + + # Ensure separator + if "--" not in self.launch_context.launch_args: + self.launch_context.launch_args.append("--") + + self.launch_context.launch_args.extend( + [*self.launch_context.data.get("script_args", [])] + ) diff --git a/openpype/hosts/blender/plugins/create/create_review.py b/openpype/hosts/blender/plugins/create/create_review.py new file mode 100644 index 0000000000..bf4ea6a7cd --- /dev/null +++ b/openpype/hosts/blender/plugins/create/create_review.py @@ -0,0 +1,47 @@ +"""Create review.""" + +import bpy + +from openpype.pipeline import legacy_io +from openpype.hosts.blender.api import plugin, lib, ops +from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES + + +class CreateReview(plugin.Creator): + """Create review instance""" + + name = "reviewDefault" + label = "Review" + family = "review" + icon = "video-camera" + + def process(self): + """ Run the creator on Blender main thread""" + mti = ops.MainThreadItem(self._process) + ops.execute_in_main_thread(mti) + + def _process(self): + # Get Instance Container or create it if it does not exist + instances = bpy.data.collections.get(AVALON_INSTANCES) + if not instances: + instances = bpy.data.collections.new(name=AVALON_INSTANCES) + bpy.context.scene.collection.children.link(instances) + + # Create instance object + asset = self.data["asset"] + subset = self.data["subset"] + name = plugin.asset_name(asset, subset) + asset_group = bpy.data.collections.new(name=name) + instances.children.link(asset_group) + self.data['task'] = legacy_io.Session.get('AVALON_TASK') + lib.imprint(asset_group, self.data) + + if (self.options or {}).get("useSelection"): 
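+ # link the current selection into the review collection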
selected = lib.get_selection() + for obj in selected: + asset_group.objects.link(obj) + elif (self.options or {}).get("asset_group"): + obj = (self.options or {}).get("asset_group") + asset_group.objects.link(obj) + + return asset_group diff --git a/openpype/hosts/blender/plugins/load/import_workfile.py b/openpype/hosts/blender/plugins/load/import_workfile.py index 618fb83e31..bbdf1c7ea0 100644 --- a/openpype/hosts/blender/plugins/load/import_workfile.py +++ b/openpype/hosts/blender/plugins/load/import_workfile.py @@ -44,7 +44,7 @@ class AppendBlendLoader(plugin.AssetLoader): """ representations = ["blend"] - families = ["*"] + families = ["workfile"] label = "Append Workfile" order = 9 @@ -68,7 +68,7 @@ class ImportBlendLoader(plugin.AssetLoader): """ representations = ["blend"] - families = ["*"] + families = ["workfile"] label = "Import Workfile" order = 9 diff --git a/openpype/hosts/blender/plugins/load/load_abc.py b/openpype/hosts/blender/plugins/load/load_abc.py index 1b2e800769..c1d73eff02 100644 --- a/openpype/hosts/blender/plugins/load/load_abc.py +++ b/openpype/hosts/blender/plugins/load/load_abc.py @@ -65,37 +65,19 @@ class CacheModelLoader(plugin.AssetLoader): imported = lib.get_selection() - empties = [obj for obj in imported if obj.type == 'EMPTY'] - - container = None - - for empty in empties: - if not empty.parent: - container = empty - break - - assert container, "No asset group found" - # Children must be linked before parents, # otherwise the hierarchy will break objects = [] - nodes = list(container.children) - for obj in nodes: + for obj in imported: obj.parent = asset_group - bpy.data.objects.remove(container) - - for obj in nodes: + for obj in imported: objects.append(obj) - nodes.extend(list(obj.children)) + imported.extend(list(obj.children)) objects.reverse() - for obj in objects: - parent.objects.link(obj) - collection.objects.unlink(obj) - for obj in objects: name = obj.name obj.name = f"{group_name}:{name}" @@ -138,13 +120,14 @@ class CacheModelLoader(plugin.AssetLoader): group_name = plugin.asset_name(asset, subset, unique_number) namespace = namespace or f"{asset}_{unique_number}" - avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) - if not avalon_container: - avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) - bpy.context.scene.collection.children.link(avalon_container) + avalon_containers = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_containers: + avalon_containers = bpy.data.collections.new( + name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_containers) asset_group = bpy.data.objects.new(group_name, object_data=None) - avalon_container.objects.link(asset_group) + avalon_containers.objects.link(asset_group) objects = self._process(libpath, asset_group, group_name) diff --git a/openpype/hosts/blender/plugins/load/load_camera_abc.py b/openpype/hosts/blender/plugins/load/load_camera_abc.py new file mode 100644 index 0000000000..21b48f409f --- /dev/null +++ b/openpype/hosts/blender/plugins/load/load_camera_abc.py @@ -0,0 +1,209 @@ +"""Load an asset in Blender from an Alembic file.""" + +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import bpy + +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID, +) +from openpype.hosts.blender.api import plugin, lib +from openpype.hosts.blender.api.pipeline import ( + AVALON_CONTAINERS, + AVALON_PROPERTY, +) + + +class AbcCameraLoader(plugin.AssetLoader): + """Load a camera 
from an Alembic file. + + Stores the imported asset in an empty named after the asset. + """ + + families = ["camera"] + representations = ["abc"] + + label = "Load Camera (ABC)" + icon = "code-fork" + color = "orange" + + def _remove(self, asset_group): + objects = list(asset_group.children) + + for obj in objects: + if obj.type == "CAMERA": + bpy.data.cameras.remove(obj.data) + elif obj.type == "EMPTY": + objects.extend(obj.children) + bpy.data.objects.remove(obj) + + def _process(self, libpath, asset_group, group_name): + plugin.deselect_all() + + bpy.ops.wm.alembic_import(filepath=libpath) + + objects = lib.get_selection() + + for obj in objects: + obj.parent = asset_group + + for obj in objects: + name = obj.name + obj.name = f"{group_name}:{name}" + if obj.type != "EMPTY": + name_data = obj.data.name + obj.data.name = f"{group_name}:{name_data}" + + if not obj.get(AVALON_PROPERTY): + obj[AVALON_PROPERTY] = dict() + + avalon_info = obj[AVALON_PROPERTY] + avalon_info.update({"container_name": group_name}) + + plugin.deselect_all() + + return objects + + def process_asset( + self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None, + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + + asset_name = plugin.asset_name(asset, subset) + unique_number = plugin.get_unique_number(asset, subset) + group_name = plugin.asset_name(asset, subset, unique_number) + namespace = namespace or f"{asset}_{unique_number}" + + avalon_container = bpy.data.collections.get(AVALON_CONTAINERS) + if not avalon_container: + avalon_container = bpy.data.collections.new(name=AVALON_CONTAINERS) + bpy.context.scene.collection.children.link(avalon_container) + + asset_group = bpy.data.objects.new(group_name, object_data=None) + avalon_container.objects.link(asset_group) + + self._process(libpath, asset_group, group_name) + + objects = [] + nodes = list(asset_group.children) + + for obj in nodes: + objects.append(obj) + nodes.extend(list(obj.children)) + + bpy.context.scene.collection.objects.link(asset_group) + + asset_group[AVALON_PROPERTY] = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "name": name, + "namespace": namespace or "", + "loader": str(self.__class__.__name__), + "representation": str(context["representation"]["_id"]), + "libpath": libpath, + "asset_name": asset_name, + "parent": str(context["representation"]["parent"]), + "family": context["representation"]["context"]["family"], + "objectName": group_name, + } + + self[:] = objects + return objects + + def exec_update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! 
+ """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + libpath = Path(get_representation_path(representation)) + extension = libpath.suffix.lower() + + self.log.info( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert asset_group, ( + f"The asset is not loaded: {container['objectName']}") + assert libpath, ( + f"No existing library file found for {container['objectName']}") + assert libpath.is_file(), f"The file doesn't exist: {libpath}" + assert extension in plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}") + + metadata = asset_group.get(AVALON_PROPERTY) + group_libpath = metadata["libpath"] + + normalized_group_libpath = str( + Path(bpy.path.abspath(group_libpath)).resolve()) + normalized_libpath = str( + Path(bpy.path.abspath(str(libpath))).resolve()) + self.log.debug( + "normalized_group_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_group_libpath, + normalized_libpath, + ) + if normalized_group_libpath == normalized_libpath: + self.log.info("Library already loaded, not updating...") + return + + mat = asset_group.matrix_basis.copy() + + self._remove(asset_group) + self._process(str(libpath), asset_group, object_name) + + asset_group.matrix_basis = mat + + metadata["libpath"] = str(libpath) + metadata["representation"] = str(representation["_id"]) + + def exec_remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (openpype:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! + """ + object_name = container["objectName"] + asset_group = bpy.data.objects.get(object_name) + + if not asset_group: + return False + + self._remove(asset_group) + + bpy.data.objects.remove(asset_group) + + return True diff --git a/openpype/hosts/blender/plugins/publish/collect_review.py b/openpype/hosts/blender/plugins/publish/collect_review.py new file mode 100644 index 0000000000..d6abd9d967 --- /dev/null +++ b/openpype/hosts/blender/plugins/publish/collect_review.py @@ -0,0 +1,64 @@ +import bpy + +import pyblish.api +from openpype.pipeline import legacy_io + + +class CollectReview(pyblish.api.InstancePlugin): + """Collect Review data + + """ + + order = pyblish.api.CollectorOrder + 0.3 + label = "Collect Review Data" + families = ["review"] + + def process(self, instance): + + self.log.debug(f"instance: {instance}") + + # get cameras + cameras = [ + obj + for obj in instance + if isinstance(obj, bpy.types.Object) and obj.type == "CAMERA" + ] + + assert len(cameras) == 1, ( + f"Not a single camera found in extraction: {cameras}" + ) + camera = cameras[0].name + self.log.debug(f"camera: {camera}") + + # get isolate objects list from meshes instance members . 
isolate_objects = [ + obj + for obj in instance + if isinstance(obj, bpy.types.Object) and obj.type == "MESH" + ] + + if not instance.data.get("remove"): + + task = legacy_io.Session.get("AVALON_TASK") + + instance.data.update({ + "subset": f"{task}Review", + "review_camera": camera, + "frameStart": instance.context.data["frameStart"], + "frameEnd": instance.context.data["frameEnd"], + "fps": instance.context.data["fps"], + "isolate": isolate_objects, + }) + + self.log.debug(f"instance data: {instance.data}") + + # TODO : Collect audio + audio_tracks = [] + instance.data["audio"] = [] + for track in audio_tracks: + instance.data["audio"].append( + { + "offset": track.offset.get(), + "filename": track.filename.get(), + } + ) diff --git a/openpype/hosts/blender/plugins/publish/extract_playblast.py b/openpype/hosts/blender/plugins/publish/extract_playblast.py new file mode 100644 index 0000000000..196e75b8cc --- /dev/null +++ b/openpype/hosts/blender/plugins/publish/extract_playblast.py @@ -0,0 +1,122 @@ +import os +import clique + +import bpy + +import pyblish.api +from openpype.pipeline import publish +from openpype.hosts.blender.api import capture +from openpype.hosts.blender.api.lib import maintained_time + + +class ExtractPlayblast(publish.Extractor): + """ + Extract viewport playblast. + + Takes review camera and creates a review QuickTime video based on + viewport capture. + """ + + label = "Extract Playblast" + hosts = ["blender"] + families = ["review"] + optional = True + order = pyblish.api.ExtractorOrder + 0.01 + + def process(self, instance): + self.log.info("Extracting capture..") + + self.log.info(instance.data) + + # get scene fps + fps = instance.data.get("fps") + if fps is None: + fps = bpy.context.scene.render.fps + instance.data["fps"] = fps + + self.log.info(f"fps: {fps}") + + # If start and end frames cannot be determined, + # get them from Blender timeline. + start = instance.data.get("frameStart", bpy.context.scene.frame_start) + end = instance.data.get("frameEnd", bpy.context.scene.frame_end) + + self.log.info(f"start: {start}, end: {end}") + assert end > start, "Invalid time range!" 
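+ # a playblast needs a range of at least two frames; a single-frame still + # is produced by the thumbnail extractor instead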
+ + # get cameras + camera = instance.data.get("review_camera", None) + + # get isolate objects list + isolate = instance.data.get("isolate", None) + + # get output path + stagingdir = self.staging_dir(instance) + filename = instance.name + path = os.path.join(stagingdir, filename) + + self.log.info(f"Outputting images to {path}") + + project_settings = instance.context.data["project_settings"]["blender"] + presets = project_settings["publish"]["ExtractPlayblast"]["presets"] + preset = presets.get("default") + preset.update({ + "camera": camera, + "start_frame": start, + "end_frame": end, + "filename": path, + "overwrite": True, + "isolate": isolate, + }) + preset.setdefault( + "image_settings", + { + "file_format": "PNG", + "color_mode": "RGB", + "color_depth": "8", + "compression": 15, + }, + ) + + with maintained_time(): + path = capture(**preset) + + self.log.debug(f"playblast path {path}") + + collected_files = os.listdir(stagingdir) + collections, remainder = clique.assemble( + collected_files, + patterns=[f"{filename}\\.{clique.DIGITS_PATTERN}\\.png$"], + ) + + if len(collections) > 1: + raise RuntimeError( + f"More than one collection found in stagingdir: {stagingdir}" + ) + elif len(collections) == 0: + raise RuntimeError( + f"No collection found in stagingdir: {stagingdir}" + ) + + frame_collection = collections[0] + + self.log.info(f"We found collection of interest {frame_collection}") + + instance.data.setdefault("representations", []) + + tags = ["review"] + if not instance.data.get("keepImages"): + tags.append("delete") + + representation = { + "name": "png", + "ext": "png", + "files": list(frame_collection), + "stagingDir": stagingdir, + "frameStart": start, + "frameEnd": end, + "fps": fps, + "tags": tags, + "camera_name": camera + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/blender/plugins/publish/extract_thumbnail.py b/openpype/hosts/blender/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..65c3627375 --- /dev/null +++ b/openpype/hosts/blender/plugins/publish/extract_thumbnail.py @@ -0,0 +1,99 @@ +import os +import glob + +import pyblish.api +from openpype.pipeline import publish +from openpype.hosts.blender.api import capture +from openpype.hosts.blender.api.lib import maintained_time + +import bpy + + +class ExtractThumbnail(publish.Extractor): + """Extract viewport thumbnail. + + Takes review camera and creates a thumbnail based on viewport + capture. 
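+ Only a single frame (the review start frame) is captured.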
+ + """ + + label = "Extract Thumbnail" + hosts = ["blender"] + families = ["review"] + order = pyblish.api.ExtractorOrder + 0.01 + presets = {} + + def process(self, instance): + self.log.info("Extracting capture..") + + stagingdir = self.staging_dir(instance) + filename = instance.name + path = os.path.join(stagingdir, filename) + + self.log.info(f"Outputting images to {path}") + + camera = instance.data.get("review_camera", "AUTO") + start = instance.data.get("frameStart", bpy.context.scene.frame_start) + family = instance.data.get("family") + isolate = instance.data("isolate", None) + + preset = self.presets.get(family, {}) + + preset.update({ + "camera": camera, + "start_frame": start, + "end_frame": start, + "filename": path, + "overwrite": True, + "isolate": isolate, + }) + preset.setdefault( + "image_settings", + { + "file_format": "JPEG", + "color_mode": "RGB", + "quality": 100, + }, + ) + + with maintained_time(): + path = capture(**preset) + + thumbnail = os.path.basename(self._fix_output_path(path)) + + self.log.info(f"thumbnail: {thumbnail}") + + instance.data.setdefault("representations", []) + + representation = { + "name": "thumbnail", + "ext": "jpg", + "files": thumbnail, + "stagingDir": stagingdir, + "thumbnail": True + } + instance.data["representations"].append(representation) + + def _fix_output_path(self, filepath): + """"Workaround to return correct filepath. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning( + "Playblast did not result in output path. " + "Playblast is probably interrupted." + ) + return None + + if not os.path.exists(filepath): + files = glob.glob(f"{filepath}.*.jpg") + + if not files: + raise RuntimeError(f"Couldn't find playblast from: {filepath}") + filepath = max(files, key=os.path.getmtime) + + return filepath diff --git a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py index 84b9dd1a6e..48c267fd18 100644 --- a/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py +++ b/openpype/hosts/blender/plugins/publish/validate_camera_zero_keyframe.py @@ -19,7 +19,6 @@ class ValidateCameraZeroKeyframe(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["camera"] - version = (0, 1, 0) label = "Zero Keyframe" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py index cee855671d..edf47193be 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_has_uv.py @@ -14,7 +14,6 @@ class ValidateMeshHasUvs(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - category = "geometry" label = "Mesh Has UV's" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] optional = True diff --git a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py index 45ac08811d..618feb95c1 100644 --- a/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py +++ b/openpype/hosts/blender/plugins/publish/validate_mesh_no_negative_scale.py @@ -14,7 +14,6 @@ class 
ValidateMeshNoNegativeScale(pyblish.api.Validator): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - category = "geometry" label = "Mesh No Negative Scale" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py index f5dc9fdd5c..1a98ec4c1d 100644 --- a/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py +++ b/openpype/hosts/blender/plugins/publish/validate_no_colons_in_name.py @@ -19,7 +19,6 @@ class ValidateNoColonsInName(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model", "rig"] - version = (0, 1, 0) label = "No Colons in names" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py index 742826d3d9..66ef731e6e 100644 --- a/openpype/hosts/blender/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/blender/plugins/publish/validate_transform_zero.py @@ -21,7 +21,6 @@ class ValidateTransformZero(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ["blender"] families = ["model"] - version = (0, 1, 0) label = "Transform Zero" actions = [openpype.hosts.blender.api.action.SelectInvalidAction] diff --git a/openpype/hosts/celaction/hooks/pre_celaction_setup.py b/openpype/hosts/celaction/hooks/pre_celaction_setup.py index 62cebf99ed..96e784875c 100644 --- a/openpype/hosts/celaction/hooks/pre_celaction_setup.py +++ b/openpype/hosts/celaction/hooks/pre_celaction_setup.py @@ -38,8 +38,9 @@ class CelactionPrelaunchHook(PreLaunchHook): ) path_to_cli = os.path.join(CELACTION_SCRIPTS_DIR, "publish_cli.py") - subproces_args = get_openpype_execute_args("run", path_to_cli) - openpype_executable = subproces_args.pop(0) + subprocess_args = get_openpype_execute_args("run", path_to_cli) + openpype_executable = subprocess_args.pop(0) + workfile_settings = self.get_workfile_settings() winreg.SetValueEx( hKey, @@ -49,20 +50,34 @@ class CelactionPrelaunchHook(PreLaunchHook): openpype_executable ) - parameters = subproces_args + [ - "--currentFile", "*SCENE*", - "--chunk", "*CHUNK*", - "--frameStart", "*START*", - "--frameEnd", "*END*", - "--resolutionWidth", "*X*", - "--resolutionHeight", "*Y*" + # add required arguments for workfile path + parameters = subprocess_args + [ + "--currentFile", "*SCENE*" ] + # Add custom parameters from workfile settings + if "render_chunk" in workfile_settings["submission_overrides"]: + parameters += [ + "--chunk", "*CHUNK*" + ] + if "resolution" in workfile_settings["submission_overrides"]: + parameters += [ + "--resolutionWidth", "*X*", + "--resolutionHeight", "*Y*" + ] + if "frame_range" in workfile_settings["submission_overrides"]: + parameters += [ + "--frameStart", "*START*", + "--frameEnd", "*END*" + ] + winreg.SetValueEx( hKey, "SubmitParametersTitle", 0, winreg.REG_SZ, subprocess.list2cmdline(parameters) ) + self.log.debug(f"__ parameters: \"{parameters}\"") + # setting resolution parameters path_submit = "\\".join([ path_user_settings, "Dialogs", "SubmitOutput" @@ -135,3 +150,6 @@ class CelactionPrelaunchHook(PreLaunchHook): self.log.info(f"Workfile to open: \"{workfile_path}\"") return workfile_path + + def get_workfile_settings(self): + return self.data["project_settings"]["celaction"]["workfile"] diff --git 
a/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py b/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py index bf97dd744b..54dea15dff 100644 --- a/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py +++ b/openpype/hosts/celaction/plugins/publish/collect_celaction_cli_kwargs.py @@ -1,5 +1,4 @@ import pyblish.api -import argparse import sys from pprint import pformat @@ -11,20 +10,40 @@ class CollectCelactionCliKwargs(pyblish.api.Collector): order = pyblish.api.Collector.order - 0.1 def process(self, context): - parser = argparse.ArgumentParser(prog="celaction") - parser.add_argument("--currentFile", - help="Pass file to Context as `currentFile`") - parser.add_argument("--chunk", - help=("Render chanks on farm")) - parser.add_argument("--frameStart", - help=("Start of frame range")) - parser.add_argument("--frameEnd", - help=("End of frame range")) - parser.add_argument("--resolutionWidth", - help=("Width of resolution")) - parser.add_argument("--resolutionHeight", - help=("Height of resolution")) - passing_kwargs = parser.parse_args(sys.argv[1:]).__dict__ + args = list(sys.argv[1:]) + self.log.info(str(args)) + missing_kwargs = [] + passing_kwargs = {} + for key in ( + "chunk", + "frameStart", + "frameEnd", + "resolutionWidth", + "resolutionHeight", + "currentFile", + ): + arg_key = f"--{key}" + if arg_key not in args: + missing_kwargs.append(key) + continue + arg_idx = args.index(arg_key) + args.pop(arg_idx) + if key != "currentFile": + value = args.pop(arg_idx) + else: + path_parts = [] + while arg_idx < len(args): + path_parts.append(args.pop(arg_idx)) + value = " ".join(path_parts).strip('"') + + passing_kwargs[key] = value + + if missing_kwargs: + self.log.debug("Missing arguments {}".format( + ", ".join( + [f'"{key}"' for key in missing_kwargs] + ) + )) self.log.info("Storing kwargs ...") self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs))) diff --git a/openpype/hosts/flame/api/lib.py b/openpype/hosts/flame/api/lib.py index 6aca5c5ce6..ab713aed84 100644 --- a/openpype/hosts/flame/api/lib.py +++ b/openpype/hosts/flame/api/lib.py @@ -773,7 +773,7 @@ class MediaInfoFile(object): if logger: self.log = logger - # test if `dl_get_media_info` paht exists + # test if `dl_get_media_info` path exists self._validate_media_script_path() # derivate other feed variables @@ -993,7 +993,7 @@ class MediaInfoFile(object): def _validate_media_script_path(self): if not os.path.isfile(self.MEDIA_SCRIPT_PATH): - raise IOError("Media Scirpt does not exist: `{}`".format( + raise IOError("Media Script does not exist: `{}`".format( self.MEDIA_SCRIPT_PATH)) def _generate_media_info_file(self, fpath, feed_ext, feed_dir): diff --git a/openpype/hosts/flame/api/pipeline.py b/openpype/hosts/flame/api/pipeline.py index 3a23389961..d6fbf750ba 100644 --- a/openpype/hosts/flame/api/pipeline.py +++ b/openpype/hosts/flame/api/pipeline.py @@ -38,7 +38,7 @@ def install(): pyblish.register_plugin_path(PUBLISH_PATH) register_loader_plugin_path(LOAD_PATH) register_creator_plugin_path(CREATE_PATH) - log.info("OpenPype Flame plug-ins registred ...") + log.info("OpenPype Flame plug-ins registered ...") # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) diff --git a/openpype/hosts/flame/api/plugin.py b/openpype/hosts/flame/api/plugin.py index b1db612671..3289187fa0 100644 --- a/openpype/hosts/flame/api/plugin.py +++ b/openpype/hosts/flame/api/plugin.py @@ -10,6 +10,7 @@ from qtpy 
import QtCore, QtWidgets from openpype import style from openpype.lib import Logger, StringTemplate from openpype.pipeline import LegacyCreator, LoaderPlugin +from openpype.pipeline.colorspace import get_remapped_colorspace_to_native from openpype.settings import get_current_project_settings from . import constants @@ -157,7 +158,7 @@ class CreatorWidget(QtWidgets.QDialog): # convert label text to normal capitalized text with spaces label_text = self.camel_case_split(text) - # assign the new text to lable widget + # assign the new text to label widget label = QtWidgets.QLabel(label_text) label.setObjectName("LineLabel") @@ -345,8 +346,8 @@ class PublishableClip: "track": "sequence", } - # parents search patern - parents_search_patern = r"\{([a-z]*?)\}" + # parents search pattern + parents_search_pattern = r"\{([a-z]*?)\}" # default templates for non-ui use rename_default = False @@ -445,7 +446,7 @@ class PublishableClip: return self.current_segment def _populate_segment_default_data(self): - """ Populate default formating data from segment. """ + """ Populate default formatting data from segment. """ self.current_segment_default_data = { "_folder_": "shots", @@ -538,7 +539,7 @@ class PublishableClip: if not self.index_from_segment: self.count_steps *= self.rename_index - hierarchy_formating_data = {} + hierarchy_formatting_data = {} hierarchy_data = deepcopy(self.hierarchy_data) _data = self.current_segment_default_data.copy() if self.ui_inputs: @@ -552,7 +553,7 @@ class PublishableClip: # mark review layer if self.review_track and ( self.review_track not in self.review_track_default): - # if review layer is defined and not the same as defalut + # if review layer is defined and not the same as default self.review_layer = self.review_track # shot num calculate @@ -578,13 +579,13 @@ class PublishableClip: # fill up pythonic expresisons in hierarchy data for k, _v in hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = hierarchy_data + hierarchy_formatting_data = hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -615,27 +616,27 @@ class PublishableClip: # in case track name and subset name is the same then add if self.subset_name == self.track_name: _hero_data["subset"] = self.subset - # assing data to return hierarchy data to tag + # assign data to return hierarchy data to tag tag_hierarchy_data = _hero_data break # add data to return data dict self.marker_data.update(tag_hierarchy_data) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve marker data from hierarchy data and templates. 
""" # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) # remove shot from hierarchy data: is not needed anymore - hierarchy_formating_data.pop("shot") + hierarchy_formatting_data.pop("shot") return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": [self.family] @@ -650,17 +651,17 @@ class PublishableClip: type ) - # first collect formating data to use for formating template - formating_data = {} + # first collect formatting data to use for formatting template + formatting_data = {} for _k, _v in self.hierarchy_data.items(): value = _v["value"].format( **self.current_segment_default_data) - formating_data[_k] = value + formatting_data[_k] = value return { "entity_type": entity_type, "entity_name": template.format( - **formating_data + **formatting_data ) } @@ -668,9 +669,9 @@ class PublishableClip: """ Create parents and return it in list. """ self.parents = [] - patern = re.compile(self.parents_search_patern) + pattern = re.compile(self.parents_search_pattern) - par_split = [(patern.findall(t).pop(), t) + par_split = [(pattern.findall(t).pop(), t) for t in self.hierarchy.split("/")] for type, template in par_split: @@ -701,6 +702,38 @@ class ClipLoader(LoaderPlugin): ] _mapping = None + _host_settings = None + + def apply_settings(cls, project_settings, system_settings): + + plugin_type_settings = ( + project_settings + .get("flame", {}) + .get("load", {}) + ) + + if not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + print(" - is disabled by preset") + elif option == "representations": + continue + else: + print(" - setting `{}`: `{}`".format(option, value)) + setattr(cls, option, value) def get_colorspace(self, context): """Get colorspace name @@ -738,15 +771,26 @@ class ClipLoader(LoaderPlugin): Returns: str: native colorspace name defined in mapping or None """ + # TODO: rewrite to support only pipeline's remapping + if not cls._host_settings: + cls._host_settings = get_current_project_settings()["flame"] + + # [Deprecated] way of remapping if not cls._mapping: - settings = get_current_project_settings()["flame"] - mapping = settings["imageio"]["profilesMapping"]["inputs"] + mapping = ( + cls._host_settings["imageio"]["profilesMapping"]["inputs"]) cls._mapping = { input["ocioName"]: input["flameName"] for input in mapping } - return cls._mapping.get(input_colorspace) + native_name = cls._mapping.get(input_colorspace) + + if not native_name: + native_name = get_remapped_colorspace_to_native( + input_colorspace, "flame", cls._host_settings["imageio"]) + + return native_name class OpenClipSolver(flib.MediaInfoFile): @@ -871,22 +915,22 @@ class OpenClipSolver(flib.MediaInfoFile): ): return - formating_data = self._update_formating_data( + 
formatting_data = self._update_formatting_data( layerName=layer_name, layerUID=layer_uid ) name_obj.text = StringTemplate( self.layer_rename_template - ).format(formating_data) + ).format(formatting_data) - def _update_formating_data(self, **kwargs): - """ Updating formating data for layer rename + def _update_formatting_data(self, **kwargs): + """ Updating formatting data for layer rename Attributes: - key=value (optional): will be included to formating data + key=value (optional): will be included to formatting data as {key: value} Returns: - dict: anatomy context data for formating + dict: anatomy context data for formatting """ self.log.debug(">> self.clip_data: {}".format(self.clip_data)) clip_name_obj = self.clip_data.find("name") diff --git a/openpype/hosts/flame/api/scripts/wiretap_com.py b/openpype/hosts/flame/api/scripts/wiretap_com.py index 4825ff4386..a74172c405 100644 --- a/openpype/hosts/flame/api/scripts/wiretap_com.py +++ b/openpype/hosts/flame/api/scripts/wiretap_com.py @@ -203,7 +203,7 @@ class WireTapCom(object): list: all available volumes in server Rises: - AttributeError: unable to get any volumes childs from server + AttributeError: unable to get any volumes children from server """ root = WireTapNodeHandle(self._server, "/volumes") children_num = WireTapInt(0) diff --git a/openpype/hosts/flame/api/utils.py b/openpype/hosts/flame/api/utils.py index fb8bdee42d..80a5c47e89 100644 --- a/openpype/hosts/flame/api/utils.py +++ b/openpype/hosts/flame/api/utils.py @@ -108,7 +108,7 @@ def _sync_utility_scripts(env=None): shutil.copy2(src, dst) except (PermissionError, FileExistsError) as msg: log.warning( - "Not able to coppy to: `{}`, Problem with: `{}`".format( + "Not able to copy to: `{}`, Problem with: `{}`".format( dst, msg ) diff --git a/openpype/hosts/flame/hooks/pre_flame_setup.py b/openpype/hosts/flame/hooks/pre_flame_setup.py index 713daf1031..83110bb6b5 100644 --- a/openpype/hosts/flame/hooks/pre_flame_setup.py +++ b/openpype/hosts/flame/hooks/pre_flame_setup.py @@ -47,6 +47,17 @@ class FlamePrelaunch(PreLaunchHook): imageio_flame = project_settings["flame"]["imageio"] + # Check whether 'enabled' key from host imageio settings exists + # so we can tell if host is using the new colormanagement framework. 
+ # If the 'enabled' key isn't found we want 'colormanaged' set to True + # because prior to the key existing we always did colormanagement for + # Flame + colormanaged = imageio_flame.get("enabled") + # if key was not found, set to True + # ensuring backward compatibility + if colormanaged is None: + colormanaged = True + # get user name and host name user_name = get_openpype_username() user_name = user_name.replace(".", "_") @@ -68,9 +79,7 @@ class FlamePrelaunch(PreLaunchHook): "FrameWidth": int(width), "FrameHeight": int(height), "AspectRatio": float((width / height) * _db_p_data["pixelAspect"]), - "FrameRate": self._get_flame_fps(fps), - "FrameDepth": str(imageio_flame["project"]["frameDepth"]), - "FieldDominance": str(imageio_flame["project"]["fieldDominance"]) + "FrameRate": self._get_flame_fps(fps) } data_to_script = { @@ -78,7 +87,6 @@ class FlamePrelaunch(PreLaunchHook): "host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname, "volume_name": volume_name, "group_name": _env.get("FLAME_WIRETAP_GROUP"), - "color_policy": str(imageio_flame["project"]["colourPolicy"]), # from project "project_name": project_name, @@ -86,6 +94,16 @@ class FlamePrelaunch(PreLaunchHook): "project_data": project_data } + # add color management data + if colormanaged: + project_data.update({ + "FrameDepth": str(imageio_flame["project"]["frameDepth"]), + "FieldDominance": str( + imageio_flame["project"]["fieldDominance"]) + }) + data_to_script["color_policy"] = str( + imageio_flame["project"]["colourPolicy"]) + self.log.info(pformat(dict(_env))) self.log.info(pformat(data_to_script)) @@ -153,7 +171,7 @@ class FlamePrelaunch(PreLaunchHook): def _add_pythonpath(self): pythonpath = self.launch_context.env.get("PYTHONPATH") - # separate it explicity by `;` that is what we use in settings + # separate it explicitly by `;` that is what we use in settings new_pythonpath = self.flame_pythonpath.split(os.pathsep) new_pythonpath += pythonpath.split(os.pathsep) diff --git a/openpype/hosts/flame/plugins/create/create_shot_clip.py b/openpype/hosts/flame/plugins/create/create_shot_clip.py index 4fb041a4b2..b01354c313 100644 --- a/openpype/hosts/flame/plugins/create/create_shot_clip.py +++ b/openpype/hosts/flame/plugins/create/create_shot_clip.py @@ -209,7 +209,7 @@ class CreateShotClip(opfapi.Creator): "type": "QComboBox", "label": "Subset Name", "target": "ui", - "toolTip": "chose subset name patern, if [ track name ] is selected, name of track layer will be used", # noqa + "toolTip": "choose subset name pattern, if [ track name ] is selected, name of track layer will be used", # noqa "order": 0}, "subsetFamily": { "value": ["plate", "take"], diff --git a/openpype/hosts/flame/plugins/load/load_clip.py b/openpype/hosts/flame/plugins/load/load_clip.py index 6f47c23d57..dfb2d2b6f0 100644 --- a/openpype/hosts/flame/plugins/load/load_clip.py +++ b/openpype/hosts/flame/plugins/load/load_clip.py @@ -4,6 +4,10 @@ import flame from pprint import pformat import openpype.hosts.flame.api as opfapi from openpype.lib import StringTemplate +from openpype.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) class LoadClip(opfapi.ClipLoader): @@ -14,7 +18,10 @@ class LoadClip(opfapi.ClipLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) label = "Load as clip" order = -10 @@ -54,9 +61,9 @@ class 
LoadClip(opfapi.ClipLoader): self.layer_rename_template = self.layer_rename_template.replace( "output", "representation") - formating_data = deepcopy(context["representation"]["context"]) + formatting_data = deepcopy(context["representation"]["context"]) clip_name = StringTemplate(self.clip_name_template).format( - formating_data) + formatting_data) # convert colorspace with ocio to flame mapping # in imageio flame section @@ -81,7 +88,7 @@ class LoadClip(opfapi.ClipLoader): "version": "v{:0>3}".format(version_name), "layer_rename_template": self.layer_rename_template, "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formating_data + "context_data": formatting_data } self.log.debug(pformat( loading_context diff --git a/openpype/hosts/flame/plugins/load/load_clip_batch.py b/openpype/hosts/flame/plugins/load/load_clip_batch.py index 5975c6e42f..5c5a77f0d0 100644 --- a/openpype/hosts/flame/plugins/load/load_clip_batch.py +++ b/openpype/hosts/flame/plugins/load/load_clip_batch.py @@ -4,7 +4,10 @@ import flame from pprint import pformat import openpype.hosts.flame.api as opfapi from openpype.lib import StringTemplate - +from openpype.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) class LoadClipBatch(opfapi.ClipLoader): """Load a subset to timeline as clip @@ -14,7 +17,10 @@ class LoadClipBatch(opfapi.ClipLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) label = "Load as clip to current batch" order = -10 @@ -52,11 +58,11 @@ class LoadClipBatch(opfapi.ClipLoader): self.layer_rename_template = self.layer_rename_template.replace( "output", "representation") - formating_data = deepcopy(context["representation"]["context"]) - formating_data["batch"] = self.batch.name.get_value() + formatting_data = deepcopy(context["representation"]["context"]) + formatting_data["batch"] = self.batch.name.get_value() clip_name = StringTemplate(self.clip_name_template).format( - formating_data) + formatting_data) # convert colorspace with ocio to flame mapping # in imageio flame section @@ -82,7 +88,7 @@ class LoadClipBatch(opfapi.ClipLoader): "version": "v{:0>3}".format(version_name), "layer_rename_template": self.layer_rename_template, "layer_rename_patterns": self.layer_rename_patterns, - "context_data": formating_data + "context_data": formatting_data } self.log.debug(pformat( loading_context diff --git a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py index 76d48dded2..23fdf5e785 100644 --- a/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py +++ b/openpype/hosts/flame/plugins/publish/collect_timeline_instances.py @@ -203,7 +203,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): self._get_xml_preset_attrs( attributes, split) - # add xml overides resolution to instance data + # add xml overrides resolution to instance data xml_overrides = attributes["xml_overrides"] if xml_overrides.get("width"): attributes.update({ @@ -284,7 +284,7 @@ class CollectTimelineInstances(pyblish.api.ContextPlugin): self.log.debug("__ head: `{}`".format(head)) self.log.debug("__ tail: `{}`".format(tail)) - # HACK: it is here to serve for versions bellow 2021.1 + # HACK: it is here to serve for versions below 2021.1 if not any([head, tail]): retimed_attributes = 
get_media_range_with_retimes(
                    otio_clip, handle_start, handle_end)
diff --git a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
index d5294d61c2..a7979ab4d5 100644
--- a/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
+++ b/openpype/hosts/flame/plugins/publish/extract_subset_resources.py
@@ -143,6 +143,9 @@ class ExtractSubsetResources(publish.Extractor):
         # create staging dir path
         staging_dir = self.staging_dir(instance)

+        # append staging dir for later cleanup
+        instance.context.data["cleanupFullPaths"].append(staging_dir)
+
         # add default preset type for thumbnail and reviewable video
         # update them with settings and override in case the same
         # are found in there
@@ -224,7 +227,7 @@ class ExtractSubsetResources(publish.Extractor):
                 self.hide_others(
                     exporting_clip, segment_name, s_track_name)

-                # change name patern
+                # change name pattern
                 name_patern_xml = (
                     "__{}.").format(
                         unique_name)
@@ -355,7 +358,7 @@ class ExtractSubsetResources(publish.Extractor):
                     representation_data["stagingDir"] = n_stage_dir
                     files = n_files

-                # add files to represetation but add
+                # add files to representation but add
                 # imagesequence as list
                 if (
                     # first check if path in files is not mov extension
@@ -548,30 +551,3 @@ class ExtractSubsetResources(publish.Extractor):
                 "Path `{}` is containing more that one clip".format(path)
             )
         return clips[0]
-
-    def staging_dir(self, instance):
-        """Provide a temporary directory in which to store extracted files
-
-        Upon calling this method the staging directory is stored inside
-        the instance.data['stagingDir']
-        """
-        staging_dir = instance.data.get('stagingDir', None)
-        openpype_temp_dir = os.getenv("OPENPYPE_TEMP_DIR")
-
-        if not staging_dir:
-            if openpype_temp_dir and os.path.exists(openpype_temp_dir):
-                staging_dir = os.path.normpath(
-                    tempfile.mkdtemp(
-                        prefix="pyblish_tmp_",
-                        dir=openpype_temp_dir
-                    )
-                )
-            else:
-                staging_dir = os.path.normpath(
-                    tempfile.mkdtemp(prefix="pyblish_tmp_")
-                )
-            instance.data['stagingDir'] = staging_dir
-
-        instance.context.data["cleanupFullPaths"].append(staging_dir)
-
-        return staging_dir
diff --git a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py
index 4d45f67ded..4f3945bb0f 100644
--- a/openpype/hosts/flame/plugins/publish/integrate_batch_group.py
+++ b/openpype/hosts/flame/plugins/publish/integrate_batch_group.py
@@ -50,7 +50,7 @@ class IntegrateBatchGroup(pyblish.api.InstancePlugin):
         self._load_clip_to_context(instance, bgroup)

     def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
-        # get write file node properties > OrederDict because order does mater
+        # get write file node properties > OrderedDict because order does matter
        write_pref_data = self._get_write_prefs(instance, task_data)

        batch_nodes = [
diff --git a/openpype/hosts/fusion/__init__.py b/openpype/hosts/fusion/__init__.py
index ddae01890b..1da11ba9d1 100644
--- a/openpype/hosts/fusion/__init__.py
+++ b/openpype/hosts/fusion/__init__.py
@@ -1,10 +1,14 @@
 from .addon import (
+    get_fusion_version,
     FusionAddon,
     FUSION_HOST_DIR,
+    FUSION_VERSIONS_DICT,
 )


 __all__ = (
+    "get_fusion_version",
     "FusionAddon",
     "FUSION_HOST_DIR",
+    "FUSION_VERSIONS_DICT",
 )
diff --git a/openpype/hosts/fusion/addon.py b/openpype/hosts/fusion/addon.py
index d1bd1566b7..45683cfbde 100644
--- a/openpype/hosts/fusion/addon.py
+++ b/openpype/hosts/fusion/addon.py
@@ -1,8 +1,52 @@
 import os
+import re

 from
openpype.modules import OpenPypeModule, IHostAddon +from openpype.lib import Logger FUSION_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) +# FUSION_VERSIONS_DICT is used by the pre-launch hooks +# The keys correspond to all currently supported Fusion versions +# Each value is a list of corresponding Python home variables and a profile +# number, which is used by the profile hook to set Fusion profile variables. +FUSION_VERSIONS_DICT = { + 9: ("FUSION_PYTHON36_HOME", 9), + 16: ("FUSION16_PYTHON36_HOME", 16), + 17: ("FUSION16_PYTHON36_HOME", 16), + 18: ("FUSION_PYTHON3_HOME", 16), +} + + +def get_fusion_version(app_name): + """ + The function is triggered by the prelaunch hooks to get the fusion version. + + `app_name` is obtained by prelaunch hooks from the + `launch_context.env.get("AVALON_APP_NAME")`. + + To get a correct Fusion version, a version number should be present + in the `applications/fusion/variants` key + of the Blackmagic Fusion Application Settings. + """ + + log = Logger.get_logger(__name__) + + if not app_name: + return + + app_version_candidates = re.findall(r"\d+", app_name) + if not app_version_candidates: + return + for app_version in app_version_candidates: + if int(app_version) in FUSION_VERSIONS_DICT: + return int(app_version) + else: + log.info( + "Unsupported Fusion version: {app_version}".format( + app_version=app_version + ) + ) + class FusionAddon(OpenPypeModule, IHostAddon): name = "fusion" @@ -14,15 +58,11 @@ class FusionAddon(OpenPypeModule, IHostAddon): def get_launch_hook_paths(self, app): if app.host_name != self.host_name: return [] - return [ - os.path.join(FUSION_HOST_DIR, "hooks") - ] + return [os.path.join(FUSION_HOST_DIR, "hooks")] def add_implementation_envs(self, env, _app): # Set default values if are not already set via settings - defaults = { - "OPENPYPE_LOG_NO_COLORS": "Yes" - } + defaults = {"OPENPYPE_LOG_NO_COLORS": "Yes"} for key, value in defaults.items(): if not env.get(key): env[key] = value diff --git a/openpype/hosts/fusion/api/__init__.py b/openpype/hosts/fusion/api/__init__.py index ed70dbca50..dba55a98d9 100644 --- a/openpype/hosts/fusion/api/__init__.py +++ b/openpype/hosts/fusion/api/__init__.py @@ -1,20 +1,11 @@ from .pipeline import ( - install, - uninstall, - + FusionHost, ls, imprint_container, - parse_container -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root + parse_container, + list_instances, + remove_instance ) from .lib import ( @@ -22,6 +13,7 @@ from .lib import ( update_frame_range, set_asset_framerange, get_current_comp, + get_bmd_library, comp_lock_and_undo_chunk ) @@ -30,21 +22,11 @@ from .menu import launch_openpype_menu __all__ = [ # pipeline - "install", - "uninstall", "ls", "imprint_container", "parse_container", - # workio - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - # lib "maintained_selection", "update_frame_range", diff --git a/openpype/hosts/fusion/api/action.py b/openpype/hosts/fusion/api/action.py new file mode 100644 index 0000000000..ff5dd14caa --- /dev/null +++ b/openpype/hosts/fusion/api/action.py @@ -0,0 +1,59 @@ +import pyblish.api + + +from openpype.hosts.fusion.api.lib import get_current_comp +from openpype.pipeline.publish import get_errored_instances_from_context + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid nodes in Fusion when plug-in failed. 
+
+    To retrieve the invalid nodes this assumes a static `get_invalid()`
+    method is available on the plugin.
+
+    """
+
+    label = "Select invalid"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "search"  # Icon from Font Awesome
+
+    def process(self, context, plugin):
+        errored_instances = get_errored_instances_from_context(context)
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding invalid nodes..")
+        invalid = list()
+        for instance in instances:
+            invalid_nodes = plugin.get_invalid(instance)
+            if invalid_nodes:
+                if isinstance(invalid_nodes, (list, tuple)):
+                    invalid.extend(invalid_nodes)
+                else:
+                    self.log.warning(
+                        "Plug-in returned an invalid result that is "
+                        "not a list of selectable nodes."
+                    )
+
+        if not invalid:
+            # Assume relevant comp is current comp and clear selection
+            self.log.info("No invalid tools found.")
+            comp = get_current_comp()
+            flow = comp.CurrentFrame.FlowView
+            flow.Select()  # No args equals clearing selection
+            return
+
+        # Assume a single comp
+        first_tool = invalid[0]
+        comp = first_tool.Comp()
+        flow = comp.CurrentFrame.FlowView
+        flow.Select()  # No args equals clearing selection
+        names = set()
+        for tool in invalid:
+            flow.Select(tool, True)
+            names.add(tool.Name)
+        self.log.info(
+            "Selecting invalid tools: %s" % ", ".join(sorted(names))
+        )
diff --git a/openpype/hosts/fusion/api/lib.py b/openpype/hosts/fusion/api/lib.py
index a33e5cf289..cba8c38c2f 100644
--- a/openpype/hosts/fusion/api/lib.py
+++ b/openpype/hosts/fusion/api/lib.py
@@ -210,7 +210,8 @@ def switch_item(container,
     if any(not x for x in [asset_name, subset_name, representation_name]):
         repre_id = container["representation"]
         representation = get_representation_by_id(project_name, repre_id)
-        repre_parent_docs = get_representation_parents(representation)
+        repre_parent_docs = get_representation_parents(
+            project_name, representation)
         if repre_parent_docs:
             version, subset, asset, _ = repre_parent_docs
         else:
@@ -255,8 +256,11 @@ def switch_item(container,


 @contextlib.contextmanager
-def maintained_selection():
-    comp = get_current_comp()
+def maintained_selection(comp=None):
+    """Restore the comp selection from before the context when it exits"""
+    if comp is None:
+        comp = get_current_comp()
+
     previous_selection = comp.GetToolList(True).values()
     try:
         yield
@@ -268,6 +272,33 @@
                 flow.Select(tool, True)


+@contextlib.contextmanager
+def maintained_comp_range(comp=None,
+                          global_start=True,
+                          global_end=True,
+                          render_start=True,
+                          render_end=True):
+    """Restore the comp frame ranges from before the context when it exits"""
+    if comp is None:
+        comp = get_current_comp()
+
+    comp_attrs = comp.GetAttrs()
+    preserve_attrs = {}
+    if global_start:
+        preserve_attrs["COMPN_GlobalStart"] = comp_attrs["COMPN_GlobalStart"]
+    if global_end:
+        preserve_attrs["COMPN_GlobalEnd"] = comp_attrs["COMPN_GlobalEnd"]
+    if render_start:
+        preserve_attrs["COMPN_RenderStart"] = comp_attrs["COMPN_RenderStart"]
+    if render_end:
+        preserve_attrs["COMPN_RenderEnd"] = comp_attrs["COMPN_RenderEnd"]
+
+    try:
+        yield
+    finally:
+        comp.SetAttrs(preserve_attrs)
+
+
 def get_frame_path(path):
     """Get filename for the Fusion Saver with padded number as '#'

@@ -302,10 +333,24 @@
     return filename, padding, ext


-def get_current_comp():
-    """Hack to get current comp in this session"""
+def get_fusion_module():
+    """Get
current Fusion instance""" fusion = getattr(sys.modules["__main__"], "fusion", None) - return fusion.CurrentComp if fusion else None + return fusion + + +def get_bmd_library(): + """Get bmd library""" + bmd = getattr(sys.modules["__main__"], "bmd", None) + return bmd + + +def get_current_comp(): + """Get current comp in this session""" + fusion = get_fusion_module() + if fusion is not None: + comp = fusion.CurrentComp + return comp @contextlib.contextmanager diff --git a/openpype/hosts/fusion/api/menu.py b/openpype/hosts/fusion/api/menu.py index 42fbab70a6..92f38a64c2 100644 --- a/openpype/hosts/fusion/api/menu.py +++ b/openpype/hosts/fusion/api/menu.py @@ -6,12 +6,11 @@ from openpype.tools.utils import host_tools from openpype.style import load_stylesheet from openpype.lib import register_event_callback from openpype.hosts.fusion.scripts import ( - set_rendermode, - duplicate_with_inputs + duplicate_with_inputs, ) from openpype.hosts.fusion.api.lib import ( set_asset_framerange, - set_asset_resolution + set_asset_resolution, ) from openpype.pipeline import legacy_io from openpype.resources import get_openpype_icon_filepath @@ -45,20 +44,21 @@ class OpenPypeMenu(QtWidgets.QWidget): self.setWindowTitle("OpenPype") asset_label = QtWidgets.QLabel("Context", self) - asset_label.setStyleSheet("""QLabel { + asset_label.setStyleSheet( + """QLabel { font-size: 14px; font-weight: 600; color: #5f9fb8; - }""") + }""" + ) asset_label.setAlignment(QtCore.Qt.AlignHCenter) workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) create_btn = QtWidgets.QPushButton("Create...", self) - publish_btn = QtWidgets.QPushButton("Publish...", self) load_btn = QtWidgets.QPushButton("Load...", self) + publish_btn = QtWidgets.QPushButton("Publish...", self) manager_btn = QtWidgets.QPushButton("Manage...", self) libload_btn = QtWidgets.QPushButton("Library...", self) - rendermode_btn = QtWidgets.QPushButton("Set render mode...", self) set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self) set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self) duplicate_with_inputs_btn = QtWidgets.QPushButton( @@ -89,7 +89,6 @@ class OpenPypeMenu(QtWidgets.QWidget): layout.addWidget(set_framerange_btn) layout.addWidget(set_resolution_btn) - layout.addWidget(rendermode_btn) layout.addSpacing(20) @@ -106,9 +105,9 @@ class OpenPypeMenu(QtWidgets.QWidget): load_btn.clicked.connect(self.on_load_clicked) manager_btn.clicked.connect(self.on_manager_clicked) libload_btn.clicked.connect(self.on_libload_clicked) - rendermode_btn.clicked.connect(self.on_rendermode_clicked) duplicate_with_inputs_btn.clicked.connect( - self.on_duplicate_with_inputs_clicked) + self.on_duplicate_with_inputs_clicked + ) set_resolution_btn.clicked.connect(self.on_set_resolution_clicked) set_framerange_btn.clicked.connect(self.on_set_framerange_clicked) @@ -130,7 +129,6 @@ class OpenPypeMenu(QtWidgets.QWidget): self.asset_label.setText(label) def register_callback(self, name, fn): - # Create a wrapper callback that we only store # for as long as we want it to persist as callback def _callback(*args): @@ -146,10 +144,10 @@ class OpenPypeMenu(QtWidgets.QWidget): host_tools.show_workfiles() def on_create_clicked(self): - host_tools.show_creator() + host_tools.show_publisher(tab="create") def on_publish_clicked(self): - host_tools.show_publish() + host_tools.show_publisher(tab="publish") def on_load_clicked(self): host_tools.show_loader(use_context=True) @@ -160,15 +158,6 @@ class OpenPypeMenu(QtWidgets.QWidget): def on_libload_clicked(self): 
host_tools.show_library_loader() - def on_rendermode_clicked(self): - if self.render_mode_widget is None: - window = set_rendermode.SetRenderMode() - window.setStyleSheet(load_stylesheet()) - window.show() - self.render_mode_widget = window - else: - self.render_mode_widget.show() - def on_duplicate_with_inputs_clicked(self): duplicate_with_inputs.duplicate_with_input_connections() diff --git a/openpype/hosts/fusion/api/pipeline.py b/openpype/hosts/fusion/api/pipeline.py index 6315fe443d..a768a3f0f8 100644 --- a/openpype/hosts/fusion/api/pipeline.py +++ b/openpype/hosts/fusion/api/pipeline.py @@ -4,6 +4,7 @@ Basic avalon integration import os import sys import logging +import contextlib import pyblish.api from qtpy import QtCore @@ -17,15 +18,14 @@ from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, register_inventory_action_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - deregister_inventory_action_path, AVALON_CONTAINER_ID, ) from openpype.pipeline.load import any_outdated_containers from openpype.hosts.fusion import FUSION_HOST_DIR +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost from openpype.tools.utils import host_tools + from .lib import ( get_current_comp, comp_lock_and_undo_chunk, @@ -66,94 +66,98 @@ class FusionLogHandler(logging.Handler): self.print(entry) -def install(): - """Install fusion-specific functionality of OpenPype. +class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "fusion" - This is where you install menus and register families, data - and loaders into fusion. + def install(self): + """Install fusion-specific functionality of OpenPype. - It is called automatically when installing via - `openpype.pipeline.install_host(openpype.hosts.fusion.api)` + This is where you install menus and register families, data + and loaders into fusion. - See the Maya equivalent for inspiration on how to implement this. + It is called automatically when installing via + `openpype.pipeline.install_host(openpype.hosts.fusion.api)` - """ - # Remove all handlers associated with the root logger object, because - # that one always logs as "warnings" incorrectly. - for handler in logging.root.handlers[:]: - logging.root.removeHandler(handler) + See the Maya equivalent for inspiration on how to implement this. - # Attach default logging handler that prints to active comp - logger = logging.getLogger() - formatter = logging.Formatter(fmt="%(message)s\n") - handler = FusionLogHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.DEBUG) + """ + # Remove all handlers associated with the root logger object, because + # that one always logs as "warnings" incorrectly. 
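+        # (The FusionLogHandler attached below then becomes the only root
+        # handler, so log records are printed once into the active comp.)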
+ for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) - pyblish.api.register_host("fusion") - pyblish.api.register_plugin_path(PUBLISH_PATH) - log.info("Registering Fusion plug-ins..") + # Attach default logging handler that prints to active comp + logger = logging.getLogger() + formatter = logging.Formatter(fmt="%(message)s\n") + handler = FusionLogHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - register_inventory_action_path(INVENTORY_PATH) + pyblish.api.register_host("fusion") + pyblish.api.register_plugin_path(PUBLISH_PATH) + log.info("Registering Fusion plug-ins..") - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + register_inventory_action_path(INVENTORY_PATH) - # Register events - register_event_callback("open", on_after_open) - register_event_callback("save", on_save) - register_event_callback("new", on_new) + # Register events + register_event_callback("open", on_after_open) + register_event_callback("save", on_save) + register_event_callback("new", on_new) + # region workfile io api + def has_unsaved_changes(self): + comp = get_current_comp() + return comp.GetAttrs()["COMPB_Modified"] -def uninstall(): - """Uninstall all that was installed + def get_workfile_extensions(self): + return [".comp"] - This is where you undo everything that was done in `install()`. - That means, removing menus, deregistering families and data - and everything. It should be as though `install()` was never run, - because odds are calling this function means the user is interested - in re-installing shortly afterwards. If, for example, he has been - modifying the menu or registered families. 
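+    # (The methods in this region supersede the module-level functions
+    # that previously lived in openpype/hosts/fusion/api/workio.py.)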
+    def save_workfile(self, dst_path=None):
+        comp = get_current_comp()
+        comp.Save(dst_path)

-    """
-    pyblish.api.deregister_host("fusion")
-    pyblish.api.deregister_plugin_path(PUBLISH_PATH)
-    log.info("Deregistering Fusion plug-ins..")
+    def open_workfile(self, filepath):
+        # Hack to get fusion, see
+        # openpype.hosts.fusion.api.pipeline.get_current_comp()
+        fusion = getattr(sys.modules["__main__"], "fusion", None)

-    deregister_loader_plugin_path(LOAD_PATH)
-    deregister_creator_plugin_path(CREATE_PATH)
-    deregister_inventory_action_path(INVENTORY_PATH)
+        return fusion.LoadComp(filepath)

-    pyblish.api.deregister_callback(
-        "instanceToggled", on_pyblish_instance_toggled
-    )
+    def get_current_workfile(self):
+        comp = get_current_comp()
+        current_filepath = comp.GetAttrs()["COMPS_FileName"]
+        if not current_filepath:
+            return None

+        return current_filepath

-def on_pyblish_instance_toggled(instance, old_value, new_value):
-    """Toggle saver tool passthrough states on instance toggles."""
-    comp = instance.context.data.get("currentComp")
-    if not comp:
-        return
+    def work_root(self, session):
+        work_dir = session["AVALON_WORKDIR"]
+        scene_dir = session.get("AVALON_SCENEDIR")
+        if scene_dir:
+            return os.path.join(work_dir, scene_dir)
+        else:
+            return work_dir
+    # endregion

-    savers = [tool for tool in instance if
-              getattr(tool, "ID", None) == "Saver"]
-    if not savers:
-        return
+    @contextlib.contextmanager
+    def maintained_selection(self):
+        from .lib import maintained_selection
+        with maintained_selection():
+            yield

-    # Whether instances should be passthrough based on new value
-    passthrough = not new_value
-    with comp_lock_and_undo_chunk(comp,
-                                  undo_queue_name="Change instance "
-                                                  "active state"):
-        for tool in savers:
-            attrs = tool.GetAttrs()
-            current = attrs["TOOLB_PassThrough"]
-            if current != passthrough:
-                tool.SetAttrs({"TOOLB_PassThrough": passthrough})
+    def get_containers(self):
+        return ls()
+
+    def update_context_data(self, data, changes):
+        comp = get_current_comp()
+        comp.SetData("openpype", data)
+
+    def get_context_data(self):
+        comp = get_current_comp()
+        return comp.GetData("openpype") or {}


 def on_new(event):
@@ -283,9 +287,51 @@ def parse_container(tool):
     return container


+# TODO: Function below is currently an unused prototype
+def list_instances(creator_id=None):
+    """Return created instances in current workfile which will be published.
+    Returns:
+        (list) of dictionaries matching instances format
+    """
+
+    comp = get_current_comp()
+    tools = comp.GetToolList(False).values()
+
+    instance_signature = {
+        "id": "pyblish.avalon.instance",
+        "identifier": creator_id
+    }
+    instances = []
+    for tool in tools:
+
+        data = tool.GetData('openpype')
+        if not isinstance(data, dict):
+            continue
+
+        if data.get("id") != instance_signature["id"]:
+            continue
+
+        if creator_id and data.get("identifier") != creator_id:
+            continue
+
+        instances.append(tool)
+
+    return instances
+
+
+# TODO: Function below is currently an unused prototype
+def remove_instance(instance):
+    """Remove instance from current workfile.
+
+    Args:
+        instance (dict): instance representation from subsetmanager model
+    """
+    # Assume instance is a Fusion tool directly
+    instance["tool"].Delete()
+
+
 class FusionEventThread(QtCore.QThread):
     """QThread which will periodically ping Fusion app for any events.
- The fusion.UIManager must be set up to be notified of events before they'll be reported by this thread, for example: fusion.UIManager.AddNotify("Comp_Save", None) diff --git a/openpype/hosts/fusion/api/workio.py b/openpype/hosts/fusion/api/workio.py deleted file mode 100644 index 939b2ff4be..0000000000 --- a/openpype/hosts/fusion/api/workio.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Host API required Work Files tool""" -import sys -import os - -from .lib import get_current_comp - - -def file_extensions(): - return [".comp"] - - -def has_unsaved_changes(): - comp = get_current_comp() - return comp.GetAttrs()["COMPB_Modified"] - - -def save_file(filepath): - comp = get_current_comp() - comp.Save(filepath) - - -def open_file(filepath): - # Hack to get fusion, see - # openpype.hosts.fusion.api.pipeline.get_current_comp() - fusion = getattr(sys.modules["__main__"], "fusion", None) - - return fusion.LoadComp(filepath) - - -def current_file(): - comp = get_current_comp() - current_filepath = comp.GetAttrs()["COMPS_FileName"] - if not current_filepath: - return None - - return current_filepath - - -def work_root(session): - work_dir = session["AVALON_WORKDIR"] - scene_dir = session.get("AVALON_SCENEDIR") - if scene_dir: - return os.path.join(work_dir, scene_dir) - else: - return work_dir diff --git a/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py b/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py index 2918c552c8..685e58d58f 100644 --- a/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py +++ b/openpype/hosts/fusion/deploy/MenuScripts/openpype_menu.py @@ -13,11 +13,11 @@ def main(env): # However the contents of that folder can conflict with Qt library dlls # so we make sure to move out of it to avoid DLL Load Failed errors. os.chdir("..") - from openpype.hosts.fusion import api + from openpype.hosts.fusion.api import FusionHost from openpype.hosts.fusion.api import menu # activate resolve from pype - install_host(api) + install_host(FusionHost()) log = Logger.get_logger(__name__) log.info(f"Registered host: {registered_host()}") diff --git a/openpype/hosts/fusion/deploy/fusion_shared.prefs b/openpype/hosts/fusion/deploy/fusion_shared.prefs index 998c6a6d66..b379ea7c66 100644 --- a/openpype/hosts/fusion/deploy/fusion_shared.prefs +++ b/openpype/hosts/fusion/deploy/fusion_shared.prefs @@ -1,19 +1,19 @@ { Locked = true, Global = { - Paths = { - Map = { - ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy", - ["Reactor:"] = "$(REACTOR)", - - ["Config:"] = "UserPaths:Config;OpenPype:Config", - ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts", - ["UserPaths:"] = "UserData:;AllData:;Fusion:;Reactor:Deploy" - }, - }, - Script = { - PythonVersion = 3, - Python3Forced = true - }, + Paths = { + Map = { + ["OpenPype:"] = "$(OPENPYPE_FUSION)/deploy", + ["Config:"] = "UserPaths:Config;OpenPype:Config", + ["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts;OpenPype:Scripts", }, -} \ No newline at end of file + }, + Script = { + PythonVersion = 3, + Python3Forced = true + }, + UserInterface = { + Language = "en_US" + }, + }, +} diff --git a/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py b/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py deleted file mode 100644 index d1ae5f64fd..0000000000 --- a/openpype/hosts/fusion/hooks/pre_fusion_ocio_hook.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import platform - -from openpype.lib import PreLaunchHook, ApplicationLaunchFailed - - -class FusionPreLaunchOCIO(PreLaunchHook): - """Set OCIO environment variable for 
Fusion""" - app_groups = ["fusion"] - - def execute(self): - """Hook entry method.""" - - # get image io - project_settings = self.data["project_settings"] - - # make sure anatomy settings are having flame key - imageio_fusion = project_settings["fusion"]["imageio"] - - ocio = imageio_fusion.get("ocio") - enabled = ocio.get("enabled", False) - if not enabled: - return - - platform_key = platform.system().lower() - ocio_path = ocio["configFilePath"][platform_key] - if not ocio_path: - raise ApplicationLaunchFailed( - "Fusion OCIO is enabled in project settings but no OCIO config" - f"path is set for your current platform: {platform_key}" - ) - - self.log.info(f"Setting OCIO config path: {ocio_path}") - self.launch_context.env["OCIO"] = os.pathsep.join(ocio_path) diff --git a/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py b/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py new file mode 100644 index 0000000000..fd726ccda1 --- /dev/null +++ b/openpype/hosts/fusion/hooks/pre_fusion_profile_hook.py @@ -0,0 +1,161 @@ +import os +import shutil +import platform +from pathlib import Path +from openpype.lib import PreLaunchHook, ApplicationLaunchFailed +from openpype.hosts.fusion import ( + FUSION_HOST_DIR, + FUSION_VERSIONS_DICT, + get_fusion_version, +) + + +class FusionCopyPrefsPrelaunch(PreLaunchHook): + """ + Prepares local Fusion profile directory, copies existing Fusion profile. + This also sets FUSION MasterPrefs variable, which is used + to apply Master.prefs file to override some Fusion profile settings to: + - enable the OpenPype menu + - force Python 3 over Python 2 + - force English interface + Master.prefs is defined in openpype/hosts/fusion/deploy/fusion_shared.prefs + """ + + app_groups = ["fusion"] + order = 2 + + def get_fusion_profile_name(self, profile_version) -> str: + # Returns 'Default', unless FUSION16_PROFILE is set + return os.getenv(f"FUSION{profile_version}_PROFILE", "Default") + + def get_fusion_profile_dir(self, profile_version) -> Path: + # Get FUSION_PROFILE_DIR variable + fusion_profile = self.get_fusion_profile_name(profile_version) + fusion_var_prefs_dir = os.getenv( + f"FUSION{profile_version}_PROFILE_DIR" + ) + + # Check if FUSION_PROFILE_DIR exists + if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir(): + fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile) + self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}") + return fu_prefs_dir + + def get_profile_source(self, profile_version) -> Path: + """Get Fusion preferences profile location. + See Per-User_Preferences_and_Paths on VFXpedia for reference. 
+ """ + fusion_profile = self.get_fusion_profile_name(profile_version) + profile_source = self.get_fusion_profile_dir(profile_version) + if profile_source: + return profile_source + # otherwise get default location of the profile folder + fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}" + if platform.system() == "Windows": + profile_source = Path(os.getenv("AppData"), fu_prefs_dir) + elif platform.system() == "Darwin": + profile_source = Path( + "~/Library/Application Support/", fu_prefs_dir + ).expanduser() + elif platform.system() == "Linux": + profile_source = Path("~/.fusion", fu_prefs_dir).expanduser() + self.log.info( + f"Locating source Fusion prefs directory: {profile_source}" + ) + return profile_source + + def get_copy_fusion_prefs_settings(self): + # Get copy preferences options from the global application settings + + copy_fusion_settings = self.data["project_settings"]["fusion"].get( + "copy_fusion_settings", {} + ) + if not copy_fusion_settings: + self.log.error("Copy prefs settings not found") + copy_status = copy_fusion_settings.get("copy_status", False) + force_sync = copy_fusion_settings.get("force_sync", False) + copy_path = copy_fusion_settings.get("copy_path") or None + if copy_path: + copy_path = Path(copy_path).expanduser() + return copy_status, copy_path, force_sync + + def copy_fusion_profile( + self, copy_from: Path, copy_to: Path, force_sync: bool + ) -> None: + """On the first Fusion launch copy the contents of Fusion profile + directory to the working predefined location. If the Openpype profile + folder exists, skip copying, unless re-sync is checked. + If the prefs were not copied on the first launch, + clean Fusion profile will be created in fu_profile_dir. + """ + if copy_to.exists() and not force_sync: + self.log.info( + "Destination Fusion preferences folder already exists: " + f"{copy_to} " + ) + return + self.log.info("Starting copying Fusion preferences") + self.log.debug(f"force_sync option is set to {force_sync}") + try: + copy_to.mkdir(exist_ok=True, parents=True) + except PermissionError: + self.log.warning(f"Creating the folder not permitted at {copy_to}") + return + if not copy_from.exists(): + self.log.warning(f"Fusion preferences not found in {copy_from}") + return + for file in copy_from.iterdir(): + if file.suffix in ( + ".prefs", + ".def", + ".blocklist", + ".fu", + ".toolbars", + ): + # convert Path to str to be compatible with Python 3.6+ + shutil.copy(str(file), str(copy_to)) + self.log.info( + f"Successfully copied preferences: {copy_from} to {copy_to}" + ) + + def execute(self): + ( + copy_status, + fu_profile_dir, + force_sync, + ) = self.get_copy_fusion_prefs_settings() + + # Get launched application context and return correct app version + app_name = self.launch_context.env.get("AVALON_APP_NAME") + app_version = get_fusion_version(app_name) + if app_version is None: + version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT) + raise ApplicationLaunchFailed( + "Unable to detect valid Fusion version number from app " + f"name: {app_name}.\nMake sure to include at least a digit " + "to indicate the Fusion version like '18'.\n" + f"Detectable Fusion versions are: {version_names}" + ) + + _, profile_version = FUSION_VERSIONS_DICT[app_version] + fu_profile = self.get_fusion_profile_name(profile_version) + + # do a copy of Fusion profile if copy_status toggle is enabled + if copy_status and fu_profile_dir is not None: + profile_source = self.get_profile_source(profile_version) + dest_folder = Path(fu_profile_dir, 
fu_profile)
+            self.copy_fusion_profile(profile_source, dest_folder, force_sync)
+
+        # Add temporary profile directory variables to customize Fusion
+        # to define where it can read custom scripts and tools from
+        fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
+        self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
+        self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)
+
+        # Add custom Fusion Master Prefs and the temporary
+        # profile directory variables to customize Fusion
+        # to define where it can read custom scripts and tools from
+        master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
+        master_prefs = Path(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs")
+        self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
+        self.launch_context.env[master_prefs_variable] = str(master_prefs)
diff --git a/openpype/hosts/fusion/hooks/pre_fusion_setup.py b/openpype/hosts/fusion/hooks/pre_fusion_setup.py
index d043d54322..f27cd1674b 100644
--- a/openpype/hosts/fusion/hooks/pre_fusion_setup.py
+++ b/openpype/hosts/fusion/hooks/pre_fusion_setup.py
@@ -1,32 +1,43 @@
 import os
 from openpype.lib import PreLaunchHook, ApplicationLaunchFailed
-from openpype.hosts.fusion import FUSION_HOST_DIR
+from openpype.hosts.fusion import (
+    FUSION_HOST_DIR,
+    FUSION_VERSIONS_DICT,
+    get_fusion_version,
+)


 class FusionPrelaunch(PreLaunchHook):
-    """Prepares OpenPype Fusion environment
-
-    Requires FUSION_PYTHON3_HOME to be defined in the environment for Fusion
-    to point at a valid Python 3 build for Fusion. That is Python 3.3-3.10
-    for Fusion 18 and Fusion 3.6 for Fusion 16 and 17.
-
-    This also sets FUSION16_MasterPrefs to apply the fusion master prefs
-    as set in openpype/hosts/fusion/deploy/fusion_shared.prefs to enable
-    the OpenPype menu and force Python 3 over Python 2.
-    """
+    """
+    Prepares OpenPype Fusion environment.
+    Requires correct Python home variable to be defined in the environment
+    settings for Fusion to point at a valid Python 3 build for Fusion.
+    Python3 versions that are supported by Fusion:
+    Fusion 9, 16, 17 : Python 3.6
+    Fusion 18 : Python 3.6 - 3.10
+    """
+
+    app_groups = ["fusion"]
+    order = 1

     def execute(self):
         # making sure python 3 is installed at provided path
         # Py 3.3-3.10 for Fusion 18+ or Py 3.6 for Fu 16-17
-        py3_var = "FUSION_PYTHON3_HOME"
+        app_data = self.launch_context.env.get("AVALON_APP_NAME")
+        app_version = get_fusion_version(app_data)
+        if not app_version:
+            raise ApplicationLaunchFailed(
+                "Fusion version information not found in System settings.\n"
+                "The key field in the 'applications/fusion/variants' should "
+                "contain a number corresponding to the major Fusion version."
+            )
+        py3_var, _ = FUSION_VERSIONS_DICT[app_version]
         fusion_python3_home = self.launch_context.env.get(py3_var, "")

-        self.log.info(f"Looking for Python 3 in: {fusion_python3_home}")
         for path in fusion_python3_home.split(os.pathsep):
-            # Allow defining multiple paths to allow "fallback" to other
-            # path. But make to set only a single path as final variable.
+            # Allow defining multiple paths, separated by os.pathsep,
+            # to allow "fallback" to another path.
+            # But make sure to set only a single path as the final variable.
             py3_dir = os.path.normpath(path)
             if os.path.isdir(py3_dir):
                 break
@@ -36,26 +47,17 @@ class FusionPrelaunch(PreLaunchHook):
                 "Make sure the environment in fusion settings has "
                 "'FUSION_PYTHON3_HOME' set correctly and make sure "
                 "Python 3 is installed in the given path."
- f"\n\nPYTHON36: {fusion_python3_home}" + f"\n\nPYTHON PATH: {fusion_python3_home}" ) self.log.info(f"Setting {py3_var}: '{py3_dir}'...") self.launch_context.env[py3_var] = py3_dir # Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH - self.launch_context.env["PATH"] += ";" + py3_dir + if app_version >= 18: + self.launch_context.env["PATH"] += os.pathsep + py3_dir - # Fusion 16 and 17 use FUSION16_PYTHON36_HOME instead of - # FUSION_PYTHON3_HOME and will only work with a Python 3.6 version - # TODO: Detect Fusion version to only set for specific Fusion build - self.launch_context.env["FUSION16_PYTHON36_HOME"] = py3_dir + self.launch_context.env[py3_var] = py3_dir - # Add our Fusion Master Prefs which is the only way to customize - # Fusion to define where it can read custom scripts and tools from self.log.info(f"Setting OPENPYPE_FUSION: {FUSION_HOST_DIR}") self.launch_context.env["OPENPYPE_FUSION"] = FUSION_HOST_DIR - - pref_var = "FUSION16_MasterPrefs" # used by Fusion 16, 17 and 18 - prefs = os.path.join(FUSION_HOST_DIR, "deploy", "fusion_shared.prefs") - self.log.info(f"Setting {pref_var}: {prefs}") - self.launch_context.env[pref_var] = prefs diff --git a/openpype/hosts/fusion/plugins/create/create_exr_saver.py b/openpype/hosts/fusion/plugins/create/create_exr_saver.py deleted file mode 100644 index 6d93fe710a..0000000000 --- a/openpype/hosts/fusion/plugins/create/create_exr_saver.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -from openpype.pipeline import ( - LegacyCreator, - legacy_io -) -from openpype.hosts.fusion.api import ( - get_current_comp, - comp_lock_and_undo_chunk -) - - -class CreateOpenEXRSaver(LegacyCreator): - - name = "openexrDefault" - label = "Create OpenEXR Saver" - hosts = ["fusion"] - family = "render" - defaults = ["Main"] - - def process(self): - - file_format = "OpenEXRFormat" - - comp = get_current_comp() - - workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"]) - - filename = "{}..exr".format(self.name) - filepath = os.path.join(workdir, "render", filename) - - with comp_lock_and_undo_chunk(comp): - args = (-32768, -32768) # Magical position numbers - saver = comp.AddTool("Saver", *args) - saver.SetAttrs({"TOOLS_Name": self.name}) - - # Setting input attributes is different from basic attributes - # Not confused with "MainInputAttributes" which - saver["Clip"] = filepath - saver["OutputFormat"] = file_format - - # Check file format settings are available - if saver[file_format] is None: - raise RuntimeError("File format is not set to {}, " - "this is a bug".format(file_format)) - - # Set file format attributes - saver[file_format]["Depth"] = 1 # int8 | int16 | float32 | other - saver[file_format]["SaveAlpha"] = 0 diff --git a/openpype/hosts/fusion/plugins/create/create_saver.py b/openpype/hosts/fusion/plugins/create/create_saver.py new file mode 100644 index 0000000000..04898d0a45 --- /dev/null +++ b/openpype/hosts/fusion/plugins/create/create_saver.py @@ -0,0 +1,273 @@ +from copy import deepcopy +import os + +from openpype.hosts.fusion.api import ( + get_current_comp, + comp_lock_and_undo_chunk, +) + +from openpype.lib import ( + BoolDef, + EnumDef, +) +from openpype.pipeline import ( + legacy_io, + Creator as NewCreator, + CreatedInstance, + Anatomy +) + + +class CreateSaver(NewCreator): + identifier = "io.openpype.creators.fusion.saver" + label = "Render (saver)" + name = "render" + family = "render" + default_variants = ["Main", "Mask"] + description = "Fusion Saver to generate image sequence" + icon = "fa5.eye" + + instance_attributes 
= [
+        "reviewable"
+    ]
+
+    # TODO: This should be renamed together with Nuke so it is aligned
+    temp_rendering_path_template = (
+        "{workdir}/renders/fusion/{subset}/{subset}.{frame}.{ext}")
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        self.pass_pre_attributes_to_instance(
+            instance_data,
+            pre_create_data
+        )
+
+        instance_data.update({
+            "id": "pyblish.avalon.instance",
+            "subset": subset_name
+        })
+
+        # TODO: Add pre_create attributes to choose file format?
+        file_format = "OpenEXRFormat"
+
+        comp = get_current_comp()
+        with comp_lock_and_undo_chunk(comp):
+            args = (-32768, -32768)  # Magical position numbers
+            saver = comp.AddTool("Saver", *args)
+
+            self._update_tool_with_data(saver, data=instance_data)
+
+            saver["OutputFormat"] = file_format
+
+            # Check file format settings are available
+            if saver[file_format] is None:
+                raise RuntimeError(
+                    f"File format is not set to {file_format}, this is a bug"
+                )
+
+            # Set file format attributes
+            saver[file_format]["Depth"] = 0  # Auto | float16 | float32
+            # TODO Is this needed?
+            saver[file_format]["SaveAlpha"] = 1
+
+            self._imprint(saver, instance_data)
+
+        # Register the CreatedInstance
+        instance = CreatedInstance(
+            family=self.family,
+            subset_name=subset_name,
+            data=instance_data,
+            creator=self,
+        )
+
+        # Insert the transient data
+        instance.transient_data["tool"] = saver
+
+        self._add_instance_to_context(instance)
+
+        return instance
+
+    def collect_instances(self):
+        comp = get_current_comp()
+        tools = comp.GetToolList(False, "Saver").values()
+        for tool in tools:
+            data = self.get_managed_tool_data(tool)
+            if not data:
+                continue
+
+            # Add instance
+            created_instance = CreatedInstance.from_existing(data, self)
+
+            # Collect transient data
+            created_instance.transient_data["tool"] = tool
+
+            self._add_instance_to_context(created_instance)
+
+    def update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+            new_data = created_inst.data_to_store()
+            tool = created_inst.transient_data["tool"]
+            self._update_tool_with_data(tool, new_data)
+            self._imprint(tool, new_data)
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            # Remove the tool from the scene
+
+            tool = instance.transient_data["tool"]
+            if tool:
+                tool.Delete()
+
+            # Remove the collected CreatedInstance to remove from UI directly
+            self._remove_instance_from_context(instance)
+
+    def _imprint(self, tool, data):
+        # Save all data in a "openpype.{key}" = value data
+
+        active = data.pop("active", None)
+        if active is not None:
+            # Use active value to set the passthrough state
+            tool.SetAttrs({"TOOLB_PassThrough": not active})
+
+        for key, value in data.items():
+            tool.SetData(f"openpype.{key}", value)
+
+    def _update_tool_with_data(self, tool, data):
+        """Update tool node name and output path based on subset data"""
+        if "subset" not in data:
+            return
+
+        original_subset = tool.GetData("openpype.subset")
+        subset = data["subset"]
+        if original_subset != subset:
+            self._configure_saver_tool(data, tool, subset)
+
+    def _configure_saver_tool(self, data, tool, subset):
+        formatting_data = deepcopy(data)
+
+        # get frame padding from anatomy templates
+        anatomy = Anatomy()
+        frame_padding = int(
+            anatomy.templates["render"].get("frame_padding", 4)
+        )
+
+        # Subset change detected
+        workdir = os.path.normpath(legacy_io.Session["AVALON_WORKDIR"])
+        formatting_data.update({
+            "workdir": workdir,
+            "frame": "0" * frame_padding,
+            "ext": "exr"
+        })
+
+        # build file path to
render + filepath = self.temp_rendering_path_template.format( + **formatting_data) + + tool["Clip"] = os.path.normpath(filepath) + + # Rename tool + if tool.Name != subset: + print(f"Renaming {tool.Name} -> {subset}") + tool.SetAttrs({"TOOLS_Name": subset}) + + def get_managed_tool_data(self, tool): + """Return data of the tool if it matches creator identifier""" + data = tool.GetData("openpype") + if not isinstance(data, dict): + return + + required = { + "id": "pyblish.avalon.instance", + "creator_identifier": self.identifier, + } + for key, value in required.items(): + if key not in data or data[key] != value: + return + + # Get active state from the actual tool state + attrs = tool.GetAttrs() + passthrough = attrs["TOOLB_PassThrough"] + data["active"] = not passthrough + + return data + + def get_pre_create_attr_defs(self): + """Settings for create page""" + attr_defs = [ + self._get_render_target_enum(), + self._get_reviewable_bool(), + self._get_frame_range_enum() + ] + return attr_defs + + def get_instance_attr_defs(self): + """Settings for publish page""" + return self.get_pre_create_attr_defs() + + def pass_pre_attributes_to_instance( + self, + instance_data, + pre_create_data + ): + creator_attrs = instance_data["creator_attributes"] = {} + for pass_key in pre_create_data.keys(): + creator_attrs[pass_key] = pre_create_data[pass_key] + + # These functions below should be moved to another file + # so it can be used by other plugins. plugin.py ? + def _get_render_target_enum(self): + rendering_targets = { + "local": "Local machine rendering", + "frames": "Use existing frames", + } + if "farm_rendering" in self.instance_attributes: + rendering_targets["farm"] = "Farm rendering" + + return EnumDef( + "render_target", items=rendering_targets, label="Render target" + ) + + def _get_frame_range_enum(self): + frame_range_options = { + "asset_db": "Current asset context", + "render_range": "From render in/out", + "comp_range": "From composition timeline" + } + + return EnumDef( + "frame_range_source", + items=frame_range_options, + label="Frame range source" + ) + + def _get_reviewable_bool(self): + return BoolDef( + "review", + default=("reviewable" in self.instance_attributes), + label="Review", + ) + + def apply_settings( + self, + project_settings, + system_settings + ): + """Method called on initialization of plugin to apply settings.""" + + # plugin settings + plugin_settings = ( + project_settings["fusion"]["create"][self.__class__.__name__] + ) + + # individual attributes + self.instance_attributes = plugin_settings.get( + "instance_attributes") or self.instance_attributes + self.default_variants = plugin_settings.get( + "default_variants") or self.default_variants + self.temp_rendering_path_template = ( + plugin_settings.get("temp_rendering_path_template") + or self.temp_rendering_path_template + ) diff --git a/openpype/hosts/fusion/plugins/create/create_workfile.py b/openpype/hosts/fusion/plugins/create/create_workfile.py new file mode 100644 index 0000000000..40721ea88a --- /dev/null +++ b/openpype/hosts/fusion/plugins/create/create_workfile.py @@ -0,0 +1,105 @@ +from openpype.hosts.fusion.api import ( + get_current_comp +) +from openpype.client import get_asset_by_name +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io, +) + + +class FusionWorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + label = "Workfile" + icon = "fa5.file" + + default_variant = "Main" + + create_allow_context_change = False + + data_key = 
"openpype_workfile" + + def collect_instances(self): + + comp = get_current_comp() + data = comp.GetData(self.data_key) + if not data: + return + + instance = CreatedInstance( + family=self.family, + subset_name=data["subset"], + data=data, + creator=self + ) + instance.transient_data["comp"] = comp + + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + comp = created_inst.transient_data["comp"] + if not hasattr(comp, "SetData"): + # Comp is not alive anymore, likely closed by the user + self.log.error("Workfile comp not found for existing instance." + " Comp might have been closed in the meantime.") + continue + + # Imprint data into the comp + data = created_inst.data_to_store() + comp.SetData(self.data_key, data) + + def create(self, options=None): + + comp = get_current_comp() + if not comp: + self.log.error("Unable to find current comp") + return + + existing_instance = None + for instance in self.create_context.instances: + if instance.family == self.family: + existing_instance = instance + break + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + } + data.update(self.get_dynamic_data( + self.default_variant, task_name, asset_doc, + project_name, host_name, None + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + new_instance.transient_data["comp"] = comp + self._add_instance_to_context(new_instance) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/fusion/plugins/load/actions.py b/openpype/hosts/fusion/plugins/load/actions.py index 819c9272fd..f83ab433ee 100644 --- a/openpype/hosts/fusion/plugins/load/actions.py +++ b/openpype/hosts/fusion/plugins/load/actions.py @@ -15,6 +15,7 @@ class FusionSetFrameRangeLoader(load.LoaderPlugin): "pointcache", "render"] representations = ["*"] + extensions = {"*"} label = "Set frame range" order = 11 @@ -71,8 +72,7 @@ class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin): return # Include handles - handles = version_data.get("handles", 0) - start -= handles - end += handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) lib.update_frame_range(start, end) diff --git a/openpype/hosts/fusion/plugins/load/load_alembic.py b/openpype/hosts/fusion/plugins/load/load_alembic.py index f8b8c2cb0a..11bf59af12 100644 --- a/openpype/hosts/fusion/plugins/load/load_alembic.py +++ b/openpype/hosts/fusion/plugins/load/load_alembic.py @@ -13,7 +13,8 @@ class FusionLoadAlembicMesh(load.LoaderPlugin): """Load Alembic mesh into Fusion""" families = ["pointcache", "model"] - representations = ["abc"] + representations = ["*"] + extensions = {"abc"} label = "Load alembic mesh" order = -10 diff --git 
a/openpype/hosts/fusion/plugins/load/load_fbx.py b/openpype/hosts/fusion/plugins/load/load_fbx.py
index 70fe82ffef..c73ad78394 100644
--- a/openpype/hosts/fusion/plugins/load/load_fbx.py
+++ b/openpype/hosts/fusion/plugins/load/load_fbx.py
@@ -1,4 +1,3 @@
-
 from openpype.pipeline import (
     load,
     get_representation_path,
@@ -6,7 +5,7 @@ from openpype.pipeline import (
 from openpype.hosts.fusion.api import (
     imprint_container,
     get_current_comp,
-    comp_lock_and_undo_chunk
+    comp_lock_and_undo_chunk,
 )


@@ -14,7 +13,22 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
     """Load FBX mesh into Fusion"""

     families = ["*"]
-    representations = ["fbx"]
+    representations = ["*"]
+    extensions = {
+        "3ds",
+        "amc",
+        "aoa",
+        "asf",
+        "bvh",
+        "c3d",
+        "dae",
+        "dxf",
+        "fbx",
+        "htr",
+        "mcd",
+        "obj",
+        "trc",
+    }

     label = "Load FBX mesh"
     order = -10
@@ -26,23 +40,24 @@ class FusionLoadFBXMesh(load.LoaderPlugin):
     def load(self, context, name, namespace, data):
         # Fallback to asset name when namespace is None
         if namespace is None:
-            namespace = context['asset']['name']
+            namespace = context["asset"]["name"]

         # Create the Loader with the filename path set
         comp = get_current_comp()
         with comp_lock_and_undo_chunk(comp, "Create tool"):
-
             path = self.fname
             args = (-32768, -32768)
             tool = comp.AddTool(self.tool_type, *args)
             tool["ImportFile"] = path

-            imprint_container(tool,
-                              name=name,
-                              namespace=namespace,
-                              context=context,
-                              loader=self.__class__.__name__)
+            imprint_container(
+                tool,
+                name=name,
+                namespace=namespace,
+                context=context,
+                loader=self.__class__.__name__,
+            )

     def switch(self, container, representation):
         self.update(container, representation)
diff --git a/openpype/hosts/fusion/plugins/load/load_sequence.py b/openpype/hosts/fusion/plugins/load/load_sequence.py
index 6f44c61d1b..552e282587 100644
--- a/openpype/hosts/fusion/plugins/load/load_sequence.py
+++ b/openpype/hosts/fusion/plugins/load/load_sequence.py
@@ -1,17 +1,16 @@
-import os
 import contextlib

-from openpype.client import get_version_by_id
-from openpype.pipeline import (
-    load,
-    legacy_io,
-    get_representation_path,
+import openpype.pipeline.load as load
+from openpype.pipeline.load import (
+    get_representation_context,
+    get_representation_path_from_context,
 )
 from openpype.hosts.fusion.api import (
     imprint_container,
     get_current_comp,
-    comp_lock_and_undo_chunk
+    comp_lock_and_undo_chunk,
 )
+from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS

 comp = get_current_comp()

@@ -55,20 +54,23 @@ def preserve_trim(loader, log=None):
     try:
         yield
     finally:
-        length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
         if trim_from_start > length:
             trim_from_start = length
             if log:
-                log.warning("Reducing trim in to %d "
-                            "(because of less frames)" % trim_from_start)
+                log.warning(
+                    "Reducing trim in to %d "
+                    "(because of fewer frames)" % trim_from_start
+                )

         remainder = length - trim_from_start
         if trim_from_end > remainder:
             trim_from_end = remainder
             if log:
-                log.warning("Reducing trim in to %d "
-                            "(because of less frames)" % trim_from_end)
+                log.warning(
+                    "Reducing trim end to %d "
+                    "(because of fewer frames)" % trim_from_end
+                )

         loader["ClipTimeStart"][time] = trim_from_start
         loader["ClipTimeEnd"][time] = length - trim_from_end
@@ -107,11 +109,15 @@ def loader_shift(loader, frame, relative=True):
     # Shifting global in will try to automatically compensate for the change
     # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
     # input values to "just shift" the clip
-    with preserve_inputs(loader,
inputs=["ClipTimeStart",
-                                 "ClipTimeEnd",
-                                 "HoldFirstFrame",
-                                 "HoldLastFrame"]):
-
+    with preserve_inputs(
+        loader,
+        inputs=[
+            "ClipTimeStart",
+            "ClipTimeEnd",
+            "HoldFirstFrame",
+            "HoldLastFrame",
+        ],
+    ):
         # GlobalIn cannot be set past GlobalOut or vice versa
         # so we must apply them in the order of the shift.
         if shift > 0:
@@ -127,8 +133,18 @@ class FusionLoadSequence(load.LoaderPlugin):
     """Load image sequence into Fusion"""

-    families = ["imagesequence", "review", "render", "plate"]
+    families = [
+        "imagesequence",
+        "review",
+        "render",
+        "plate",
+        "image",
+        "online",
+    ]
     representations = ["*"]
+    extensions = set(
+        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
+    )

     label = "Load sequence"
     order = -10
@@ -138,15 +154,14 @@ class FusionLoadSequence(load.LoaderPlugin):
     def load(self, context, name, namespace, data):
         # Fallback to asset name when namespace is None
         if namespace is None:
-            namespace = context['asset']['name']
+            namespace = context["asset"]["name"]

         # Use the first file for now
-        path = self._get_first_image(os.path.dirname(self.fname))
+        path = get_representation_path_from_context(context)

         # Create the Loader with the filename path set
         comp = get_current_comp()
         with comp_lock_and_undo_chunk(comp, "Create Loader"):
-
             args = (-32768, -32768)
             tool = comp.AddTool("Loader", *args)
             tool["Clip"] = path
@@ -155,11 +170,13 @@ class FusionLoadSequence(load.LoaderPlugin):
             start = self._get_start(context["version"], tool)
             loader_shift(tool, start, relative=False)

-            imprint_container(tool,
-                              name=name,
-                              namespace=namespace,
-                              context=context,
-                              loader=self.__class__.__name__)
+            imprint_container(
+                tool,
+                name=name,
+                namespace=namespace,
+                context=context,
+                loader=self.__class__.__name__,
+            )

     def switch(self, container, representation):
         self.update(container, representation)
@@ -210,33 +227,35 @@ class FusionLoadSequence(load.LoaderPlugin):
         assert tool.ID == "Loader", "Must be Loader"
         comp = tool.Comp()

-        root = os.path.dirname(get_representation_path(representation))
-        path = self._get_first_image(root)
+        context = get_representation_context(representation)
+        path = get_representation_path_from_context(context)

         # Get start frame from version data
-        project_name = legacy_io.active_project()
-        version = get_version_by_id(project_name, representation["parent"])
-        start = self._get_start(version, tool)
+        start = self._get_start(context["version"], tool)

         with comp_lock_and_undo_chunk(comp, "Update Loader"):
-
             # Update the loader's path whilst preserving some values
             with preserve_trim(tool, log=self.log):
-                with preserve_inputs(tool,
-                                     inputs=("HoldFirstFrame",
-                                             "HoldLastFrame",
-                                             "Reverse",
-                                             "Depth",
-                                             "KeyCode",
-                                             "TimeCodeOffset")):
+                with preserve_inputs(
+                    tool,
+                    inputs=(
+                        "HoldFirstFrame",
+                        "HoldLastFrame",
+                        "Reverse",
+                        "Depth",
+                        "KeyCode",
+                        "TimeCodeOffset",
+                    ),
+                ):
                     tool["Clip"] = path

         # Set the global in to the start frame of the sequence
         global_in_changed = loader_shift(tool, start, relative=False)
         if global_in_changed:
             # Log this change to the user
-            self.log.debug("Changed '%s' global in: %d" % (tool.Name,
-                                                           start))
+            self.log.debug(
+                "Changed '%s' global in: %d" % (tool.Name, start)
+            )

         # Update the imprinted representation
         tool.SetData("avalon.representation", str(representation["_id"]))
@@ -249,11 +268,6 @@ class FusionLoadSequence(load.LoaderPlugin):
         with comp_lock_and_undo_chunk(comp, "Remove Loader"):
             tool.Delete()

-    def _get_first_image(self, root):
-        """Get first file in
representation root""" - files = sorted(os.listdir(root)) - return os.path.join(root, files[0]) - def _get_start(self, version_doc, tool): """Return real start frame of published files (incl. handles)""" data = version_doc["data"] @@ -266,9 +280,11 @@ class FusionLoadSequence(load.LoaderPlugin): # Get frame start without handles start = data.get("frameStart") if start is None: - self.log.warning("Missing start frame for version " - "assuming starts at frame 0 for: " - "{}".format(tool.Name)) + self.log.warning( + "Missing start frame for version " + "assuming starts at frame 0 for: " + "{}".format(tool.Name) + ) return 0 # Use `handleStart` if the data is available diff --git a/openpype/hosts/fusion/plugins/load/load_workfile.py b/openpype/hosts/fusion/plugins/load/load_workfile.py new file mode 100644 index 0000000000..b49d104a15 --- /dev/null +++ b/openpype/hosts/fusion/plugins/load/load_workfile.py @@ -0,0 +1,32 @@ +"""Import workfiles into your current comp. +As all imported nodes are free floating and will probably be changed there +is no update or reload function added for this plugin +""" + +from openpype.pipeline import load + +from openpype.hosts.fusion.api import ( + get_current_comp, + get_bmd_library, +) + + +class FusionLoadWorkfile(load.LoaderPlugin): + """Load the content of a workfile into Fusion""" + + families = ["workfile"] + representations = ["*"] + extensions = {"comp"} + + label = "Load Workfile" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, data): + # Get needed elements + bmd = get_bmd_library() + comp = get_current_comp() + + # Paste the content of the file into the current comp + comp.Paste(bmd.readfile(self.fname)) diff --git a/openpype/hosts/fusion/plugins/publish/collect_comp.py b/openpype/hosts/fusion/plugins/publish/collect_comp.py index dfa540fa7f..d1c49790fa 100644 --- a/openpype/hosts/fusion/plugins/publish/collect_comp.py +++ b/openpype/hosts/fusion/plugins/publish/collect_comp.py @@ -1,5 +1,3 @@ -import os - import pyblish.api from openpype.hosts.fusion.api import get_current_comp diff --git a/openpype/hosts/fusion/plugins/publish/collect_comp_frame_range.py b/openpype/hosts/fusion/plugins/publish/collect_comp_frame_range.py new file mode 100644 index 0000000000..24a9a92337 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_comp_frame_range.py @@ -0,0 +1,44 @@ +import pyblish.api + + +def get_comp_render_range(comp): + """Return comp's start-end render range and global start-end range.""" + comp_attrs = comp.GetAttrs() + start = comp_attrs["COMPN_RenderStart"] + end = comp_attrs["COMPN_RenderEnd"] + global_start = comp_attrs["COMPN_GlobalStart"] + global_end = comp_attrs["COMPN_GlobalEnd"] + + # Whenever render ranges are undefined fall back + # to the comp's global start and end + if start == -1000000000: + start = global_start + if end == -1000000000: + end = global_end + + return start, end, global_start, global_end + + +class CollectFusionCompFrameRanges(pyblish.api.ContextPlugin): + """Collect current comp""" + + # We run this after CollectorOrder - 0.1 otherwise it gets + # overridden by global plug-in `CollectContextEntities` + order = pyblish.api.CollectorOrder - 0.05 + label = "Collect Comp Frame Ranges" + hosts = ["fusion"] + + def process(self, context): + """Collect all image sequence tools""" + + comp = context.data["currentComp"] + + # Store comp render ranges + start, end, global_start, global_end = get_comp_render_range(comp) + + context.data.update({ + 
"renderFrameStart": int(start), + "renderFrameEnd": int(end), + "compFrameStart": int(global_start), + "compFrameEnd": int(global_end) + }) diff --git a/openpype/hosts/fusion/plugins/publish/collect_fusion_version.py b/openpype/hosts/fusion/plugins/publish/collect_fusion_version.py deleted file mode 100644 index 65d8386f33..0000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_fusion_version.py +++ /dev/null @@ -1,22 +0,0 @@ -import pyblish.api - - -class CollectFusionVersion(pyblish.api.ContextPlugin): - """Collect current comp""" - - order = pyblish.api.CollectorOrder - label = "Collect Fusion Version" - hosts = ["fusion"] - - def process(self, context): - """Collect all image sequence tools""" - - comp = context.data.get("currentComp") - if not comp: - raise RuntimeError("No comp previously collected, unable to " - "retrieve Fusion version.") - - version = comp.GetApp().Version - context.data["fusionVersion"] = version - - self.log.info("Fusion version: %s" % version) diff --git a/openpype/hosts/fusion/plugins/publish/collect_inputs.py b/openpype/hosts/fusion/plugins/publish/collect_inputs.py index 8f9857b02f..a6628300db 100644 --- a/openpype/hosts/fusion/plugins/publish/collect_inputs.py +++ b/openpype/hosts/fusion/plugins/publish/collect_inputs.py @@ -1,5 +1,3 @@ -from bson.objectid import ObjectId - import pyblish.api from openpype.pipeline import registered_host @@ -97,10 +95,15 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): label = "Collect Inputs" order = pyblish.api.CollectorOrder + 0.2 hosts = ["fusion"] + families = ["render"] def process(self, instance): # Get all upstream and include itself + if not any(instance[:]): + self.log.debug("No tool found in instance, skipping..") + return + tool = instance[0] nodes = list(iter_upstream(tool)) nodes.append(tool) @@ -108,7 +111,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): # Collect containers for the given set of nodes containers = collect_input_containers(nodes) - inputs = [ObjectId(c["representation"]) for c in containers] + inputs = [c["representation"] for c in containers] instance.data["inputRepresentations"] = inputs - - self.log.info("Collected inputs: %s" % inputs) + self.log.debug("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/fusion/plugins/publish/collect_instances.py b/openpype/hosts/fusion/plugins/publish/collect_instances.py index fe60b83827..6016baa2a9 100644 --- a/openpype/hosts/fusion/plugins/publish/collect_instances.py +++ b/openpype/hosts/fusion/plugins/publish/collect_instances.py @@ -1,27 +1,7 @@ -import os - import pyblish.api -def get_comp_render_range(comp): - """Return comp's start-end render range and global start-end range.""" - comp_attrs = comp.GetAttrs() - start = comp_attrs["COMPN_RenderStart"] - end = comp_attrs["COMPN_RenderEnd"] - global_start = comp_attrs["COMPN_GlobalStart"] - global_end = comp_attrs["COMPN_GlobalEnd"] - - # Whenever render ranges are undefined fall back - # to the comp's global start and end - if start == -1000000000: - start = global_start - if end == -1000000000: - end = global_end - - return start, end, global_start, global_end - - -class CollectInstances(pyblish.api.ContextPlugin): +class CollectInstanceData(pyblish.api.InstancePlugin): """Collect Fusion saver instances This additionally stores the Comp start and end render range in the @@ -30,76 +10,80 @@ class CollectInstances(pyblish.api.ContextPlugin): """ order = pyblish.api.CollectorOrder - label = "Collect Instances" + label = "Collect Instances Data" hosts = 
["fusion"] - def process(self, context): + def process(self, instance): """Collect all image sequence tools""" - from openpype.hosts.fusion.api.lib import get_frame_path + context = instance.context - comp = context.data["currentComp"] + # Include creator attributes directly as instance data + creator_attributes = instance.data["creator_attributes"] + instance.data.update(creator_attributes) - # Get all savers in the comp - tools = comp.GetToolList(False).values() - savers = [tool for tool in tools if tool.ID == "Saver"] + frame_range_source = creator_attributes.get("frame_range_source") + instance.data["frame_range_source"] = frame_range_source - start, end, global_start, global_end = get_comp_render_range(comp) - context.data["frameStart"] = int(start) - context.data["frameEnd"] = int(end) - context.data["frameStartHandle"] = int(global_start) - context.data["frameEndHandle"] = int(global_end) + # get asset frame ranges to all instances + # render family instances `asset_db` render target + start = context.data["frameStart"] + end = context.data["frameEnd"] + handle_start = context.data["handleStart"] + handle_end = context.data["handleEnd"] + start_with_handle = start - handle_start + end_with_handle = end + handle_end - for tool in savers: - path = tool["Clip"][comp.TIME_UNDEFINED] + # conditions for render family instances + if frame_range_source == "render_range": + # set comp render frame ranges + start = context.data["renderFrameStart"] + end = context.data["renderFrameEnd"] + handle_start = 0 + handle_end = 0 + start_with_handle = start + end_with_handle = end - tool_attrs = tool.GetAttrs() - active = not tool_attrs["TOOLB_PassThrough"] + if frame_range_source == "comp_range": + comp_start = context.data["compFrameStart"] + comp_end = context.data["compFrameEnd"] + render_start = context.data["renderFrameStart"] + render_end = context.data["renderFrameEnd"] + # set comp frame ranges + start = render_start + end = render_end + handle_start = render_start - comp_start + handle_end = comp_end - render_end + start_with_handle = comp_start + end_with_handle = comp_end - if not path: - self.log.warning("Skipping saver because it " - "has no path set: {}".format(tool.Name)) - continue + # Include start and end render frame in label + subset = instance.data["subset"] + label = ( + "{subset} ({start}-{end}) [{handle_start}-{handle_end}]" + ).format( + subset=subset, + start=int(start), + end=int(end), + handle_start=int(handle_start), + handle_end=int(handle_end) + ) - filename = os.path.basename(path) - head, padding, tail = get_frame_path(filename) - ext = os.path.splitext(path)[1] - assert tail == ext, ("Tail does not match %s" % ext) - subset = head.rstrip("_. 
") # subset is head of the filename + instance.data.update({ + "label": label, - # Include start and end render frame in label - label = "{subset} ({start}-{end})".format(subset=subset, - start=int(start), - end=int(end)) + # todo: Allow custom frame range per instance + "frameStart": start, + "frameEnd": end, + "frameStartHandle": start_with_handle, + "frameEndHandle": end_with_handle, + "handleStart": handle_start, + "handleEnd": handle_end, + "fps": context.data["fps"], + }) - instance = context.create_instance(subset) - instance.data.update({ - "asset": os.environ["AVALON_ASSET"], # todo: not a constant - "subset": subset, - "path": path, - "outputDir": os.path.dirname(path), - "ext": ext, # todo: should be redundant - "label": label, - "frameStart": context.data["frameStart"], - "frameEnd": context.data["frameEnd"], - "frameStartHandle": context.data["frameStartHandle"], - "frameEndHandle": context.data["frameStartHandle"], - "fps": context.data["fps"], - "families": ["render", "review"], - "family": "render", - "active": active, - "publish": active # backwards compatibility - }) - - instance.append(tool) - - self.log.info("Found: \"%s\" " % path) - - # Sort/grouped by family (preserving local index) - context[:] = sorted(context, key=self.sort_by_family) - - return context - - def sort_by_family(self, instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) + # Add review family if the instance is marked as 'review' + # This could be done through a 'review' Creator attribute. + if instance.data.get("review", False): + self.log.info("Adding review family..") + instance.data["families"].append("review") diff --git a/openpype/hosts/fusion/plugins/publish/collect_render.py b/openpype/hosts/fusion/plugins/publish/collect_render.py new file mode 100644 index 0000000000..a20a142701 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_render.py @@ -0,0 +1,209 @@ +import os +import attr +import pyblish.api + +from openpype.pipeline import publish +from openpype.pipeline.publish import RenderInstance +from openpype.hosts.fusion.api.lib import get_frame_path + + +@attr.s +class FusionRenderInstance(RenderInstance): + # extend generic, composition name is needed + fps = attr.ib(default=None) + projectEntity = attr.ib(default=None) + stagingDir = attr.ib(default=None) + app_version = attr.ib(default=None) + tool = attr.ib(default=None) + workfileComp = attr.ib(default=None) + publish_attributes = attr.ib(default={}) + frameStartHandle = attr.ib(default=None) + frameEndHandle = attr.ib(default=None) + + +class CollectFusionRender( + publish.AbstractCollectRender, + publish.ColormanagedPyblishPluginMixin +): + + order = pyblish.api.CollectorOrder + 0.09 + label = "Collect Fusion Render" + hosts = ["fusion"] + + def get_instances(self, context): + + comp = context.data.get("currentComp") + comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat") + aspect_x = comp_frame_format_prefs["AspectX"] + aspect_y = comp_frame_format_prefs["AspectY"] + + instances = [] + instances_to_remove = [] + + current_file = context.data["currentFile"] + version = context.data["version"] + + project_entity = context.data["projectEntity"] + + for inst in context: + if not inst.data.get("active", True): + continue + + family = inst.data["family"] + if family != "render": + continue + + task_name = context.data["task"] + tool = inst.data["transientData"]["tool"] + + instance_families = inst.data.get("families", []) + subset_name = inst.data["subset"] + instance = 
FusionRenderInstance( + family="render", + tool=tool, + workfileComp=comp, + families=instance_families, + version=version, + time="", + source=current_file, + label=inst.data["label"], + subset=subset_name, + asset=inst.data["asset"], + task=task_name, + attachTo=False, + setMembers='', + publish=True, + name=subset_name, + resolutionWidth=comp_frame_format_prefs.get("Width"), + resolutionHeight=comp_frame_format_prefs.get("Height"), + pixelAspect=aspect_x / aspect_y, + tileRendering=False, + tilesX=0, + tilesY=0, + review="review" in instance_families, + frameStart=inst.data["frameStart"], + frameEnd=inst.data["frameEnd"], + handleStart=inst.data["handleStart"], + handleEnd=inst.data["handleEnd"], + frameStartHandle=inst.data["frameStartHandle"], + frameEndHandle=inst.data["frameEndHandle"], + frameStep=1, + fps=comp_frame_format_prefs.get("Rate"), + app_version=comp.GetApp().Version, + publish_attributes=inst.data.get("publish_attributes", {}) + ) + + render_target = inst.data["creator_attributes"]["render_target"] + + # Add render target family + render_target_family = f"render.{render_target}" + if render_target_family not in instance.families: + instance.families.append(render_target_family) + + # Add render target specific data + if render_target in {"local", "frames"}: + instance.projectEntity = project_entity + + if render_target == "farm": + fam = "render.farm" + if fam not in instance.families: + instance.families.append(fam) + instance.toBeRenderedOn = "deadline" + instance.farm = True # to skip integrate + if "review" in instance.families: + # to skip ExtractReview locally + instance.families.remove("review") + + # add new instance to the list and remove the original + # instance since it is not needed anymore + instances.append(instance) + instances_to_remove.append(inst) + + for instance in instances_to_remove: + context.remove(instance) + + return instances + + def post_collecting_action(self): + for instance in self._context: + if "render.frames" in instance.data.get("families", []): + # adding representation data to the instance + self._update_for_frames(instance) + + def get_expected_files(self, render_instance): + """ + Returns list of rendered files that should be created by + Deadline. These are not published directly, they are source + for later 'submit_publish_job'. + + Args: + render_instance (RenderInstance): to pull anatomy and parts used + in url + + Returns: + (list) of absolute urls to rendered file + """ + start = render_instance.frameStart - render_instance.handleStart + end = render_instance.frameEnd + render_instance.handleEnd + + path = ( + render_instance.tool["Clip"] + [render_instance.workfileComp.TIME_UNDEFINED] + ) + output_dir = os.path.dirname(path) + render_instance.outputDir = output_dir + + basename = os.path.basename(path) + + head, padding, ext = get_frame_path(basename) + + expected_files = [] + for frame in range(start, end + 1): + expected_files.append( + os.path.join( + output_dir, + f"{head}{str(frame).zfill(padding)}{ext}" + ) + ) + + return expected_files + + def _update_for_frames(self, instance): + """Updating instance for render.frames family + + Adding representation data to the instance. Also setting + colorspaceData to the representation based on file rules. 
+ """ + + expected_files = instance.data["expectedFiles"] + + start = instance.data["frameStart"] - instance.data["handleStart"] + + path = expected_files[0] + basename = os.path.basename(path) + staging_dir = os.path.dirname(path) + _, padding, ext = get_frame_path(basename) + + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{padding}d" % start, + "files": [os.path.basename(f) for f in expected_files], + "stagingDir": staging_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=instance.context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) + + return instance diff --git a/openpype/hosts/fusion/plugins/publish/collect_render_target.py b/openpype/hosts/fusion/plugins/publish/collect_render_target.py deleted file mode 100644 index 39017f32e0..0000000000 --- a/openpype/hosts/fusion/plugins/publish/collect_render_target.py +++ /dev/null @@ -1,44 +0,0 @@ -import pyblish.api - - -class CollectFusionRenderMode(pyblish.api.InstancePlugin): - """Collect current comp's render Mode - - Options: - local - farm - - Note that this value is set for each comp separately. When you save the - comp this information will be stored in that file. If for some reason the - available tool does not visualize which render mode is set for the - current comp, please run the following line in the console (Py2) - - comp.GetData("openpype.rendermode") - - This will return the name of the current render mode as seen above under - Options. - - """ - - order = pyblish.api.CollectorOrder + 0.4 - label = "Collect Render Mode" - hosts = ["fusion"] - families = ["render"] - - def process(self, instance): - """Collect all image sequence tools""" - options = ["local", "farm"] - - comp = instance.context.data.get("currentComp") - if not comp: - raise RuntimeError("No comp previously collected, unable to " - "retrieve Fusion version.") - - rendermode = comp.GetData("openpype.rendermode") or "local" - assert rendermode in options, "Must be supported render mode" - - self.log.info("Render mode: {0}".format(rendermode)) - - # Append family - family = "render.{0}".format(rendermode) - instance.data["families"].append(family) diff --git a/openpype/hosts/fusion/plugins/publish/collect_workfile.py b/openpype/hosts/fusion/plugins/publish/collect_workfile.py new file mode 100644 index 0000000000..4c288edb3e --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/collect_workfile.py @@ -0,0 +1,26 @@ +import os + +import pyblish.api + + +class CollectFusionWorkfile(pyblish.api.InstancePlugin): + """Collect Fusion workfile representation.""" + + order = pyblish.api.CollectorOrder + 0.1 + label = "Collect Workfile" + hosts = ["fusion"] + families = ["workfile"] + + def process(self, instance): + + current_file = instance.context.data["currentFile"] + + folder, file = os.path.split(current_file) + filename, ext = os.path.splitext(file) + + instance.data['representations'] = [{ + 'name': ext.lstrip("."), + 'ext': ext.lstrip("."), + 'files': file, + "stagingDir": folder, + }] diff --git a/openpype/hosts/fusion/plugins/publish/extract_render_local.py b/openpype/hosts/fusion/plugins/publish/extract_render_local.py new file mode 100644 index 0000000000..25c101cf00 --- /dev/null +++ b/openpype/hosts/fusion/plugins/publish/extract_render_local.py @@ -0,0 +1,199 @@ +import os 
+import logging +import contextlib +import collections +import pyblish.api + +from openpype.pipeline import publish +from openpype.hosts.fusion.api import comp_lock_and_undo_chunk +from openpype.hosts.fusion.api.lib import get_frame_path, maintained_comp_range + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def enabled_savers(comp, savers): + """Enable only the `savers` in Comp during the context. + + Any Saver tool in the passed composition that is not in the savers list + will be set to passthrough during the context. + + Args: + comp (object): Fusion composition object. + savers (list): List of Saver tool objects. + + """ + passthrough_key = "TOOLB_PassThrough" + original_states = {} + enabled_save_names = {saver.Name for saver in savers} + try: + all_savers = comp.GetToolList(False, "Saver").values() + for saver in all_savers: + original_state = saver.GetAttrs()[passthrough_key] + original_states[saver] = original_state + + # The passthrough state we want to set (passthrough != enabled) + state = saver.Name not in enabled_save_names + if state != original_state: + saver.SetAttrs({passthrough_key: state}) + yield + finally: + for saver, original_state in original_states.items(): + saver.SetAttrs({"TOOLB_PassThrough": original_state}) + + +class FusionRenderLocal( + pyblish.api.InstancePlugin, + publish.ColormanagedPyblishPluginMixin +): + """Render the current Fusion composition locally.""" + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Render Local" + hosts = ["fusion"] + families = ["render.local"] + + is_rendered_key = "_fusionrenderlocal_has_rendered" + + def process(self, instance): + + # Start render + result = self.render(instance) + if result is False: + raise RuntimeError(f"Comp render failed for {instance}") + + self._add_representation(instance) + + # Log render status + self.log.info( + "Rendered '{nm}' for asset '{ast}' under the task '{tsk}'".format( + nm=instance.data["name"], + ast=instance.data["asset"], + tsk=instance.data["task"], + ) + ) + + def render(self, instance): + """Render instance. + + We try to render the minimal amount of times by combining the instances + that have a matching frame range in one Fusion render. Then for the + batch of instances we store whether the render succeeded or failed. + + """ + + if self.is_rendered_key in instance.data: + # This instance was already processed in batch with another + # instance, so we just return the render result directly + self.log.debug(f"Instance {instance} was already rendered") + return instance.data[self.is_rendered_key] + + instances_by_frame_range = self.get_render_instances_by_frame_range( + instance.context + ) + + # Render matching batch of instances that share the same frame range + frame_range = self.get_instance_render_frame_range(instance) + render_instances = instances_by_frame_range[frame_range] + + # We initialize render state false to indicate it wasn't successful + # yet to keep track of whether Fusion succeeded. 
This is for cases + # where an error below this might cause the comp render result not + # to be stored for the instances of this batch + for render_instance in render_instances: + render_instance.data[self.is_rendered_key] = False + + savers_to_render = [inst.data["tool"] for inst in render_instances] + current_comp = instance.context.data["currentComp"] + frame_start, frame_end = frame_range + + self.log.info( + f"Starting Fusion render frame range {frame_start}-{frame_end}" + ) + saver_names = ", ".join(saver.Name for saver in savers_to_render) + self.log.info(f"Rendering tools: {saver_names}") + + with comp_lock_and_undo_chunk(current_comp): + with maintained_comp_range(current_comp): + with enabled_savers(current_comp, savers_to_render): + result = current_comp.Render( + { + "Start": frame_start, + "End": frame_end, + "Wait": True, + } + ) + + # Store the render state for all the rendered instances + for render_instance in render_instances: + render_instance.data[self.is_rendered_key] = bool(result) + + return result + + def _add_representation(self, instance): + """Add representation to instance""" + + expected_files = instance.data["expectedFiles"] + + start = instance.data["frameStart"] - instance.data["handleStart"] + + path = expected_files[0] + _, padding, ext = get_frame_path(path) + + staging_dir = os.path.dirname(path) + + repre = { + "name": ext[1:], + "ext": ext[1:], + "frameStart": f"%0{padding}d" % start, + "files": [os.path.basename(f) for f in expected_files], + "stagingDir": staging_dir, + } + + self.set_representation_colorspace( + representation=repre, + context=instance.context, + ) + + # review representation + if instance.data.get("review", False): + repre["tags"] = ["review"] + + # add the repre to the instance + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(repre) + + return instance + + def get_render_instances_by_frame_range(self, context): + """Return enabled render.local instances grouped by their frame range. 
+ + Arguments: + context (pyblish.Context): The pyblish context + + Returns: + dict: (start, end): instances mapping + + """ + + instances_to_render = [ + instance for instance in context if + # Only active instances + instance.data.get("publish", True) and + # Only render.local instances + "render.local" in instance.data.get("families", []) + ] + + # Instances by frame ranges + instances_by_frame_range = collections.defaultdict(list) + for instance in instances_to_render: + start, end = self.get_instance_render_frame_range(instance) + instances_by_frame_range[(start, end)].append(instance) + + return dict(instances_by_frame_range) + + def get_instance_render_frame_range(self, instance): + start = instance.data["frameStartHandle"] + end = instance.data["frameEndHandle"] + return start, end diff --git a/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py b/openpype/hosts/fusion/plugins/publish/increment_current_file.py similarity index 53% rename from openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py rename to openpype/hosts/fusion/plugins/publish/increment_current_file.py index 5c595638e9..08a65bf52d 100644 --- a/openpype/hosts/fusion/plugins/publish/increment_current_file_deadline.py +++ b/openpype/hosts/fusion/plugins/publish/increment_current_file.py @@ -1,29 +1,39 @@ import pyblish.api +from openpype.pipeline import OptionalPyblishPluginMixin +from openpype.pipeline import KnownPublishError -class FusionIncrementCurrentFile(pyblish.api.ContextPlugin): + +class FusionIncrementCurrentFile( + pyblish.api.ContextPlugin, OptionalPyblishPluginMixin +): """Increment the current file. Saves the current file with an increased version number. """ - label = "Increment current file" + label = "Increment workfile version" order = pyblish.api.IntegratorOrder + 9.0 hosts = ["fusion"] - families = ["render.farm"] optional = True def process(self, context): + if not self.is_active(context.data): + return from openpype.lib import version_up from openpype.pipeline.publish import get_errored_plugins_from_context errored_plugins = get_errored_plugins_from_context(context) - if any(plugin.__name__ == "FusionSubmitDeadline" - for plugin in errored_plugins): - raise RuntimeError("Skipping incrementing current file because " - "submission to render farm failed.") + if any( + plugin.__name__ == "FusionSubmitDeadline" + for plugin in errored_plugins + ): + raise KnownPublishError( + "Skipping incrementing current file because " + "submission to render farm failed." + ) comp = context.data.get("currentComp") assert comp, "Must have comp" diff --git a/openpype/hosts/fusion/plugins/publish/render_local.py b/openpype/hosts/fusion/plugins/publish/render_local.py deleted file mode 100644 index 79e458b40a..0000000000 --- a/openpype/hosts/fusion/plugins/publish/render_local.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -from pprint import pformat - -import pyblish.api -from openpype.hosts.fusion.api import comp_lock_and_undo_chunk - - -class Fusionlocal(pyblish.api.InstancePlugin): - """Render the current Fusion composition locally. - - Extract the result of savers by starting a comp render - This will run the local render of Fusion. 
- - """ - - order = pyblish.api.ExtractorOrder - 0.1 - label = "Render Local" - hosts = ["fusion"] - families = ["render.local"] - - def process(self, instance): - - # This plug-in runs only once and thus assumes all instances - # currently will render the same frame range - context = instance.context - key = "__hasRun{}".format(self.__class__.__name__) - if context.data.get(key, False): - return - else: - context.data[key] = True - - current_comp = context.data["currentComp"] - frame_start = context.data["frameStartHandle"] - frame_end = context.data["frameEndHandle"] - path = instance.data["path"] - output_dir = instance.data["outputDir"] - - ext = os.path.splitext(os.path.basename(path))[-1] - - self.log.info("Starting render") - self.log.info("Start frame: {}".format(frame_start)) - self.log.info("End frame: {}".format(frame_end)) - - with comp_lock_and_undo_chunk(current_comp): - result = current_comp.Render({ - "Start": frame_start, - "End": frame_end, - "Wait": True - }) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - collected_frames = os.listdir(output_dir) - repre = { - 'name': ext[1:], - 'ext': ext[1:], - 'frameStart': "%0{}d".format(len(str(frame_end))) % frame_start, - 'files': collected_frames, - "stagingDir": output_dir, - } - instance.data["representations"].append(repre) - - # review representation - repre_preview = repre.copy() - repre_preview["name"] = repre_preview["ext"] = "mp4" - repre_preview["tags"] = ["review", "preview", "ftrackreview", "delete"] - instance.data["representations"].append(repre_preview) - - self.log.debug(f"_ instance.data: {pformat(instance.data)}") - - if not result: - raise RuntimeError("Comp render failed") diff --git a/openpype/hosts/fusion/plugins/publish/save_scene.py b/openpype/hosts/fusion/plugins/publish/save_scene.py index 0cdfafa095..0798e7c8b7 100644 --- a/openpype/hosts/fusion/plugins/publish/save_scene.py +++ b/openpype/hosts/fusion/plugins/publish/save_scene.py @@ -7,7 +7,7 @@ class FusionSaveComp(pyblish.api.ContextPlugin): label = "Save current file" order = pyblish.api.ExtractorOrder - 0.49 hosts = ["fusion"] - families = ["render"] + families = ["render", "workfile"] def process(self, context): @@ -17,5 +17,5 @@ class FusionSaveComp(pyblish.api.ContextPlugin): current = comp.GetAttrs().get("COMPS_FileName", "") assert context.data['currentFile'] == current - self.log.info("Saving current file..") + self.log.info("Saving current file: {}".format(current)) comp.Save() diff --git a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py b/openpype/hosts/fusion/plugins/publish/validate_background_depth.py index 4268fab528..6908889eb4 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_background_depth.py +++ b/openpype/hosts/fusion/plugins/publish/validate_background_depth.py @@ -1,21 +1,29 @@ import pyblish.api -from openpype.pipeline.publish import RepairAction +from openpype.pipeline import ( + publish, + OptionalPyblishPluginMixin, + PublishValidationError, +) + +from openpype.hosts.fusion.api.action import SelectInvalidAction -class ValidateBackgroundDepth(pyblish.api.InstancePlugin): +class ValidateBackgroundDepth( + pyblish.api.InstancePlugin, OptionalPyblishPluginMixin +): """Validate if all Background tool are set to float32 bit""" order = pyblish.api.ValidatorOrder label = "Validate Background Depth 32 bit" - actions = [RepairAction] hosts = ["fusion"] families = ["render"] optional = True + actions = [SelectInvalidAction, publish.RepairAction] + @classmethod 
     def get_invalid(cls, instance):
-
         context = instance.context
         comp = context.data.get("currentComp")
         assert comp, "Must have Comp object"
@@ -27,10 +35,16 @@ class ValidateBackgroundDepth(
         return [i for i in backgrounds if i.GetInput("Depth") != 4.0]
 
     def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found %i nodes which are not set to float32"
-                               % len(invalid))
+            raise PublishValidationError(
+                "Found {} Background tools which"
+                " are not set to float32".format(len(invalid)),
+                title=self.label,
+            )
 
     @classmethod
     def repair(cls, instance):
diff --git a/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py b/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py
index cabe65af6e..748047e8cf 100644
--- a/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py
+++ b/openpype/hosts/fusion/plugins/publish/validate_comp_saved.py
@@ -1,6 +1,7 @@
 import os
 
 import pyblish.api
+from openpype.pipeline import PublishValidationError
 
 
 class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
@@ -19,10 +20,12 @@ class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
         filename = attrs["COMPS_FileName"]
         if not filename:
-            raise RuntimeError("Comp is not saved.")
+            raise PublishValidationError("Comp is not saved.",
+                                         title=self.label)
 
         if not os.path.exists(filename):
-            raise RuntimeError("Comp file does not exist: %s" % filename)
+            raise PublishValidationError(
+                "Comp file does not exist: %s" % filename, title=self.label)
 
         if attrs["COMPB_Modified"]:
             self.log.warning("Comp is modified. Save your comp to ensure your "
diff --git a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py
index f6beefefc1..35c92163eb 100644
--- a/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py
+++ b/openpype/hosts/fusion/plugins/publish/validate_create_folder_checked.py
@@ -1,6 +1,9 @@
 import pyblish.api
 
 from openpype.pipeline.publish import RepairAction
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
@@ -11,28 +14,28 @@ class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
     """
 
     order = pyblish.api.ValidatorOrder
-    actions = [RepairAction]
     label = "Validate Create Folder Checked"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [RepairAction, SelectInvalidAction]
 
     @classmethod
     def get_invalid(cls, instance):
-        active = instance.data.get("active", instance.data.get("publish"))
-        if not active:
-            return []
-
-        tool = instance[0]
+        tool = instance.data["tool"]
         create_dir = tool.GetInput("CreateDir")
         if create_dir == 0.0:
-            cls.log.error("%s has Create Folder turned off" % instance[0].Name)
+            cls.log.error(
+                "%s has Create Folder turned off" % tool.Name
+            )
             return [tool]
 
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found Saver with Create Folder During "
-                               "Render checked off")
+            raise PublishValidationError(
+                "Found Saver with Create Folder During Render checked off",
+                title=self.label,
+            )
 
     @classmethod
     def repair(cls, instance):
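The validator changes in this area all converge on the same shape: a classmethod `get_invalid` shared by `process`, `repair` and the select action, plus a `PublishValidationError` for the publisher UI. Below is a minimal sketch of that shape using the imports the plug-ins above already use; the `instance.data["tool"]` lookup and the CreateDir check are illustrative placeholders, not a real plug-in.

```python
import pyblish.api

from openpype.pipeline import PublishValidationError
from openpype.pipeline.publish import RepairAction
from openpype.hosts.fusion.api.action import SelectInvalidAction


class ValidateSomethingSketch(pyblish.api.InstancePlugin):
    """Skeleton of the get_invalid/process/repair pattern used above."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Something (sketch)"
    families = ["render"]
    hosts = ["fusion"]
    actions = [RepairAction, SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        # Single source of truth: process(), repair() and the select
        # action all reuse this. Return the offending tools.
        tool = instance.data["tool"]
        if tool.GetInput("CreateDir") == 0.0:  # example check only
            return [tool]
        return []

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError(
                "Found an invalid tool", title=self.label
            )

    @classmethod
    def repair(cls, instance):
        # RepairAction calls this; fix exactly what get_invalid() flags.
        for tool in cls.get_invalid(instance):
            tool.SetInput("CreateDir", 1.0)
```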
diff --git a/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py
new file mode 100644
index 0000000000..3f84f59678
--- /dev/null
+++ b/openpype/hosts/fusion/plugins/publish/validate_expected_frames_existence.py
@@ -0,0 +1,66 @@
+import os
+import pyblish.api
+
+from openpype.pipeline.publish import RepairAction
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
+
+
+class ValidateLocalFramesExistence(pyblish.api.InstancePlugin):
+    """Checks that files exist for savers that are set
+    to publish expected frames.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Expected Frames Exist"
+    families = ["render.frames"]
+    hosts = ["fusion"]
+    actions = [RepairAction, SelectInvalidAction]
+
+    @classmethod
+    def get_invalid(cls, instance, non_existing_frames=None):
+        if non_existing_frames is None:
+            non_existing_frames = []
+
+        tool = instance.data["tool"]
+
+        expected_files = instance.data["expectedFiles"]
+
+        for file in expected_files:
+            if not os.path.exists(file):
+                cls.log.error(
+                    f"Missing file: {file}"
+                )
+                non_existing_frames.append(file)
+
+        if len(non_existing_frames) > 0:
+            cls.log.error(f"Some of {tool.Name}'s files do not exist")
+            return [tool]
+
+    def process(self, instance):
+        non_existing_frames = []
+        invalid = self.get_invalid(instance, non_existing_frames)
+        if invalid:
+            raise PublishValidationError(
+                "{} is set to publish existing frames but "
+                "some frames are missing. "
+                "The missing file(s) are:\n\n{}".format(
+                    invalid[0].Name,
+                    "\n\n".join(non_existing_frames),
+                ),
+                title=self.label,
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        invalid = cls.get_invalid(instance)
+        if invalid:
+            tool = instance.data["tool"]
+            # Change render target to local to render locally
+            tool.SetData("openpype.creator_attributes.render_target", "local")
+
+            cls.log.info(
+                f"Reload the publisher and {tool.Name} "
+                "will be set to render locally"
+            )
diff --git a/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py b/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py
index 4795a2aa05..537e43c875 100644
--- a/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py
+++ b/openpype/hosts/fusion/plugins/publish/validate_filename_has_extension.py
@@ -1,6 +1,9 @@
 import os
 
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
@@ -16,20 +19,22 @@ class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
     label = "Validate Filename Has Extension"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Found Saver without an extension")
+            raise PublishValidationError("Found Saver without an extension",
+                                         title=self.label)
 
     @classmethod
     def get_invalid(cls, instance):
 
-        path = instance.data["path"]
+        path = instance.data["expectedFiles"][0]
         fname, ext = os.path.splitext(path)
 
         if not ext:
-            tool = instance[0]
+            tool = instance.data["tool"]
             cls.log.error("%s has no extension specified" % tool.Name)
             return [tool]
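The existence check above boils down to regenerating the expected file names and testing them against disk. Here is a standalone sketch of that core, with the head/padding/ext values (normally produced by `get_frame_path` on the Saver's clip path) passed in directly:

```python
import os


def missing_frames(output_dir, head, padding, ext, start, end):
    """Return expected files in [start, end] that are not on disk.

    Sketch only: mirrors the loop in ValidateLocalFramesExistence with
    the file name pattern made explicit.
    """
    missing = []
    for frame in range(start, end + 1):
        path = os.path.join(output_dir, f"{head}{frame:0{padding}d}{ext}")
        if not os.path.exists(path):
            missing.append(path)
    return missing


# Example: checks for /renders/sh010_0995.exr .. /renders/sh010_1005.exr
print(missing_frames("/renders", "sh010_", 4, ".exr", 995, 1005))
```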
diff --git a/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py b/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py
new file mode 100644
index 0000000000..06cd0ca186
--- /dev/null
+++ b/openpype/hosts/fusion/plugins/publish/validate_instance_frame_range.py
@@ -0,0 +1,41 @@
+import pyblish.api
+
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateInstanceFrameRange(pyblish.api.InstancePlugin):
+    """Validate instance frame range is within comp's global render range."""
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Instance Frame Range"
+    families = ["render"]
+    hosts = ["fusion"]
+
+    def process(self, instance):
+
+        context = instance.context
+        global_start = context.data["compFrameStart"]
+        global_end = context.data["compFrameEnd"]
+
+        render_start = instance.data["frameStartHandle"]
+        render_end = instance.data["frameEndHandle"]
+
+        if render_start < global_start or render_end > global_end:
+
+            message = (
+                f"Instance {instance} render frame range "
+                f"({render_start}-{render_end}) is outside of the comp's "
+                f"global render range ({global_start}-{global_end}) and thus "
+                f"can't be rendered. "
+            )
+            description = (
+                f"{message}\n\n"
+                f"Either update the comp's global range or the instance's "
+                f"frame range to ensure the comp's frame range includes the "
+                f"frame range to render for the instance."
+            )
+            raise PublishValidationError(
+                title="Frame range outside of comp range",
+                message=message,
+                description=description
+            )
diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py b/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py
index 7243b44a3e..faf2102a8b 100644
--- a/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py
+++ b/openpype/hosts/fusion/plugins/publish/validate_saver_has_input.py
@@ -1,4 +1,7 @@
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateSaverHasInput(pyblish.api.InstancePlugin):
@@ -12,11 +15,12 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
     label = "Validate Saver Has Input"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     @classmethod
     def get_invalid(cls, instance):
 
-        saver = instance[0]
+        saver = instance.data["tool"]
         if not saver.Input.GetConnectedOutput():
             return [saver]
 
@@ -25,5 +29,8 @@ class ValidateSaverHasInput(pyblish.api.InstancePlugin):
     def process(self, instance):
         invalid = self.get_invalid(instance)
         if invalid:
-            raise RuntimeError("Saver has no incoming connection: "
-                               "{} ({})".format(instance, invalid[0].Name))
+            saver_name = invalid[0].Name
+            raise PublishValidationError(
+                "Saver has no incoming connection: {} ({})".format(instance,
+                                                                   saver_name),
+                title=self.label)
diff --git a/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py b/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py
index aed3835de3..9004976dc5 100644
--- a/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py
+++ b/openpype/hosts/fusion/plugins/publish/validate_saver_passthrough.py
@@ -1,4 +1,7 @@
 import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+from openpype.hosts.fusion.api.action import SelectInvalidAction
 
 
 class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
@@ -8,6 +11,7 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
     label = "Validate Saver Passthrough"
     families = ["render"]
     hosts = ["fusion"]
+    actions = [SelectInvalidAction]
 
     def process(self, context):
 
@@ -27,16 +31,17 @@ class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
         if invalid_instances:
             self.log.info("Reset pyblish to collect your current scene state, "
                           "that should fix error.")
-            raise RuntimeError("Invalid instances: "
-                               "{0}".format(invalid_instances))
+            raise PublishValidationError(
+                "Invalid instances: 
{0}".format(invalid_instances), + title=self.label) def is_invalid(self, instance): - saver = instance[0] + saver = instance.data["tool"] attr = saver.GetAttrs() active = not attr["TOOLB_PassThrough"] - if active != instance.data["publish"]: + if active != instance.data.get("publish", True): self.log.info("Saver has different passthrough state than " "Pyblish: {} ({})".format(instance, saver.Name)) return [saver] diff --git a/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py b/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py index b218a311ba..5b6ceb2fdb 100644 --- a/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py +++ b/openpype/hosts/fusion/plugins/publish/validate_unique_subsets.py @@ -1,29 +1,55 @@ +from collections import defaultdict + import pyblish.api +from openpype.pipeline import PublishValidationError + +from openpype.hosts.fusion.api.action import SelectInvalidAction -class ValidateUniqueSubsets(pyblish.api.InstancePlugin): +class ValidateUniqueSubsets(pyblish.api.ContextPlugin): """Ensure all instances have a unique subset name""" order = pyblish.api.ValidatorOrder label = "Validate Unique Subsets" families = ["render"] hosts = ["fusion"] + actions = [SelectInvalidAction] @classmethod - def get_invalid(cls, instance): + def get_invalid(cls, context): - context = instance.context - subset = instance.data["subset"] - for other_instance in context: - if other_instance == instance: - continue + # Collect instances per subset per asset + instances_per_subset_asset = defaultdict(lambda: defaultdict(list)) + for instance in context: + asset = instance.data.get("asset", context.data.get("asset")) + subset = instance.data.get("subset", context.data.get("subset")) + instances_per_subset_asset[asset][subset].append(instance) - if other_instance.data["subset"] == subset: - return [instance] # current instance is invalid + # Find which asset + subset combination has more than one instance + # Those are considered invalid because they'd integrate to the same + # destination. + invalid = [] + for asset, instances_per_subset in instances_per_subset_asset.items(): + for subset, instances in instances_per_subset.items(): + if len(instances) > 1: + cls.log.warning( + "{asset} > {subset} used by more than " + "one instance: {instances}".format( + asset=asset, + subset=subset, + instances=instances + ) + ) + invalid.extend(instances) - return [] + # Return tools for the invalid instances so they can be selected + invalid = [instance.data["tool"] for instance in invalid] - def process(self, instance): - invalid = self.get_invalid(instance) + return invalid + + def process(self, context): + invalid = self.get_invalid(context) if invalid: - raise RuntimeError("Animation content is invalid. 
See log.") + raise PublishValidationError("Multiple instances are set to " + "the same asset > subset.", + title=self.label) diff --git a/openpype/hosts/fusion/scripts/set_rendermode.py b/openpype/hosts/fusion/scripts/set_rendermode.py deleted file mode 100644 index 9d2bfef310..0000000000 --- a/openpype/hosts/fusion/scripts/set_rendermode.py +++ /dev/null @@ -1,112 +0,0 @@ -from qtpy import QtWidgets -import qtawesome -from openpype.hosts.fusion.api import get_current_comp - - -_help = {"local": "Render the comp on your own machine and publish " - "it from that the destination folder", - "farm": "Submit a Fusion render job to a Render farm to use all other" - " computers and add a publish job"} - - -class SetRenderMode(QtWidgets.QWidget): - - def __init__(self, parent=None): - QtWidgets.QWidget.__init__(self, parent) - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - - self.setWindowTitle("Set Render Mode") - self.setFixedSize(300, 175) - - layout = QtWidgets.QVBoxLayout() - - # region comp info - comp_info_layout = QtWidgets.QHBoxLayout() - - update_btn = QtWidgets.QPushButton(qtawesome.icon("fa.refresh", - color="white"), "") - update_btn.setFixedWidth(25) - update_btn.setFixedHeight(25) - - comp_information = QtWidgets.QLineEdit() - comp_information.setEnabled(False) - - comp_info_layout.addWidget(comp_information) - comp_info_layout.addWidget(update_btn) - # endregion comp info - - # region modes - mode_options = QtWidgets.QComboBox() - mode_options.addItems(_help.keys()) - - mode_information = QtWidgets.QTextEdit() - mode_information.setReadOnly(True) - # endregion modes - - accept_btn = QtWidgets.QPushButton("Accept") - - layout.addLayout(comp_info_layout) - layout.addWidget(mode_options) - layout.addWidget(mode_information) - layout.addWidget(accept_btn) - - self.setLayout(layout) - - self.comp_information = comp_information - self.update_btn = update_btn - - self.mode_options = mode_options - self.mode_information = mode_information - - self.accept_btn = accept_btn - - self.connections() - self.update() - - # Force updated render mode help text - self._update_rendermode_info() - - def connections(self): - """Build connections between code and buttons""" - - self.update_btn.clicked.connect(self.update) - self.accept_btn.clicked.connect(self._set_comp_rendermode) - self.mode_options.currentIndexChanged.connect( - self._update_rendermode_info) - - def update(self): - """Update all information in the UI""" - - self._comp = get_current_comp() - self._comp_name = self._get_comp_name() - self.comp_information.setText(self._comp_name) - - # Update current comp settings - mode = self._get_comp_rendermode() - index = self.mode_options.findText(mode) - self.mode_options.setCurrentIndex(index) - - def _update_rendermode_info(self): - rendermode = self.mode_options.currentText() - self.mode_information.setText(_help[rendermode]) - - def _get_comp_name(self): - return self._comp.GetAttrs("COMPS_Name") - - def _get_comp_rendermode(self): - return self._comp.GetData("openpype.rendermode") or "local" - - def _set_comp_rendermode(self): - rendermode = self.mode_options.currentText() - self._comp.SetData("openpype.rendermode", rendermode) - - self._comp.Print("Updated render mode to '%s'\n" % rendermode) - self.hide() - - def _validation(self): - ui_mode = self.mode_options.currentText() - comp_mode = self._get_comp_rendermode() - - return comp_mode == ui_mode diff --git a/openpype/hosts/harmony/api/README.md b/openpype/hosts/harmony/api/README.md index 
b39f900886..12f21f551a 100644
--- a/openpype/hosts/harmony/api/README.md
+++ b/openpype/hosts/harmony/api/README.md
@@ -432,11 +432,11 @@ copy_files = """function copyFile(srcFilename, dstFilename)
 
 import_files = """function %s_import_files()
 {
-    var PNGTransparencyMode = 0; // Premultiplied wih Black
-    var TGATransparencyMode = 0; // Premultiplied wih Black
-    var SGITransparencyMode = 0; // Premultiplied wih Black
+    var PNGTransparencyMode = 0; // Premultiplied with Black
+    var TGATransparencyMode = 0; // Premultiplied with Black
+    var SGITransparencyMode = 0; // Premultiplied with Black
     var LayeredPSDTransparencyMode = 1; // Straight
-    var FlatPSDTransparencyMode = 2; // Premultiplied wih White
+    var FlatPSDTransparencyMode = 2; // Premultiplied with White
 
     function getUniqueColumnName( column_prefix )
     {
diff --git a/openpype/hosts/harmony/api/TB_sceneOpened.js b/openpype/hosts/harmony/api/TB_sceneOpened.js
index e7cd555332..a284a6ec5c 100644
--- a/openpype/hosts/harmony/api/TB_sceneOpened.js
+++ b/openpype/hosts/harmony/api/TB_sceneOpened.js
@@ -142,10 +142,10 @@ function Client() {
   };
 
   /**
-   * Process recieved request. This will eval recieved function and produce
+   * Process received request. This will eval received function and produce
    * results.
    * @function
-   * @param {object} request - recieved request JSON
+   * @param {object} request - received request JSON
    * @return {object} result of evaled function.
    */
   self.processRequest = function(request) {
@@ -245,7 +245,7 @@ function Client() {
     var request = JSON.parse(to_parse);
     var mid = request.message_id;
     // self.logDebug('[' + mid + '] - Request: ' + '\n' + JSON.stringify(request));
-    self.logDebug('[' + mid + '] Recieved.');
+    self.logDebug('[' + mid + '] Received.');
 
     request.result = self.processRequest(request);
     self.logDebug('[' + mid + '] Processing done.');
@@ -286,8 +286,8 @@ function Client() {
   /**
    Harmony 21.1 doesn't have QDataStream anymore. This means we aren't
    able to write bytes into QByteArray so we had
-   modify how content lenght is sent do the server.
-   Content lenght is sent as string of 8 char convertible into integer
+   to modify how content length is sent to the server.
+ Content length is sent as string of 8 char convertible into integer (instead of 0x00000001[4 bytes] > "000000001"[8 bytes]) */ var codec_name = new QByteArray().append("UTF-8"); @@ -476,6 +476,25 @@ function start() { action.triggered.connect(self.onSubsetManage); } + /** + * Set scene settings from DB to the scene + */ + self.onSetSceneSettings = function() { + app.avalonClient.send( + { + "module": "openpype.hosts.harmony.api", + "method": "ensure_scene_settings", + "args": [] + }, + false + ); + }; + // add Set Scene Settings + if (app.avalonMenu == null) { + action = menu.addAction('Set Scene Settings...'); + action.triggered.connect(self.onSetSceneSettings); + } + /** * Show Experimental dialog */ diff --git a/openpype/hosts/harmony/api/lib.py b/openpype/hosts/harmony/api/lib.py index e1e77bfbee..b009dabb44 100644 --- a/openpype/hosts/harmony/api/lib.py +++ b/openpype/hosts/harmony/api/lib.py @@ -242,9 +242,15 @@ def launch_zip_file(filepath): print(f"Localizing {filepath}") temp_path = get_local_harmony_path(filepath) + scene_name = os.path.basename(temp_path) + if os.path.exists(os.path.join(temp_path, scene_name)): + # unzipped with duplicated scene_name + temp_path = os.path.join(temp_path, scene_name) + scene_path = os.path.join( - temp_path, os.path.basename(temp_path) + ".xstage" + temp_path, scene_name + ".xstage" ) + unzip = False if os.path.exists(scene_path): # Check remote scene is newer than local. @@ -262,6 +268,10 @@ def launch_zip_file(filepath): with _ZipFile(filepath, "r") as zip_ref: zip_ref.extractall(temp_path) + if os.path.exists(os.path.join(temp_path, scene_name)): + # unzipped with duplicated scene_name + temp_path = os.path.join(temp_path, scene_name) + # Close existing scene. if ProcessContext.pid: os.kill(ProcessContext.pid, signal.SIGTERM) @@ -309,7 +319,7 @@ def launch_zip_file(filepath): ) if not os.path.exists(scene_path): - print("error: cannot determine scene file") + print("error: cannot determine scene file {}".format(scene_path)) ProcessContext.server.stop() return @@ -394,7 +404,7 @@ def get_scene_data(): "function": "AvalonHarmony.getSceneData" })["result"] except json.decoder.JSONDecodeError: - # Means no sceen metadata has been made before. + # Means no scene metadata has been made before. return {} except KeyError: # Means no existing scene metadata has been made. @@ -465,7 +475,7 @@ def imprint(node_id, data, remove=False): Example: >>> from openpype.hosts.harmony.api import lib >>> node = "Top/Display" - >>> data = {"str": "someting", "int": 1, "float": 0.32, "bool": True} + >>> data = {"str": "something", "int": 1, "float": 0.32, "bool": True} >>> lib.imprint(layer, data) """ scene_data = get_scene_data() @@ -550,7 +560,7 @@ def save_scene(): method prevents this double request and safely saves the scene. """ - # Need to turn off the backgound watcher else the communication with + # Need to turn off the background watcher else the communication with # the server gets spammed with two requests at the same time. scene_path = send( {"function": "AvalonHarmony.saveScene"})["result"] diff --git a/openpype/hosts/harmony/api/pipeline.py b/openpype/hosts/harmony/api/pipeline.py index 4b9849c190..285ee806a1 100644 --- a/openpype/hosts/harmony/api/pipeline.py +++ b/openpype/hosts/harmony/api/pipeline.py @@ -126,10 +126,6 @@ def check_inventory(): def application_launch(event): """Event that is executed after Harmony is launched.""" - # FIXME: This is breaking server <-> client communication. - # It is now moved so it it manually called. 
- # ensure_scene_settings() - # check_inventory() # fills OPENPYPE_HARMONY_JS pype_harmony_path = Path(__file__).parent.parent / "js" / "PypeHarmony.js" pype_harmony_js = pype_harmony_path.read_text() @@ -146,6 +142,9 @@ def application_launch(event): harmony.send({"script": script}) inject_avalon_js() + # ensure_scene_settings() + check_inventory() + def export_template(backdrops, nodes, filepath): """Export Template to file. diff --git a/openpype/hosts/harmony/api/server.py b/openpype/hosts/harmony/api/server.py index 0de359ec61..04048e5c84 100644 --- a/openpype/hosts/harmony/api/server.py +++ b/openpype/hosts/harmony/api/server.py @@ -40,6 +40,7 @@ class Server(threading.Thread): # Create a TCP/IP socket self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Bind the socket to the port server_address = ("127.0.0.1", port) @@ -60,7 +61,7 @@ class Server(threading.Thread): "module": (str), # Module of method. "method" (str), # Name of method in module. "args" (list), # Arguments to pass to method. - "kwargs" (dict), # Keywork arguments to pass to method. + "kwargs" (dict), # Keyword arguments to pass to method. "reply" (bool), # Optional wait for method completion. } """ @@ -91,7 +92,13 @@ class Server(threading.Thread): self.log.info("wait ttt") # Receive the data in small chunks and retransmit it request = None - header = self.connection.recv(10) + try: + header = self.connection.recv(10) + except OSError: + # could happen on MacOS + self.log.info("") + break + if len(header) == 0: # null data received, socket is closing. self.log.info(f"[{self.timestamp()}] Connection closing.") diff --git a/openpype/hosts/harmony/plugins/load/load_imagesequence.py b/openpype/hosts/harmony/plugins/load/load_imagesequence.py index 1b64aff595..b95d25f507 100644 --- a/openpype/hosts/harmony/plugins/load/load_imagesequence.py +++ b/openpype/hosts/harmony/plugins/load/load_imagesequence.py @@ -20,8 +20,9 @@ class ImageSequenceLoader(load.LoaderPlugin): Stores the imported asset in a container named after the asset. """ - families = ["shot", "render", "image", "plate", "reference"] - representations = ["jpeg", "png", "jpg"] + families = ["shot", "render", "image", "plate", "reference", "review"] + representations = ["*"] + extensions = {"jpeg", "png", "jpg"} def load(self, context, name=None, namespace=None, data=None): """Plugin entry point. diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/openpype/hosts/harmony/plugins/publish/extract_render.py index 2f8169248e..38b09902c1 100644 --- a/openpype/hosts/harmony/plugins/publish/extract_render.py +++ b/openpype/hosts/harmony/plugins/publish/extract_render.py @@ -25,8 +25,9 @@ class ExtractRender(pyblish.api.InstancePlugin): application_path = instance.context.data.get("applicationPath") scene_path = instance.context.data.get("scenePath") frame_rate = instance.context.data.get("frameRate") - frame_start = instance.context.data.get("frameStart") - frame_end = instance.context.data.get("frameEnd") + # real value from timeline + frame_start = instance.context.data.get("frameStartHandle") + frame_end = instance.context.data.get("frameEndHandle") audio_path = instance.context.data.get("audioPath") if audio_path and os.path.exists(audio_path): @@ -55,9 +56,13 @@ class ExtractRender(pyblish.api.InstancePlugin): # Execute rendering. Ignoring error cause Harmony returns error code # always. 
diff --git a/openpype/hosts/harmony/plugins/publish/extract_render.py b/openpype/hosts/harmony/plugins/publish/extract_render.py
index 2f8169248e..38b09902c1 100644
--- a/openpype/hosts/harmony/plugins/publish/extract_render.py
+++ b/openpype/hosts/harmony/plugins/publish/extract_render.py
@@ -25,8 +25,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
         application_path = instance.context.data.get("applicationPath")
         scene_path = instance.context.data.get("scenePath")
         frame_rate = instance.context.data.get("frameRate")
-        frame_start = instance.context.data.get("frameStart")
-        frame_end = instance.context.data.get("frameEnd")
+        # real value from timeline
+        frame_start = instance.context.data.get("frameStartHandle")
+        frame_end = instance.context.data.get("frameEndHandle")
         audio_path = instance.context.data.get("audioPath")
 
         if audio_path and os.path.exists(audio_path):
@@ -55,9 +56,13 @@ class ExtractRender(pyblish.api.InstancePlugin):
 
         # Execute rendering. Ignoring error cause Harmony returns error code
         # always.
-        self.log.info(f"running [ {application_path} -batch {scene_path}")
+
+        args = [application_path, "-batch",
+                "-frames", str(frame_start), str(frame_end),
+                "-scene", scene_path]
+        self.log.info(f"running: {' '.join(args)}")
         proc = subprocess.Popen(
-            [application_path, "-batch", scene_path],
+            args,
             stdout=subprocess.PIPE,
             stderr=subprocess.STDOUT,
             stdin=subprocess.PIPE
@@ -108,9 +113,9 @@ class ExtractRender(pyblish.api.InstancePlugin):
         output = process.communicate()[0]
 
         if process.returncode != 0:
-            raise ValueError(output.decode("utf-8"))
+            raise ValueError(output.decode("utf-8", errors="backslashreplace"))
 
-        self.log.debug(output.decode("utf-8"))
+        self.log.debug(output.decode("utf-8", errors="backslashreplace"))
 
         # Generate representations.
         extension = collection.tail[1:]
diff --git a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
index 936533abd6..6e4c6955e4 100644
--- a/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
+++ b/openpype/hosts/harmony/plugins/publish/validate_scene_settings.py
@@ -60,7 +60,8 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
         #   which is available on 'context.data["assetEntity"]'
         # - the same approach can be used in 'ValidateSceneSettingsRepair'
         expected_settings = harmony.get_asset_settings()
-        self.log.info("scene settings from DB:".format(expected_settings))
+        self.log.info("scene settings from DB: {}".format(expected_settings))
+        expected_settings.pop("entityType")  # not useful for the validation
 
         expected_settings = _update_frames(dict.copy(expected_settings))
         expected_settings["frameEndHandle"] = expected_settings["frameEnd"] +\
@@ -68,21 +69,32 @@
 
         if (any(re.search(pattern, os.getenv('AVALON_TASK'))
                 for pattern in self.skip_resolution_check)):
+            self.log.info("Skipping resolution check because of "
+                          "task name and pattern {}".format(
+                              self.skip_resolution_check))
             expected_settings.pop("resolutionWidth")
             expected_settings.pop("resolutionHeight")
 
-        entity_type = expected_settings.get("entityType")
-        if (any(re.search(pattern, entity_type)
+        if (any(re.search(pattern, os.getenv('AVALON_TASK'))
                 for pattern in self.skip_timelines_check)):
+            self.log.info("Skipping frames check because of "
+                          "task name and pattern {}".format(
+                              self.skip_timelines_check))
             expected_settings.pop('frameStart', None)
             expected_settings.pop('frameEnd', None)
-
-        expected_settings.pop("entityType")  # not useful after the check
+            expected_settings.pop('frameStartHandle', None)
+            expected_settings.pop('frameEndHandle', None)
 
         asset_name = instance.context.data['anatomyData']['asset']
         if any(re.search(pattern, asset_name)
                for pattern in self.frame_check_filter):
-            expected_settings.pop("frameEnd")
+            self.log.info("Skipping frames check because of "
+                          "asset name and pattern {}".format(
+                              self.frame_check_filter))
+            expected_settings.pop('frameStart', None)
+            expected_settings.pop('frameEnd', None)
+            expected_settings.pop('frameStartHandle', None)
+            expected_settings.pop('frameEndHandle', None)
 
         # handle case where ftrack uses only two decimal places
         # 23.976023976023978 vs. 23.98
@@ -99,6 +111,7 @@ class ValidateSceneSettings(pyblish.api.InstancePlugin):
             "frameEnd": instance.context.data["frameEnd"],
             "handleStart": instance.context.data.get("handleStart"),
             "handleEnd": instance.context.data.get("handleEnd"),
+            "frameStartHandle": instance.context.data.get("frameStartHandle"),
             "frameEndHandle": instance.context.data.get("frameEndHandle"),
             "resolutionWidth": instance.context.data.get("resolutionWidth"),
             "resolutionHeight": instance.context.data.get("resolutionHeight"),
diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/README.md b/openpype/hosts/harmony/vendor/OpenHarmony/README.md
index 7c77fbfcfa..064afca86c 100644
--- a/openpype/hosts/harmony/vendor/OpenHarmony/README.md
+++ b/openpype/hosts/harmony/vendor/OpenHarmony/README.md
@@ -6,7 +6,7 @@ Ever tried to make a simple script for toonboom Harmony, then got stumped by the
 Toonboom Harmony is a very powerful software, with hundreds of functions and tools, and it unlocks a great amount of possibilities for animation studios around the globe. And... being the produce of the hard work of a small team forced to prioritise, it can also be a bit rustic at times!
 
-We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developpers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it.
+We are users at heart, animators and riggers, who just want to interact with the software as simply as possible. Simplicity is at the heart of the design of openHarmony. But we also are developers, and we made the library for people like us who can't resist tweaking the software and bend it in all possible ways, and are looking for powerful functions to help them do it.
 
 This library's aim is to create a more direct way to interact with Toonboom through scripts, by providing a more intuitive way to access its elements, and help with the cumbersome and repetitive tasks as well as help unlock untapped potential in its many available systems. So we can go from having to do things like this:
 
@@ -78,7 +78,7 @@ All you have to do is call :
 ```javascript
 include("openHarmony.js");
 ```
-at the beggining of your script.
+at the beginning of your script.
 
 You can ask your users to download their copy of the library and store it alongside, or bundle it as you wish as long as you include the license file provided on this repository.
 
@@ -129,7 +129,7 @@ Check that the environment variable `LIB_OPENHARMONY_PATH` is set correctly to t
 ## How to add openHarmony to vscode intellisense for autocompletion
 
 Although not fully supported, you can get most of the autocompletion features to work by adding the following lines to a `jsconfig.json` file placed at the root of your working folder.
-The paths need to be relative which means the openHarmony source code must be placed directly in your developping environnement.
+The paths need to be relative, which means the openHarmony source code must be placed directly in your developing environment.
For example, if your working folder contains the openHarmony source in a folder called `OpenHarmony` and your working scripts in a folder called `myScripts`, place the `jsconfig.json` file at the root of the folder and add these lines to the file: diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js index 530c0902c5..ae65d32a2b 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -78,7 +78,7 @@ * $.log("hello"); // prints out a message to the MessageLog. * var myPoint = new $.oPoint(0,0,0); // create a new class instance from an openHarmony class. * - * // function members of the $ objects get published to the global scope, which means $ can be ommited + * // function members of the $ objects get published to the global scope, which means $ can be omitted * * log("hello"); * var myPoint = new oPoint(0,0,0); // This is all valid @@ -118,7 +118,7 @@ Object.defineProperty( $, "directory", { /** - * Wether Harmony is run with the interface or simply from command line + * Whether Harmony is run with the interface or simply from command line */ Object.defineProperty( $, "batchMode", { get: function(){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js index ad1efc91be..a54f74e147 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_actions.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
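The README hunk above refers to "these lines" for `jsconfig.json`, but the snippet itself sits outside the context lines of this diff. For readers following along, a plausible minimal file for the folder layout the README describes could look like this; the exact include globs are an assumption, not a quote from the repository:

```json
{
    "include": [
        "OpenHarmony/*",
        "myScripts/*"
    ]
}
```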
// @@ -67,7 +67,7 @@ * @hideconstructor * @namespace * @example - * // To check wether an action is available, call the synthax: + * // To check whether an action is available, call the synthax: * Action.validate (, ); * * // To launch an action, call the synthax: diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js index 9e9acb766c..5809cee694 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_application.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -409,7 +409,7 @@ $.oApp.prototype.getToolByName = function(toolName){ /** - * returns the list of stencils useable by the specified tool + * returns the list of stencils usable by the specified tool * @param {$.oTool} tool the tool object we want valid stencils for * @return {$.oStencil[]} the list of stencils compatible with the specified tool */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js index d4d2d791ae..fa044d5b74 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_attribute.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -338,7 +338,7 @@ Object.defineProperty($.oAttribute.prototype, "useSeparate", { * Returns the default value of the attribute for most keywords * @name $.oAttribute#defaultValue * @type {bool} - * @todo switch the implentation to types? + * @todo switch the implementation to types? * @example * // to reset an attribute to its default value: * // (mostly used for position/angle/skew parameters of pegs and drawing nodes) @@ -449,7 +449,7 @@ $.oAttribute.prototype.getLinkedColumns = function(){ /** * Recursively sets an attribute to the same value as another. Both must have the same keyword. 
- * @param {bool} [duplicateColumns=false] In the case that the attribute has a column, wether to duplicate the column before linking + * @param {bool} [duplicateColumns=false] In the case that the attribute has a column, whether to duplicate the column before linking * @private */ $.oAttribute.prototype.setToAttributeValue = function(attributeToCopy, duplicateColumns){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js index c98e194539..1d359f93c4 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_backdrop.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js index 7726be6cd6..ff06688e66 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_color.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
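Ahead of the `parseColorFromInt` hunk below: the doc comment calls the packed backdrop color a 24 bit-shifted integer containing RGBA values. A hedged Python sketch of that unpacking, assuming red occupies the top byte as the "RGBA" ordering implies:

```python
color_int = 0x50BBFFFF  # illustrative packed value

# Shift each channel down and mask to a byte; the byte order here is an
# assumption taken from the "RGBA" wording in the doc comment below.
r = (color_int >> 24) & 0xFF
g = (color_int >> 16) & 0xFF
b = (color_int >> 8) & 0xFF
a = color_int & 0xFF
print(r, g, b, a)  # 80 187 255 255
```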
// @@ -158,7 +158,7 @@ $.oColorValue.prototype.fromColorString = function (hexString){ /** - * Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to thos oColorValue + * Uses a color integer (used in backdrops) and parses the INT; applies the RGBA components of the INT to the oColorValue * @param { int } colorInt 24 bit-shifted integer containing RGBA values */ $.oColorValue.prototype.parseColorFromInt = function(colorInt){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js index 1b73c7943e..f73309049e 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_column.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js index 73964c5c38..5440b92875 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_database.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js index a6e16ecb78..3ab78b87d6 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_dialog.js @@ -5,7 +5,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -17,7 +17,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. 
Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -250,7 +250,7 @@ $.oDialog.prototype.prompt = function( labelText, title, prefilledText){ /** * Prompts with a file selector window * @param {string} [text="Select a file:"] The title of the confirmation dialog. - * @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard charater "*". + * @param {string} [filter="*"] The filter for the file type and/or file name that can be selected. Accepts wildcard character "*". * @param {string} [getExisting=true] Whether to select an existing file or a save location * @param {string} [acceptMultiple=false] Whether or not selecting more than one file is ok. Is ignored if getExisting is falses. * @param {string} [startDirectory] The directory showed at the opening of the dialog. @@ -327,14 +327,14 @@ $.oDialog.prototype.browseForFolder = function(text, startDirectory){ * @constructor * @classdesc An simple progress dialog to display the progress of a task. * To react to the user clicking the cancel button, connect a function to $.oProgressDialog.canceled() signal. - * When $.batchmode is true, the progress will be outputed as a "Progress : value/range" string to the Harmony stdout. + * When $.batchmode is true, the progress will be outputted as a "Progress : value/range" string to the Harmony stdout. * @param {string} [labelText] The text displayed above the progress bar. * @param {string} [range=100] The maximum value that represents a full progress bar. * @param {string} [title] The title of the dialog * @param {bool} [show=false] Whether to immediately show the dialog. * * @property {bool} wasCanceled Whether the progress bar was cancelled. - * @property {$.oSignal} canceled A Signal emited when the dialog is canceled. Can be connected to a callback. + * @property {$.oSignal} canceled A Signal emitted when the dialog is canceled. Can be connected to a callback. */ $.oProgressDialog = function( labelText, range, title, show ){ if (typeof title === 'undefined') var title = "Progress"; @@ -608,7 +608,7 @@ $.oPieMenu = function( name, widgets, show, minAngle, maxAngle, radius, position this.maxAngle = maxAngle; this.globalCenter = position; - // how wide outisde the icons is the slice drawn + // how wide outside the icons is the slice drawn this._circleMargin = 30; // set these values before calling show() to customize the menu appearance @@ -974,7 +974,7 @@ $.oPieMenu.prototype.getMenuRadius = function(){ var _minRadius = UiLoader.dpiScale(30); var _speed = 10; // the higher the value, the slower the progression - // hyperbolic tangent function to determin the radius + // hyperbolic tangent function to determine the radius var exp = Math.exp(2*itemsNumber/_speed); var _radius = ((exp-1)/(exp+1))*_maxRadius+_minRadius; @@ -1383,7 +1383,7 @@ $.oActionButton.prototype.activate = function(){ * This class is a subclass of QPushButton and all the methods from that class are available to modify this button. 
 * @param {string} paletteName The name of the palette that contains the color
 * @param {string} colorName The name of the color (if more than one is present, will pick the first match)
- * @param {bool} showName Wether to display the name of the color on the button
+ * @param {bool} showName Whether to display the name of the color on the button
 * @param {QWidget} parent The parent QWidget for the button. Automatically set during initialisation of the menu.
 *
 */
@@ -1437,7 +1437,7 @@ $.oColorButton.prototype.activate = function(){
 * @name $.oScriptButton
 * @constructor
 * @classdescription This subclass of QPushButton provides an easy way to create a button for a widget that will launch a function from another script file.
- * The buttons created this way automatically load the icon named after the script if it finds one named like the funtion in a script-icons folder next to the script file.
+ * The buttons created this way automatically load the icon named after the script if it finds one named like the function in a script-icons folder next to the script file.
+ * It will also automatically set the callback to launch the function from the script.
* This class is a subclass of QPushButton and all the methods from that class are available to modify this button. * @param {string} scriptFile The path to the script file that will be launched diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js index bad735f237..6f2bc19c0c 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_drawing.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -426,7 +426,7 @@ Object.defineProperty($.oDrawing.prototype, 'drawingData', { /** * Import a given file into an existing drawing. * @param {$.oFile} file The path to the file - * @param {bool} [convertToTvg=false] Wether to convert the bitmap to the tvg format (this doesn't vectorise the drawing) + * @param {bool} [convertToTvg=false] Whether to convert the bitmap to the tvg format (this doesn't vectorise the drawing) * * @return { $.oFile } the oFile object pointing to the drawing file after being it has been imported into the element folder. */ @@ -878,8 +878,8 @@ $.oArtLayer.prototype.drawCircle = function(center, radius, lineStyle, fillStyle * @param {$.oVertex[]} path an array of $.oVertex objects that describe a path. * @param {$.oLineStyle} [lineStyle] the line style to draw with. (By default, will use the current stencil selection) * @param {$.oFillStyle} [fillStyle] the fill information for the path. (By default, will use the current palette selection) - * @param {bool} [polygon] Wether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path) - * @param {bool} [createUnderneath] Wether the new shape will appear on top or underneath the contents of the layer. (not working yet) + * @param {bool} [polygon] Whether bezier handles should be created for the points in the path (ignores "onCurve" properties of oVertex from path) + * @param {bool} [createUnderneath] Whether the new shape will appear on top or underneath the contents of the layer. (not working yet) */ $.oArtLayer.prototype.drawShape = function(path, lineStyle, fillStyle, polygon, createUnderneath){ if (typeof fillStyle === 'undefined') var fillStyle = new this.$.oFillStyle(); @@ -959,7 +959,7 @@ $.oArtLayer.prototype.drawContour = function(path, fillStyle){ * @param {float} width the width of the rectangle. * @param {float} height the height of the rectangle. * @param {$.oLineStyle} lineStyle a line style to use for the rectangle stroke. - * @param {$.oFillStyle} fillStyle a fill style to use for the rectange fill. + * @param {$.oFillStyle} fillStyle a fill style to use for the rectangle fill. * @returns {$.oShape} the shape containing the added stroke. 
*/ $.oArtLayer.prototype.drawRectangle = function(x, y, width, height, lineStyle, fillStyle){ @@ -1514,7 +1514,7 @@ Object.defineProperty($.oStroke.prototype, "path", { /** - * The oVertex that are on the stroke (Bezier handles exluded.) + * The oVertex that are on the stroke (Bezier handles excluded.) * The first is repeated at the last position when the stroke is closed. * @name $.oStroke#points * @type {$.oVertex[]} @@ -1583,7 +1583,7 @@ Object.defineProperty($.oStroke.prototype, "style", { /** - * wether the stroke is a closed shape. + * whether the stroke is a closed shape. * @name $.oStroke#closed * @type {bool} */ @@ -1919,7 +1919,7 @@ $.oContour.prototype.toString = function(){ * @constructor * @classdesc * The $.oVertex class represents a single control point on a stroke. This class is used to get the index of the point in the stroke path sequence, as well as its position as a float along the stroke's length. - * The onCurve property describes wether this control point is a bezier handle or a point on the curve. + * The onCurve property describes whether this control point is a bezier handle or a point on the curve. * * @param {$.oStroke} stroke the stroke that this vertex belongs to * @param {float} x the x coordinate of the vertex, in drawing space diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js index ed50d6e50b..b64c8169ec 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_element.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js index 14dafa3b63..50e4b0d475 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_file.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. 
// @@ -509,7 +509,7 @@ Object.defineProperty($.oFile.prototype, 'fullName', { /** - * The name of the file without extenstion. + * The name of the file without extension. * @name $.oFile#name * @type {string} */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js index 37bdede02a..e1d1dd7fad 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_frame.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -263,7 +263,7 @@ Object.defineProperty($.oFrame.prototype, 'duration', { return _sceneLength; } - // walk up the frames of the scene to the next keyFrame to determin duration + // walk up the frames of the scene to the next keyFrame to determine duration var _frames = this.column.frames for (var i=this.frameNumber+1; i<_sceneLength; i++){ if (_frames[i].isKeyframe) return _frames[i].frameNumber - _startFrame; @@ -426,7 +426,7 @@ Object.defineProperty($.oFrame.prototype, 'velocity', { * easeIn : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns. * easeOut : a $.oPoint object representing the left handle for bezier columns, or a {point, ease} object for ease columns. * continuity : the type of bezier used by the point. - * constant : wether the frame is interpolated or a held value. + * constant : whether the frame is interpolated or a held value. * @name $.oFrame#ease * @type {oPoint/object} */ @@ -520,7 +520,7 @@ Object.defineProperty($.oFrame.prototype, 'easeOut', { /** - * Determines the frame's continuity setting. Can take the values "CORNER", (two independant bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines). + * Determines the frame's continuity setting. Can take the values "CORNER", (two independent bezier handles on each side), "SMOOTH"(handles are aligned) or "STRAIGHT" (no handles and in straight lines). * @name $.oFrame#continuity * @type {string} */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js index 9d02b1c2aa..63a5c0eeb8 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_list.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. 
As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -516,5 +516,5 @@ Object.defineProperty($.oList.prototype, 'toString', { -//Needs all filtering, limiting. mapping, pop, concat, join, ect +//Needs all filtering, limiting. mapping, pop, concat, join, etc //Speed up by finessing the way it extends and tracks the enumerable properties. \ No newline at end of file diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js index c0d4ca99a7..06bfb51f30 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_math.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -193,7 +193,7 @@ $.oPoint.prototype.pointSubtract = function( sub_pt ){ /** * Subtracts the point to the coordinates of the current oPoint and returns a new oPoint with the result. * @param {$.oPoint} point The point to subtract to this point. - * @returns {$.oPoint} a new independant oPoint. + * @returns {$.oPoint} a new independent oPoint. */ $.oPoint.prototype.subtractPoint = function( point ){ var x = this.x - point.x; @@ -298,9 +298,9 @@ $.oPoint.prototype.convertToWorldspace = function(){ /** - * Linearily Interpolate between this (0.0) and the provided point (1.0) + * Linearly Interpolate between this (0.0) and the provided point (1.0) * @param {$.oPoint} point The target point at 100% - * @param {double} perc 0-1.0 value to linearily interp + * @param {double} perc 0-1.0 value to linearly interp * * @return: { $.oPoint } The interpolated value. */ @@ -410,9 +410,9 @@ $.oBox.prototype.include = function(box){ /** - * Checks wether the box contains another $.oBox. + * Checks whether the box contains another $.oBox. * @param {$.oBox} box The $.oBox to check for. - * @param {bool} [partial=false] wether to accept partially contained boxes. + * @param {bool} [partial=false] whether to accept partially contained boxes. */ $.oBox.prototype.contains = function(box, partial){ if (typeof partial === 'undefined') var partial = false; @@ -537,7 +537,7 @@ $.oMatrix.prototype.toString = function(){ * @classdesc The $.oVector is a replacement for the Vector3d objects of Harmony. * @param {float} x a x coordinate for this vector. * @param {float} y a y coordinate for this vector. - * @param {float} [z=0] a z coordinate for this vector. If ommited, will be set to 0 and vector will be 2D. + * @param {float} [z=0] a z coordinate for this vector. 
If omitted, will be set to 0 and vector will be 2D. */ $.oVector = function(x, y, z){ if (typeof z === "undefined" || isNaN(z)) var z = 0; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js index c19e6d12f4..29afeb522c 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_metadata.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js index fec5d32816..6ef75f5560 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_misc.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -54,7 +54,7 @@ /** - * The $.oUtils helper class -- providing generic utilities. Doesn't need instanciation. + * The $.oUtils helper class -- providing generic utilities. Doesn't need instantiation. * @classdesc $.oUtils utility Class */ $.oUtils = function(){ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js index a4476d7591..2a6aa3519a 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_network.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. 
Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -87,7 +87,7 @@ $.oNetwork = function( ){ * @param {function} callback_func Providing a callback function prevents blocking, and will respond on this function. The callback function is in form func( results ){} * @param {bool} use_json In the event of a JSON api, this will return an object converted from the returned JSON. * - * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured.. + * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred.. */ $.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){ if (typeof callback_func === 'undefined') var callback_func = false; @@ -272,7 +272,7 @@ $.oNetwork.prototype.webQuery = function ( address, callback_func, use_json ){ * @param {function} path The local file path to save the download. * @param {bool} replace Replace the file if it exists. * - * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occured.. + * @return: {string/object} The resulting object/string from the query -- otherwise a bool as false when an error occurred.. */ $.oNetwork.prototype.downloadSingle = function ( address, path, replace ){ if (typeof replace === 'undefined') var replace = false; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js index 5590d7b7e9..deb1854357 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_node.js @@ -4,7 +4,7 @@ // openHarmony Library // // -// Developped by Mathieu Chaptel, Chris Fourney +// Developed by Mathieu Chaptel, Chris Fourney // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -562,7 +562,7 @@ Object.defineProperty($.oNode.prototype, 'height', { /** - * The list of oNodeLinks objects descibing the connections to the inport of this node, in order of inport. + * The list of oNodeLinks objects describing the connections to the inport of this node, in order of inport. * @name $.oNode#inLinks * @readonly * @deprecated returns $.oNodeLink instances but $.oLink is preferred. Use oNode.getInLinks() instead. @@ -658,7 +658,7 @@ Object.defineProperty($.oNode.prototype, 'outPorts', { /** - * The list of oNodeLinks objects descibing the connections to the outports of this node, in order of outport. + * The list of oNodeLinks objects describing the connections to the outports of this node, in order of outport. * @name $.oNode#outLinks * @readonly * @type {$.oNodeLink[]} @@ -1666,7 +1666,7 @@ $.oNode.prototype.refreshAttributes = function( ){ * It represents peg nodes in the scene. 
* @constructor * @augments $.oNode - * @classdesc Peg Moudle Class + * @classdesc Peg Module Class * @param {string} path Path to the node in the network. * @param {oScene} oSceneObject Access to the oScene object of the DOM. */ @@ -1886,7 +1886,7 @@ $.oDrawingNode.prototype.getDrawingAtFrame = function(frameNumber){ /** - * Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurence of the colors. + * Gets the list of palettes containing colors used by a drawing node. This only gets palettes with the first occurrence of the colors. * @return {$.oPalette[]} The palettes that contain the color IDs used by the drawings of the node. */ $.oDrawingNode.prototype.getUsedPalettes = function(){ @@ -1968,7 +1968,7 @@ $.oDrawingNode.prototype.unlinkPalette = function(oPaletteObject){ * Duplicates a node by creating an independent copy. * @param {string} [newName] The new name for the duplicated node. * @param {oPoint} [newPosition] The new position for the duplicated node. - * @param {bool} [duplicateElement] Wether to also duplicate the element. + * @param {bool} [duplicateElement] Whether to also duplicate the element. */ $.oDrawingNode.prototype.duplicate = function(newName, newPosition, duplicateElement){ if (typeof newPosition === 'undefined') var newPosition = this.nodePosition; @@ -2464,7 +2464,7 @@ $.oGroupNode.prototype.getNodeByName = function(name){ * Returns all the nodes of a certain type in the group. * Pass a value to recurse to look into the groups as well. * @param {string} typeName The type of the nodes. - * @param {bool} recurse Wether to look inside the groups. + * @param {bool} recurse Whether to look inside the groups. * * @return {$.oNode[]} The nodes found. */ @@ -2626,7 +2626,7 @@ $.oGroupNode.prototype.orderNodeView = function(recurse){ * * peg.linkOutNode(drawingNode); * - * //through all this we didn't specify nodePosition parameters so we'll sort evertything at once + * //through all this we didn't specify nodePosition parameters so we'll sort everything at once * * sceneRoot.orderNodeView(); * @@ -3333,7 +3333,7 @@ $.oGroupNode.prototype.importImageAsTVG = function(path, alignment, nodePosition * imports an image sequence as a node into the current group. * @param {$.oFile[]} imagePaths a list of paths to the images to import (can pass a list of strings or $.oFile) * @param {number} [exposureLength=1] the number of frames each drawing should be exposed at. If set to 0/false, each drawing will use the numbering suffix of the file to set its frame. 
- * @param {boolean} [convertToTvg=false] wether to convert the files to tvg during import + * @param {boolean} [convertToTvg=false] whether to convert the files to tvg during import * @param {string} [alignment="ASIS"] the alignment to apply to the node * @param {$.oPoint} [nodePosition] the position of the node in the nodeview * @@ -3346,7 +3346,7 @@ $.oGroupNode.prototype.importImageSequence = function(imagePaths, exposureLength if (typeof extendScene === 'undefined') var extendScene = false; - // match anything but capture trailing numbers and separates punctuation preceeding it + // match anything but capture trailing numbers and separates punctuation preceding it var numberingRe = /(.*?)([\W_]+)?(\d*)$/i; // sanitize imagePaths diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js index 279a871691..07a4d147da 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony/openHarmony_nodeLink.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, Chris Fourney... +// Developed by Mathieu Chaptel, Chris Fourney... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -174,7 +174,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outNode', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -198,7 +198,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inNode', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -222,7 +222,7 @@ Object.defineProperty($.oNodeLink.prototype, 'outPort', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -256,7 +256,7 @@ Object.defineProperty($.oNodeLink.prototype, 'inPort', { return; } - this.apply(); // do we really want to apply everytime we set? + this.apply(); // do we really want to apply every time we set? } }); @@ -983,7 +983,7 @@ $.oNodeLink.prototype.validate = function ( ) { * @return {bool} Whether the connection is a valid connection that exists currently in the node system. */ $.oNodeLink.prototype.validateUpwards = function( inport, outportProvided ) { - //IN THE EVENT OUTNODE WASNT PROVIDED. + //IN THE EVENT OUTNODE WASN'T PROVIDED. this.path = this.findInputPath( this._inNode, inport, [] ); if( !this.path || this.path.length == 0 ){ return false; @@ -1173,7 +1173,7 @@ Object.defineProperty($.oLink.prototype, 'outPort', { /** - * The index of the link comming out of the out-port. + * The index of the link coming out of the out-port. *
In the event this value wasn't known by the link object but the link is actually connected, the correct value will be found. * @name $.oLink#outLink * @readonly @@ -1323,7 +1323,7 @@ $.oLink.prototype.getValidLink = function(createOutPorts, createInPorts){ /** - * Attemps to connect a link. Will guess the ports if not provided. + * Attempts to connect a link. Will guess the ports if not provided. * @return {bool} */ $.oLink.prototype.connect = function(){ @@ -1623,11 +1623,11 @@ $.oLinkPath.prototype.findExistingPath = function(){ /** - * Gets a link object from two nodes that can be succesfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned. + * Gets a link object from two nodes that can be successfully connected. Provide port numbers if there are specific requirements to match. If a link already exists, it will be returned. * @param {$.oNode} start The node from which the link originates. * @param {$.oNode} end The node at which the link ends. - * @param {int} [outPort] A prefered out-port for the link to use. - * @param {int} [inPort] A prefered in-port for the link to use. + * @param {int} [outPort] A preferred out-port for the link to use. + * @param {int} [inPort] A preferred in-port for the link to use. * * @return {$.oLink} the valid $.oLink object. Returns null if no such link could be created (for example if the node's in-port is already linked) */ diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js index 57d4a63e96..9014929fc4 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js +++ b/openpype/hosts/harmony/vendor/OpenHarmony/openHarmony_tools.js @@ -4,7 +4,7 @@ // openHarmony Library v0.01 // // -// Developped by Mathieu Chaptel, ... +// Developed by Mathieu Chaptel, ... // // // This library is an open source implementation of a Document Object Model @@ -16,7 +16,7 @@ // and by hiding the heavy lifting required by the official API. // // This library is provided as is and is a work in progress. As such, not every -// function has been implemented or is garanteed to work. Feel free to contribute +// function has been implemented or is guaranteed to work. Feel free to contribute // improvements to its official github. If you do make sure you follow the provided // template and naming conventions and document your new methods properly. // @@ -212,7 +212,7 @@ function openHarmony_toolInstaller(){ //---------------------------------------------- - //-- GET THE FILE CONTENTS IN A DIRCTORY ON GIT + //-- GET THE FILE CONTENTS IN A DIRECTORY ON GIT this.recurse_files = function( contents, arr_files ){ with( context.$.global ){ try{ @@ -501,7 +501,7 @@ function openHarmony_toolInstaller(){ var download_item = item["download_url"]; var query = $.network.webQuery( download_item, false, false ); if( query ){ - //INSTALL TYPES ARE script, package, ect. + //INSTALL TYPES ARE script, package, etc. 
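Looking back at the `numberingRe` pattern in the `importImageSequence` hunk above: it lazily captures a base name, an optional run of punctuation or underscores, and the trailing frame number. The pattern uses the same syntax in Python, which makes its behaviour easy to verify:

```python
import re

# Same pattern as numberingRe above: lazy base name, optional separator
# made of non-word characters or underscores, then trailing digits.
numbering_re = re.compile(r"(.*?)([\W_]+)?(\d*)$")

print(numbering_re.match("BG_final_0042").groups())  # ('BG_final', '_', '0042')
print(numbering_re.match("title").groups())          # ('title', None, '')
```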
if( install_types[ m.install_cache[ item["url"] ] ] ){ m.installLabel.text = install_types[ m.install_cache[ item["url"] ] ]; diff --git a/openpype/hosts/harmony/vendor/OpenHarmony/package.json b/openpype/hosts/harmony/vendor/OpenHarmony/package.json index c62ecbc9d8..7a535cdcf6 100644 --- a/openpype/hosts/harmony/vendor/OpenHarmony/package.json +++ b/openpype/hosts/harmony/vendor/OpenHarmony/package.json @@ -1,7 +1,7 @@ { "name": "openharmony", "version": "0.0.1", - "description": "An Open Source Imlementation of a Document Object Model for the Toonboom Harmony scripting interface", + "description": "An Open Source Implementation of a Document Object Model for the Toonboom Harmony scripting interface", "main": "openHarmony.js", "scripts": { "test": "$", diff --git a/openpype/hosts/hiero/api/__init__.py b/openpype/hosts/hiero/api/__init__.py index 1fa40c9f74..b95c0fe1d7 100644 --- a/openpype/hosts/hiero/api/__init__.py +++ b/openpype/hosts/hiero/api/__init__.py @@ -108,7 +108,7 @@ __all__ = [ "apply_colorspace_project", "apply_colorspace_clips", "get_sequence_pattern_and_padding", - # depricated + # deprecated "get_track_item_pype_tag", "set_track_item_pype_tag", "get_track_item_pype_data", diff --git a/openpype/hosts/hiero/api/lib.py b/openpype/hosts/hiero/api/lib.py index bbd1edc14a..fa874f9e9d 100644 --- a/openpype/hosts/hiero/api/lib.py +++ b/openpype/hosts/hiero/api/lib.py @@ -23,11 +23,17 @@ except ImportError: from openpype.client import get_project from openpype.settings import get_project_settings -from openpype.pipeline import legacy_io, Anatomy +from openpype.pipeline import ( + get_current_project_name, legacy_io, Anatomy +) from openpype.pipeline.load import filter_containers from openpype.lib import Logger from . import tags +from openpype.pipeline.colorspace import ( + get_imageio_config +) + class DeprecatedWarning(DeprecationWarning): pass @@ -1047,6 +1053,18 @@ def apply_colorspace_project(): imageio = get_project_settings(project_name)["hiero"]["imageio"] presets = imageio.get("workfile") + # backward compatibility layer + # TODO: remove this after some time + config_data = get_imageio_config( + project_name=get_current_project_name(), + host_name="hiero" + ) + + if config_data: + presets.update({ + "ocioConfigName": "custom" + }) + # save the workfile as subversion "comment:_colorspaceChange" split_current_file = os.path.splitext(current_file) copy_current_file = current_file @@ -1221,7 +1239,7 @@ def set_track_color(track_item, color): def check_inventory_versions(track_items=None): """ - Actual version color idetifier of Loaded containers + Actual version color identifier of Loaded containers Check all track items and filter only Loader nodes for its version. 
It will get all versions from database @@ -1249,10 +1267,10 @@ def check_inventory_versions(track_items=None): project_name = legacy_io.active_project() filter_result = filter_containers(containers, project_name) for container in filter_result.latest: - set_track_color(container["_item"], clip_color) + set_track_color(container["_item"], clip_color_last) for container in filter_result.outdated: - set_track_color(container["_item"], clip_color_last) + set_track_color(container["_item"], clip_color) def selection_changed_timeline(event): diff --git a/openpype/hosts/hiero/api/pipeline.py b/openpype/hosts/hiero/api/pipeline.py index 4ab73e7d19..d88aeac810 100644 --- a/openpype/hosts/hiero/api/pipeline.py +++ b/openpype/hosts/hiero/api/pipeline.py @@ -193,8 +193,8 @@ def parse_container(item, validate=True): return # convert the data to list and validate them for _, obj_data in _data.items(): - cotnainer = data_to_container(item, obj_data) - return_list.append(cotnainer) + container = data_to_container(item, obj_data) + return_list.append(container) return return_list else: _data = lib.get_trackitem_openpype_data(item) diff --git a/openpype/hosts/hiero/api/plugin.py b/openpype/hosts/hiero/api/plugin.py index 07457db1a4..a3f8a6c524 100644 --- a/openpype/hosts/hiero/api/plugin.py +++ b/openpype/hosts/hiero/api/plugin.py @@ -146,6 +146,8 @@ class CreatorWidget(QtWidgets.QDialog): return " ".join([str(m.group(0)).capitalize() for m in matches]) def create_row(self, layout, type, text, **kwargs): + value_keys = ["setText", "setCheckState", "setValue", "setChecked"] + # get type attribute from qwidgets attr = getattr(QtWidgets, type) @@ -167,14 +169,27 @@ class CreatorWidget(QtWidgets.QDialog): # assign the created attribute to variable item = getattr(self, attr_name) + + # set attributes to item which are not values for func, val in kwargs.items(): + if func in value_keys: + continue + if getattr(item, func): + log.debug("Setting {} to {}".format(func, val)) func_attr = getattr(item, func) if isinstance(val, tuple): func_attr(*val) else: func_attr(val) + # set values to item + for value_item in value_keys: + if value_item not in kwargs: + continue + if getattr(item, value_item): + getattr(item, value_item)(kwargs[value_item]) + # add to layout layout.addRow(label, item) @@ -276,8 +291,11 @@ class CreatorWidget(QtWidgets.QDialog): elif v["type"] == "QSpinBox": data[k]["value"] = self.create_row( content_layout, "QSpinBox", v["label"], - setValue=v["value"], setMinimum=0, + setValue=v["value"], + setDisplayIntegerBase=10000, + setRange=(0, 99999), setMinimum=0, setMaximum=100000, setToolTip=tool_tip) + return data @@ -393,7 +411,7 @@ class ClipLoader: self.with_handles = options.get("handles") or bool( options.get("handles") is True) # try to get value from options or evaluate key value for `load_how` - self.sequencial_load = options.get("sequencially") or bool( + self.sequencial_load = options.get("sequentially") or bool( "Sequentially in order" in options.get("load_how", "")) # try to get value from options or evaluate key value for `load_to` self.new_sequence = options.get("newSequence") or bool( @@ -818,7 +836,7 @@ class PublishClip: # increasing steps by index of rename iteration self.count_steps *= self.rename_index - hierarchy_formating_data = {} + hierarchy_formatting_data = {} hierarchy_data = deepcopy(self.hierarchy_data) _data = self.track_item_default_data.copy() if self.ui_inputs: @@ -853,13 +871,13 @@ class PublishClip: # fill up pythonic expresisons in hierarchy data for k, _v in 
hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = hierarchy_data + hierarchy_formatting_data = hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -887,20 +905,20 @@ class PublishClip: # add data to return data dict self.tag_data.update(tag_hierarchy_data) - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve tag data from hierarchy data and templates. """ # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) # remove shot from hierarchy data: is not needed anymore - hierarchy_formating_data.pop("shot") + hierarchy_formatting_data.pop("shot") return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": [self.data["family"]] @@ -916,16 +934,16 @@ class PublishClip: ) # first collect formatting data to use for formatting template - formating_data = {} + formatting_data = {} for _k, _v in self.hierarchy_data.items(): value = _v["value"].format( **self.track_item_default_data) - formating_data[_k] = value + formatting_data[_k] = value return { "entity_type": entity_type, "entity_name": template.format( - **formating_data + **formatting_data ) } diff --git a/openpype/hosts/hiero/plugins/load/load_clip.py b/openpype/hosts/hiero/plugins/load/load_clip.py index 2a7d1af41e..c9bebfa8b2 100644 --- a/openpype/hosts/hiero/plugins/load/load_clip.py +++ b/openpype/hosts/hiero/plugins/load/load_clip.py @@ -6,6 +6,10 @@ from openpype.pipeline import ( legacy_io, get_representation_path, ) +from openpype.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) import openpype.hosts.hiero.api as phiero @@ -17,7 +21,10 @@ class LoadClip(phiero.SequenceLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) label = "Load as clip" order = -10 @@ -34,6 +41,38 @@ class LoadClip(phiero.SequenceLoader): clip_name_template = "{asset}_{subset}_{representation}" + @classmethod + def apply_settings(cls, project_settings, system_settings): + plugin_type_settings = ( + project_settings + .get("hiero", {}) + .get("load", {}) + ) + + if not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + print(" - is disabled by preset") + elif option == "representations": + continue + else: + print(" - setting `{}`: 
`{}`".format(option, value))
+                setattr(cls, option, value)
+
+
     def load(self, context, name, namespace, options):
         # add clip name template to options
         options.update({
diff --git a/openpype/hosts/hiero/plugins/load/load_effects.py b/openpype/hosts/hiero/plugins/load/load_effects.py
index a3fcd63b5b..b61cca9731 100644
--- a/openpype/hosts/hiero/plugins/load/load_effects.py
+++ b/openpype/hosts/hiero/plugins/load/load_effects.py
@@ -19,8 +19,9 @@ from openpype.lib import Logger
 class LoadEffects(load.LoaderPlugin):
     """Loading colorspace soft effect exported from nukestudio"""
 
-    representations = ["effectJson"]
     families = ["effect"]
+    representations = ["*"]
+    extensions = {"json"}
 
     label = "Load Effects"
     order = 0
diff --git a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py
index 9489b1c4fb..d455ad4a4e 100644
--- a/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py
+++ b/openpype/hosts/hiero/plugins/publish/collect_clip_effects.py
@@ -120,13 +120,9 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
         track = sitem.parentTrack().name()
         # node serialization
         node = sitem.node()
-        node_serialized = self.node_serialisation(node)
+        node_serialized = self.node_serialization(node)
 
         node_name = sitem.name()
-
-        if "_" in node_name:
-            node_class = re.sub(r"(?:_)[_0-9]+", "", node_name)  # more numbers
-        else:
-            node_class = re.sub(r"\d+", "", node_name)  # one number
+        node_class = node.Class()
 
         # collect timelineIn/Out
         effect_t_in = int(sitem.timelineIn())
@@ -148,7 +144,7 @@ class CollectClipEffects(pyblish.api.InstancePlugin):
             "node": node_serialized
         }}
 
-    def node_serialisation(self, node):
+    def node_serialization(self, node):
         node_serialized = {}
 
         # adding ignoring knob keys
diff --git a/openpype/hosts/houdini/api/action.py b/openpype/hosts/houdini/api/action.py
new file mode 100644
index 0000000000..27e8ce55bb
--- /dev/null
+++ b/openpype/hosts/houdini/api/action.py
@@ -0,0 +1,46 @@
+import pyblish.api
+import hou
+
+from openpype.pipeline.publish import get_errored_instances_from_context
+
+
+class SelectInvalidAction(pyblish.api.Action):
+    """Select invalid nodes in Houdini when plug-in failed.
+
+    To retrieve the invalid nodes this assumes a static `get_invalid()`
+    method is available on the plugin.
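+
+    A minimal sketch of such a hook on a validator plug-in (the helper
+    `is_invalid_node` is hypothetical, not part of this API):
+
+        @classmethod
+        def get_invalid(cls, instance):
+            # Return the hou.Node objects that fail the check
+            return [node for node in instance if is_invalid_node(node)]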
+
+    """
+    label = "Select invalid"
+    on = "failed"  # This action is only available on a failed plug-in
+    icon = "search"  # Icon from Font Awesome
+
+    def process(self, context, plugin):
+
+        errored_instances = get_errored_instances_from_context(context)
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+
+        # Get the invalid nodes for the plug-ins
+        self.log.info("Finding invalid nodes..")
+        invalid = list()
+        for instance in instances:
+            invalid_nodes = plugin.get_invalid(instance)
+            if invalid_nodes:
+                if isinstance(invalid_nodes, (list, tuple)):
+                    invalid.extend(invalid_nodes)
+                else:
+                    self.log.warning("Plug-in returned an invalid result "
+                                     "that is not a list or tuple of nodes; "
+                                     "nothing to select.")
+
+        hou.clearAllSelected()
+        if invalid:
+            self.log.info("Selecting invalid nodes: {}".format(
+                ", ".join(node.path() for node in invalid)
+            ))
+            for node in invalid:
+                node.setSelected(True)
+                node.setCurrent(True)
+        else:
+            self.log.info("No invalid nodes found.")
diff --git a/openpype/hosts/houdini/api/colorspace.py b/openpype/hosts/houdini/api/colorspace.py
new file mode 100644
index 0000000000..7047644225
--- /dev/null
+++ b/openpype/hosts/houdini/api/colorspace.py
@@ -0,0 +1,56 @@
+import attr
+import hou
+from openpype.hosts.houdini.api.lib import get_color_management_preferences
+
+
+@attr.s
+class LayerMetadata(object):
+    """Data class for Render Layer metadata."""
+    frameStart = attr.ib()
+    frameEnd = attr.ib()
+
+
+@attr.s
+class RenderProduct(object):
+    """Colorspace data for a specific render product,
+    passed along when submitting the publish job.
+
+    """
+    colorspace = attr.ib()
+    view = attr.ib()
+    productName = attr.ib(default=None)
+
+
+class ARenderProduct(object):
+
+    def __init__(self):
+        """Constructor."""
+        # Initialize
+        self.layer_data = self._get_layer_data()
+        self.layer_data.products = self.get_colorspace_data()
+
+    def _get_layer_data(self):
+        return LayerMetadata(
+            frameStart=int(hou.playbar.frameRange()[0]),
+            frameEnd=int(hou.playbar.frameRange()[1]),
+        )
+
+    def get_colorspace_data(self):
+        """To be implemented by renderer class.
+
+        This should return a list of RenderProducts.
+
+        Returns:
+            list: List of RenderProduct
+
+        """
+        data = get_color_management_preferences()
+        colorspace_data = [
+            RenderProduct(
+                colorspace=data["display"],
+                view=data["view"],
+                productName=""
+            )
+        ]
+        return colorspace_data
diff --git a/openpype/hosts/houdini/api/creator_node_shelves.py b/openpype/hosts/houdini/api/creator_node_shelves.py
new file mode 100644
index 0000000000..7c6122cffe
--- /dev/null
+++ b/openpype/hosts/houdini/api/creator_node_shelves.py
@@ -0,0 +1,233 @@
+"""Library to register OpenPype Creators for Houdini TAB node search menu.
+
+This can be used to install custom houdini tools for the TAB search
+menu which will trigger a publish instance to be created interactively.
+
+The Creators are automatically registered on launch of Houdini through the
+Houdini integration's `host.install()` method.
+ +""" +import contextlib +import tempfile +import logging +import os + +from openpype.client import get_asset_by_name +from openpype.pipeline import registered_host +from openpype.pipeline.create import CreateContext +from openpype.resources import get_openpype_icon_filepath + +import hou +import stateutils +import soptoolutils +import loptoolutils +import cop2toolutils + + +log = logging.getLogger(__name__) + +CATEGORY_GENERIC_TOOL = { + hou.sopNodeTypeCategory(): soptoolutils.genericTool, + hou.cop2NodeTypeCategory(): cop2toolutils.genericTool, + hou.lopNodeTypeCategory(): loptoolutils.genericTool +} + + +CREATE_SCRIPT = """ +from openpype.hosts.houdini.api.creator_node_shelves import create_interactive +create_interactive("{identifier}", **kwargs) +""" + + +def create_interactive(creator_identifier, **kwargs): + """Create a Creator using its identifier interactively. + + This is used by the generated shelf tools as callback when a user selects + the creator from the node tab search menu. + + The `kwargs` should be what Houdini passes to the tool create scripts + context. For more information see: + https://www.sidefx.com/docs/houdini/hom/tool_script.html#arguments + + Args: + creator_identifier (str): The creator identifier of the Creator plugin + to create. + + Return: + list: The created instances. + + """ + + # TODO Use Qt instead + result, variant = hou.ui.readInput('Define variant name', + buttons=("Ok", "Cancel"), + initial_contents='Main', + title="Define variant", + help="Set the variant for the " + "publish instance", + close_choice=1) + if result == 1: + # User interrupted + return + variant = variant.strip() + if not variant: + raise RuntimeError("Empty variant value entered.") + + host = registered_host() + context = CreateContext(host) + creator = context.manual_creators.get(creator_identifier) + if not creator: + raise RuntimeError("Invalid creator identifier: " + "{}".format(creator_identifier)) + + # TODO: Once more elaborate unique create behavior should exist per Creator + # instead of per network editor area then we should move this from here + # to a method on the Creators for which this could be the default + # implementation. 
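+    # Only when the tool was dropped inside a network editor pane do we also
+    # create a visual anchor: a null named "OUT_<subset>" made through the
+    # category's generic tool function (see CATEGORY_GENERIC_TOOL above).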
+    pane = stateutils.activePane(kwargs)
+    if isinstance(pane, hou.NetworkEditor):
+        pwd = pane.pwd()
+        subset_name = creator.get_subset_name(
+            variant=variant,
+            task_name=context.get_current_task_name(),
+            asset_doc=get_asset_by_name(
+                project_name=context.get_current_project_name(),
+                asset_name=context.get_current_asset_name()
+            ),
+            project_name=context.get_current_project_name(),
+            host_name=context.host_name
+        )
+
+        tool_fn = CATEGORY_GENERIC_TOOL.get(pwd.childTypeCategory())
+        if tool_fn is not None:
+            out_null = tool_fn(kwargs, "null")
+            out_null.setName("OUT_{}".format(subset_name), unique_name=True)
+
+    before = context.instances_by_id.copy()
+
+    # Create the instance
+    context.create(
+        creator_identifier=creator_identifier,
+        variant=variant,
+        pre_create_data={"use_selection": True}
+    )
+
+    # For convenience we set the new node as current since that's much more
+    # familiar to the artist when creating a node interactively
+    # TODO Allow to disable auto-select in studio settings or user preferences
+    after = context.instances_by_id
+    new = set(after) - set(before)
+    if new:
+        # Select the new instance
+        for instance_id in new:
+            instance = after[instance_id]
+            node = hou.node(instance.get("instance_node"))
+            node.setCurrent(True)
+
+    return list(new)
+
+
+@contextlib.contextmanager
+def shelves_change_block():
+    """Write shelf changes at the end of the context."""
+    hou.shelves.beginChangeBlock()
+    try:
+        yield
+    finally:
+        hou.shelves.endChangeBlock()
+
+
+def install():
+    """Install the Creator plug-ins to show in Houdini's TAB node search menu.
+
+    This function is re-entrant and can be called again to reinstall and
+    update the node definitions. For example during development it can be
+    useful to call it manually:
+    >>> from openpype.hosts.houdini.api.creator_node_shelves import install
+    >>> install()
+
+    Returns:
+        list: List of `hou.Tool` instances
+
+    """
+
+    host = registered_host()
+
+    # Store the filepath on the host
+    # TODO: Define a less hacky static shelf path for current houdini session
+    filepath_attr = "_creator_node_shelf_filepath"
+    filepath = getattr(host, filepath_attr, None)
+    if filepath is None:
+        f = tempfile.NamedTemporaryFile(prefix="houdini_creator_nodes_",
+                                        suffix=".shelf",
+                                        delete=False)
+        f.close()
+        filepath = f.name
+        setattr(host, filepath_attr, filepath)
+    elif os.path.exists(filepath):
+        # Remove any existing shelf file so that we can completely regenerate
+        # and update the tools file if creator identifiers change
+        os.remove(filepath)
+
+    icon = get_openpype_icon_filepath()
+
+    # Create context only to get creator plugins, so we don't reset and only
+    # populate what we need to retrieve the list of creator plugins
+    create_context = CreateContext(host, reset=False)
+    create_context.reset_current_context()
+    create_context._reset_creator_plugins()
+
+    log.debug("Writing OpenPype Creator nodes to shelf: {}".format(filepath))
+    tools = []
+
+    with shelves_change_block():
+        for identifier, creator in create_context.manual_creators.items():
+
+            # Allow the creator plug-in itself to override the categories
+            # for where they are shown with `Creator.get_network_categories()`
+            if not hasattr(creator, "get_network_categories"):
+                log.debug("Creator {} has no `get_network_categories` method "
+                          "and will not be added to TAB search."
+                          .format(identifier))
+                continue
+
+            network_categories = creator.get_network_categories()
+            if not network_categories:
+                continue
+
+            key = "openpype_create.{}".format(identifier)
+            log.debug(f"Registering {key}")
+            script = 
CREATE_SCRIPT.format(identifier=identifier) + data = { + "script": script, + "language": hou.scriptLanguage.Python, + "icon": icon, + "help": "Create OpenPype publish instance for {}".format( + creator.label + ), + "help_url": None, + "network_categories": network_categories, + "viewer_categories": [], + "cop_viewer_categories": [], + "network_op_type": None, + "viewer_op_type": None, + "locations": ["OpenPype"] + } + label = "Create {}".format(creator.label) + tool = hou.shelves.tool(key) + if tool: + tool.setData(**data) + tool.setLabel(label) + else: + tool = hou.shelves.newTool( + file_path=filepath, + name=key, + label=label, + **data + ) + + tools.append(tool) + + # Ensure the shelf is reloaded + hou.shelves.loadFile(filepath) + + return tools diff --git a/openpype/hosts/houdini/api/lib.py b/openpype/hosts/houdini/api/lib.py index 13f5a62ec3..a33ba7aad2 100644 --- a/openpype/hosts/houdini/api/lib.py +++ b/openpype/hosts/houdini/api/lib.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import sys import os +import re import uuid import logging from contextlib import contextmanager @@ -127,6 +128,8 @@ def get_output_parameter(node): return node.parm("filename") elif node_type == "comp": return node.parm("copoutput") + elif node_type == "opengl": + return node.parm("picture") elif node_type == "arnold": if node.evalParm("ar_ass_export_enable"): return node.parm("ar_ass_file") @@ -479,23 +482,13 @@ def reset_framerange(): frame_start = asset_data.get("frameStart") frame_end = asset_data.get("frameEnd") - # Backwards compatibility - if frame_start is None or frame_end is None: - frame_start = asset_data.get("edit_in") - frame_end = asset_data.get("edit_out") if frame_start is None or frame_end is None: log.warning("No edit information found for %s" % asset_name) return - handles = asset_data.get("handles") or 0 - handle_start = asset_data.get("handleStart") - if handle_start is None: - handle_start = handles - - handle_end = asset_data.get("handleEnd") - if handle_end is None: - handle_end = handles + handle_start = asset_data.get("handleStart", 0) + handle_end = asset_data.get("handleEnd", 0) frame_start -= int(handle_start) frame_end += int(handle_end) @@ -589,3 +582,74 @@ def splitext(name, allowed_multidot_extensions): return name[:-len(ext)], ext return os.path.splitext(name) + + +def get_top_referenced_parm(parm): + + processed = set() # disallow infinite loop + while True: + if parm.path() in processed: + raise RuntimeError("Parameter references result in cycle.") + + processed.add(parm.path()) + + ref = parm.getReferencedParm() + if ref.path() == parm.path(): + # It returns itself when it doesn't reference + # another parameter + return ref + else: + parm = ref + + +def evalParmNoFrame(node, parm, pad_character="#"): + + parameter = node.parm(parm) + assert parameter, "Parameter does not exist: %s.%s" % (node, parm) + + # If the parameter has a parameter reference, then get that + # parameter instead as otherwise `unexpandedString()` fails. 
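+    # The end result of this function: e.g. a raw parameter value of
+    # "$HIP/render/shot.$F4.exr" (hypothetical path) comes back as
+    # ".../render/shot.####.exr" - "$F4" padded, everything else expanded.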
+    parameter = get_top_referenced_parm(parameter)
+
+    # Substitute out the frame numbering with padded characters
+    try:
+        raw = parameter.unexpandedString()
+    except hou.Error as exc:
+        print("Failed: %s" % parameter)
+        raise RuntimeError(exc)
+
+    def replace(match):
+        padding = 1
+        n = match.group(2)
+        if n and int(n):
+            padding = int(n)
+        return pad_character * padding
+
+    expression = re.sub(r"(\$F([0-9]*))", replace, raw)
+
+    with hou.ScriptEvalContext(parameter):
+        return hou.expandStringAtFrame(expression, 0)
+
+
+def get_color_management_preferences():
+    """Get default OCIO preferences"""
+    data = {
+        "config": hou.Color.ocio_configPath()
+    }
+
+    # Get default display and view from OCIO
+    display = hou.Color.ocio_defaultDisplay()
+    disp_regex = re.compile(r"^(?P<prefix>.+-)(?P<display>.+)$")
+    disp_match = disp_regex.match(display)
+
+    view = hou.Color.ocio_defaultView()
+    view_regex = re.compile(r"^(?P<prefix>.+- )(?P<view>.+)$")
+    view_match = view_regex.match(view)
+    data.update({
+        "display": disp_match.group("display"),
+        "view": view_match.group("view")
+    })
+
+    return data
diff --git a/openpype/hosts/houdini/api/pipeline.py b/openpype/hosts/houdini/api/pipeline.py
index f8e2c16d21..b8b8fefb52 100644
--- a/openpype/hosts/houdini/api/pipeline.py
+++ b/openpype/hosts/houdini/api/pipeline.py
@@ -18,7 +18,7 @@ from openpype.pipeline import (
 )
 from openpype.pipeline.load import any_outdated_containers
 from openpype.hosts.houdini import HOUDINI_HOST_DIR
-from openpype.hosts.houdini.api import lib, shelves
+from openpype.hosts.houdini.api import lib, shelves, creator_node_shelves
 
 from openpype.lib import (
     register_event_callback,
@@ -81,7 +81,17 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         # TODO: make sure this doesn't trigger when
         #     opening with last workfile.
         _set_context_settings()
-        shelves.generate_shelves()
+
+        if not IS_HEADLESS:
+            import hdefereval  # noqa, hdefereval is only available in ui mode
+            # Defer generation of shelves due to issue on Windows where shelf
+            # initialization during start up delays Houdini UI by minutes
+            # making it extremely slow to launch.
+            hdefereval.executeDeferred(shelves.generate_shelves)
+            hdefereval.executeDeferred(creator_node_shelves.install)
 
     def has_unsaved_changes(self):
         return hou.hipFile.hasUnsavedChanges()
@@ -144,13 +154,17 @@ class HoudiniHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
         """
 
         obj_network = hou.node("/obj")
-        op_ctx = obj_network.createNode(
-            "null", node_name="OpenPypeContext")
+        op_ctx = obj_network.createNode("subnet",
+                                        node_name="OpenPypeContext",
+                                        run_init_scripts=False,
+                                        load_contents=False)
+
+        op_ctx.moveToGoodPosition()
         op_ctx.setBuiltExplicitly(False)
         op_ctx.setCreatorState("OpenPype")
         op_ctx.setComment("OpenPype node to hold context metadata")
         op_ctx.setColor(hou.Color((0.081, 0.798, 0.810)))
+        op_ctx.setDisplayFlag(False)
         op_ctx.hide(True)
         return op_ctx
diff --git a/openpype/hosts/houdini/api/plugin.py b/openpype/hosts/houdini/api/plugin.py
index e15e27c83f..1e7eaa7e22 100644
--- a/openpype/hosts/houdini/api/plugin.py
+++ b/openpype/hosts/houdini/api/plugin.py
@@ -60,7 +60,7 @@ class Creator(LegacyCreator):
             def process(self):
                 instance = super(CreateEpicNode, self, process()
 
-                # Set paramaters for Alembic node
+                # Set parameters for Alembic node
                 instance.setParms(
                     {"sop_path": "$HIP/%s.abc" % self.nodes[0]}
                 )
@@ -103,9 +103,8 @@ class HoudiniCreatorBase(object):
         fill it with all collected instances from the scene under its
         respective creator identifiers.
 
-        If legacy instances are detected in the scene, create
-        `houdini_cached_legacy_subsets` there and fill it with
-        all legacy subsets under family as a key.
+        Also create a `houdini_cached_legacy_subsets` key holding any legacy
+        instances detected in the scene, grouped per family.
 
         Args:
             Dict[str, Any]: Shared data.
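For reference, the cache that `cache_subsets` builds in the hunk below has
roughly this shape (identifier, family and node values are illustrative):

    shared_data["houdini_cached_subsets"] = {
        "io.openpype.creators.houdini.pointcache": [instance_node, ...],
    }
    shared_data["houdini_cached_legacy_subsets"] = {
        "pointcache": [legacy_instance_node, ...],
    }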
@@ -115,29 +114,30 @@ class HoudiniCreatorBase(object): """ if shared_data.get("houdini_cached_subsets") is None: - shared_data["houdini_cached_subsets"] = {} - if shared_data.get("houdini_cached_legacy_subsets") is None: - shared_data["houdini_cached_legacy_subsets"] = {} - cached_instances = lsattr("id", "pyblish.avalon.instance") - for i in cached_instances: - if not i.parm("creator_identifier"): - # we have legacy instance - family = i.parm("family").eval() - if family not in shared_data[ - "houdini_cached_legacy_subsets"]: - shared_data["houdini_cached_legacy_subsets"][ - family] = [i] - else: - shared_data[ - "houdini_cached_legacy_subsets"][family].append(i) - continue + cache = dict() + cache_legacy = dict() + + for node in lsattr("id", "pyblish.avalon.instance"): + + creator_identifier_parm = node.parm("creator_identifier") + if creator_identifier_parm: + # creator instance + creator_id = creator_identifier_parm.eval() + cache.setdefault(creator_id, []).append(node) - creator_id = i.parm("creator_identifier").eval() - if creator_id not in shared_data["houdini_cached_subsets"]: - shared_data["houdini_cached_subsets"][creator_id] = [i] else: - shared_data[ - "houdini_cached_subsets"][creator_id].append(i) # noqa + # legacy instance + family_parm = node.parm("family") + if not family_parm: + # must be a broken instance + continue + + family = family_parm.eval() + cache_legacy.setdefault(family, []).append(node) + + shared_data["houdini_cached_subsets"] = cache + shared_data["houdini_cached_legacy_subsets"] = cache_legacy + return shared_data @staticmethod @@ -225,12 +225,12 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase): self._add_instance_to_context(created_instance) def update_instances(self, update_list): - for created_inst, _changes in update_list: + for created_inst, changes in update_list: instance_node = hou.node(created_inst.get("instance_node")) new_values = { - key: new_value - for key, (_old_value, new_value) in _changes.items() + key: changes[key].new_value + for key in changes.changed_keys } imprint( instance_node, @@ -276,3 +276,19 @@ class HoudiniCreator(NewCreator, HoudiniCreatorBase): color = hou.Color((0.616, 0.871, 0.769)) node.setUserData('nodeshape', shape) node.setColor(color) + + def get_network_categories(self): + """Return in which network view type this creator should show. + + The node type categories returned here will be used to define where + the creator will show up in the TAB search for nodes in Houdini's + Network View. + + This can be overridden in inherited classes to define where that + particular Creator should be visible in the TAB search. 
+
+        Returns:
+            list: List of houdini node type categories
+
+        """
+        return [hou.ropNodeTypeCategory()]
diff --git a/openpype/hosts/houdini/api/shelves.py b/openpype/hosts/houdini/api/shelves.py
index 3ccab964cd..6e0f367f62 100644
--- a/openpype/hosts/houdini/api/shelves.py
+++ b/openpype/hosts/houdini/api/shelves.py
@@ -1,4 +1,5 @@
 import os
+import re
 import logging
 import platform
@@ -66,9 +67,9 @@ def generate_shelves():
             )
             continue
 
-        mandatory_attributes = {'name', 'script'}
+        mandatory_attributes = {'label', 'script'}
         for tool_definition in shelf_definition.get('tools_list'):
-            # We verify that the name and script attibutes of the tool
+            # We verify that the label and script attributes of the tool
             # are set
             if not all(
                 tool_definition[key] for key in mandatory_attributes
@@ -152,31 +153,32 @@ def get_or_create_tool(tool_definition, shelf):
     Returns:
         hou.Tool: The tool updated or the new one
     """
-    existing_tools = shelf.tools()
-    tool_label = tool_definition.get('label')
+    tool_label = tool_definition.get("label")
+    if not tool_label:
+        log.warning("Skipped tool without label")
+        return
+
+    script_path = tool_definition["script"]
+    if not script_path or not os.path.exists(script_path):
+        log.warning("This path doesn't exist - {}".format(script_path))
+        return
+
+    existing_tools = shelf.tools()
     existing_tool = next(
         (tool for tool in existing_tools if tool.label() == tool_label),
         None
     )
+
+    with open(script_path) as stream:
+        script = stream.read()
+
+    tool_definition["script"] = script
+
     if existing_tool:
-        tool_definition.pop('name', None)
-        tool_definition.pop('label', None)
+        tool_definition.pop("label", None)
         existing_tool.setData(**tool_definition)
         return existing_tool
 
-    tool_name = tool_label.replace(' ', '_').lower()
-
-    if not os.path.exists(tool_definition['script']):
-        log.warning(
-            "This path doesn't exist - {}".format(tool_definition['script'])
-        )
-        return
-
-    with open(tool_definition['script']) as f:
-        script = f.read()
-    tool_definition.update({'script': script})
-
-    new_tool = hou.shelves.newTool(name=tool_name, **tool_definition)
-
-    return new_tool
+    tool_name = re.sub(r"[^\w\d]+", "_", tool_label).lower()
+    return hou.shelves.newTool(name=tool_name, **tool_definition)
diff --git a/openpype/hosts/houdini/plugins/create/convert_legacy.py b/openpype/hosts/houdini/plugins/create/convert_legacy.py
index 4b8041b4f5..e549c9dc26 100644
--- a/openpype/hosts/houdini/plugins/create/convert_legacy.py
+++ b/openpype/hosts/houdini/plugins/create/convert_legacy.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-"""Convertor for legacy Houdini subsets."""
+"""Converter for legacy Houdini subsets."""
 from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin
 from openpype.hosts.houdini.api.lib import imprint
 
@@ -7,7 +7,7 @@ from openpype.hosts.houdini.api.lib import imprint
 class HoudiniLegacyConvertor(SubsetConvertorPlugin):
     """Find and convert any legacy subsets in the scene.
 
-    This Convertor will find all legacy subsets in the scene and will
+    This Converter will find all legacy subsets in the scene and will
     transform them to the current system. Since the old subsets doesn't
     retain any information about their original creators, the only
     mapping we can do is based on their families.
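As a side note on the reworked `get_or_create_tool()` above: it now expects
every tool definition to carry at least a label and an existing script path,
which is read into memory before the tool is created or updated. A minimal
definition could look like this (path and label are hypothetical):

    tool_definition = {
        "label": "Publish...",
        "script": "/studio/houdini/scripts/publish_tool.py",
    }
    tool = get_or_create_tool(tool_definition, shelf)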
diff --git a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py index fec64eb4a1..8c8a5e9eed 100644 --- a/openpype/hosts/houdini/plugins/create/create_alembic_camera.py +++ b/openpype/hosts/houdini/plugins/create/create_alembic_camera.py @@ -3,6 +3,8 @@ from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance, CreatorError +import hou + class CreateAlembicCamera(plugin.HoudiniCreator): """Single baked camera from Alembic ROP.""" @@ -47,3 +49,9 @@ class CreateAlembicCamera(plugin.HoudiniCreator): self.lock_parameters(instance_node, to_lock) instance_node.parm("trange").set(1) + + def get_network_categories(self): + return [ + hou.ropNodeTypeCategory(), + hou.objNodeTypeCategory() + ] diff --git a/openpype/hosts/houdini/plugins/create/create_arnold_rop.py b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py new file mode 100644 index 0000000000..bddf26dbd5 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_arnold_rop.py @@ -0,0 +1,71 @@ +from openpype.hosts.houdini.api import plugin +from openpype.lib import EnumDef + + +class CreateArnoldRop(plugin.HoudiniCreator): + """Arnold ROP""" + + identifier = "io.openpype.creators.houdini.arnold_rop" + label = "Arnold ROP" + family = "arnold_rop" + icon = "magic" + defaults = ["master"] + + # Default extension + ext = "exr" + + def create(self, subset_name, instance_data, pre_create_data): + import hou + + # Remove the active, we are checking the bypass flag of the nodes + instance_data.pop("active", None) + instance_data.update({"node_type": "arnold"}) + + # Add chunk size attribute + instance_data["chunkSize"] = 1 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateArnoldRop, self).create( + subset_name, + instance_data, + pre_create_data) # type: plugin.CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + ext = pre_create_data.get("image_format") + + filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + ext=ext, + ) + parms = { + # Render frame range + "trange": 1, + + # Arnold ROP settings + "ar_picture": filepath, + "ar_exr_half_precision": 1 # half precision + } + + instance_node.setParms(parms) + + # Lock any parameters in this list + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def get_pre_create_attr_defs(self): + attrs = super(CreateArnoldRop, self).get_pre_create_attr_defs() + + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default=self.ext, + label="Image Format Options") + ] diff --git a/openpype/hosts/houdini/plugins/create/create_composite.py b/openpype/hosts/houdini/plugins/create/create_composite.py index 45af2b0630..9d4f7969bb 100644 --- a/openpype/hosts/houdini/plugins/create/create_composite.py +++ b/openpype/hosts/houdini/plugins/create/create_composite.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- """Creator plugin for creating composite sequences.""" from openpype.hosts.houdini.api import plugin -from openpype.pipeline import CreatedInstance +from openpype.pipeline import CreatedInstance, CreatorError + +import hou class CreateCompositeSequence(plugin.HoudiniCreator): @@ -35,8 +37,20 @@ class CreateCompositeSequence(plugin.HoudiniCreator): "copoutput": 
filepath
         }
 
+        if self.selected_nodes:
+            if len(self.selected_nodes) > 1:
+                raise CreatorError("More than one item selected.")
+            path = self.selected_nodes[0].path()
+            parms["coppath"] = path
+
         instance_node.setParms(parms)
 
         # Lock any parameters in this list
         to_lock = ["prim_to_detail_pattern"]
         self.lock_parameters(instance_node, to_lock)
+
+    def get_network_categories(self):
+        return [
+            hou.ropNodeTypeCategory(),
+            hou.cop2NodeTypeCategory()
+        ]
diff --git a/openpype/hosts/houdini/plugins/create/create_karma_rop.py b/openpype/hosts/houdini/plugins/create/create_karma_rop.py
new file mode 100644
index 0000000000..edfb992e1a
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_karma_rop.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin to create Karma ROP."""
+from openpype.hosts.houdini.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.lib import BoolDef, EnumDef, NumberDef
+
+
+class CreateKarmaROP(plugin.HoudiniCreator):
+    """Karma ROP"""
+    identifier = "io.openpype.creators.houdini.karma_rop"
+    label = "Karma ROP"
+    family = "karma_rop"
+    icon = "magic"
+    defaults = ["master"]
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        import hou  # noqa
+
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "karma"})
+        # Add chunk size attribute
+        instance_data["chunkSize"] = 10
+        # Submit for job publishing
+        instance_data["farm"] = True
+
+        instance = super(CreateKarmaROP, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+
+        instance_node = hou.node(instance.get("instance_node"))
+
+        ext = pre_create_data.get("image_format")
+
+        filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format(
+            renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+            subset_name=subset_name,
+            ext=ext,
+        )
+        checkpoint = "{cp_dir}{subset_name}.$F4.checkpoint".format(
+            cp_dir=hou.text.expandString("$HIP/pyblish/"),
+            subset_name=subset_name
+        )
+
+        usd_directory = "{usd_dir}{subset_name}_$RENDERID".format(
+            usd_dir=hou.text.expandString("$HIP/pyblish/renders/usd_renders/"),    # noqa
+            subset_name=subset_name
+        )
+
+        parms = {
+            # Render Frame Range
+            "trange": 1,
+            # Karma ROP Setting
+            "picture": filepath,
+            # Karma Checkpoint Setting
+            "productName": checkpoint,
+            # USD Output Directory
+            "savetodirectory": usd_directory,
+        }
+
+        res_x = pre_create_data.get("res_x")
+        res_y = pre_create_data.get("res_y")
+
+        if self.selected_nodes:
+            # If a camera is found in the selection, use it as the render
+            # camera and optionally pick up its resolution as well
+            camera = None
+            for node in self.selected_nodes:
+                if node.type().name() == "cam":
+                    camera = node.path()
+                    if pre_create_data.get("cam_res"):
+                        res_x = node.evalParm("resx")
+                        res_y = node.evalParm("resy")
+
+            if not camera:
+                self.log.warning("No render camera found in selection")
+
+            parms.update({
+                "camera": camera or "",
+                "resolutionx": res_x,
+                "resolutiony": res_y,
+            })
+
+        instance_node.setParms(parms)
+
+        # Lock some Avalon attributes
+        to_lock = ["family", "id"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_pre_create_attr_defs(self):
+        attrs = super(CreateKarmaROP, self).get_pre_create_attr_defs()
+
+        image_format_enum = [
+            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+            "rad", "rat", "rta", "sgi", "tga", "tif",
+        ]
+
+        return attrs + [
+            EnumDef("image_format",
+                    image_format_enum,
+                    default="exr",
+                    label="Image Format Options"),
+            NumberDef("res_x",
+                      label="width",
+                      default=1920,
+                      decimals=0),
+            NumberDef("res_y",
label="height", + default=720, + decimals=0), + BoolDef("cam_res", + label="Camera Resolution", + default=False) + ] diff --git a/openpype/hosts/houdini/plugins/create/create_mantra_rop.py b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py new file mode 100644 index 0000000000..5ca53e96de --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_mantra_rop.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +"""Creator plugin to create Mantra ROP.""" +from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef, BoolDef + + +class CreateMantraROP(plugin.HoudiniCreator): + """Mantra ROP""" + identifier = "io.openpype.creators.houdini.mantra_rop" + label = "Mantra ROP" + family = "mantra_rop" + icon = "magic" + defaults = ["master"] + + def create(self, subset_name, instance_data, pre_create_data): + import hou # noqa + + instance_data.pop("active", None) + instance_data.update({"node_type": "ifd"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateMantraROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + ext = pre_create_data.get("image_format") + + filepath = "{renders_dir}{subset_name}/{subset_name}.$F4.{ext}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + ext=ext, + ) + + parms = { + # Render Frame Range + "trange": 1, + # Mantra ROP Setting + "vm_picture": filepath, + } + + if self.selected_nodes: + # If camera found in selection + # we will use as render camera + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + camera = node.path() + + if not camera: + self.log.warning("No render camera found in selection") + + parms.update({"camera": camera or ""}) + + custom_res = pre_create_data.get("override_resolution") + if custom_res: + parms.update({"override_camerares": 1}) + instance_node.setParms(parms) + + # Lock some Avalon attributes + to_lock = ["family", "id"] + self.lock_parameters(instance_node, to_lock) + + def get_pre_create_attr_defs(self): + attrs = super(CreateMantraROP, self).get_pre_create_attr_defs() + + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + image_format_enum, + default="exr", + label="Image Format Options"), + BoolDef("override_resolution", + label="Override Camera Resolution", + tooltip="Override the current camera " + "resolution, recommended for IPR.", + default=False) + ] diff --git a/openpype/hosts/houdini/plugins/create/create_pointcache.py b/openpype/hosts/houdini/plugins/create/create_pointcache.py index 6b6b277422..df74070fee 100644 --- a/openpype/hosts/houdini/plugins/create/create_pointcache.py +++ b/openpype/hosts/houdini/plugins/create/create_pointcache.py @@ -3,6 +3,8 @@ from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance +import hou + class CreatePointCache(plugin.HoudiniCreator): """Alembic ROP to pointcache""" @@ -49,3 +51,9 @@ class CreatePointCache(plugin.HoudiniCreator): # Lock any parameters in this list to_lock = ["prim_to_detail_pattern"] self.lock_parameters(instance_node, to_lock) + + def get_network_categories(self): + return [ + hou.ropNodeTypeCategory(), + hou.sopNodeTypeCategory() + ] diff --git 
a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py index 2cbe9bfda1..e14ff15bf8 100644 --- a/openpype/hosts/houdini/plugins/create/create_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/create/create_redshift_rop.py @@ -1,7 +1,10 @@ # -*- coding: utf-8 -*- """Creator plugin to create Redshift ROP.""" +import hou # noqa + from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef class CreateRedshiftROP(plugin.HoudiniCreator): @@ -11,20 +14,16 @@ class CreateRedshiftROP(plugin.HoudiniCreator): family = "redshift_rop" icon = "magic" defaults = ["master"] + ext = "exr" def create(self, subset_name, instance_data, pre_create_data): - import hou # noqa instance_data.pop("active", None) instance_data.update({"node_type": "Redshift_ROP"}) # Add chunk size attribute instance_data["chunkSize"] = 10 - - # Clear the family prefix from the subset - subset = subset_name - subset_no_prefix = subset[len(self.family):] - subset_no_prefix = subset_no_prefix[0].lower() + subset_no_prefix[1:] - subset_name = subset_no_prefix + # Submit for job publishing + instance_data["farm"] = True instance = super(CreateRedshiftROP, self).create( subset_name, @@ -34,11 +33,10 @@ class CreateRedshiftROP(plugin.HoudiniCreator): instance_node = hou.node(instance.get("instance_node")) basename = instance_node.name() - instance_node.setName(basename + "_ROP", unique_name=True) # Also create the linked Redshift IPR Rop try: - ipr_rop = self.parent.createNode( + ipr_rop = instance_node.parent().createNode( "Redshift_IPR", node_name=basename + "_IPR" ) except hou.OperationFailed: @@ -50,19 +48,58 @@ class CreateRedshiftROP(plugin.HoudiniCreator): ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1)) # Set the linked rop to the Redshift ROP - ipr_rop.parm("linked_rop").set(ipr_rop.relativePathTo(instance)) + ipr_rop.parm("linked_rop").set(instance_node.path()) + + ext = pre_create_data.get("image_format") + filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format( + renders_dir=hou.text.expandString("$HIP/pyblish/renders/"), + subset_name=subset_name, + fmt="${aov}.$F4.{ext}".format(aov="AOV", ext=ext) + ) - prefix = '${HIP}/render/${HIPNAME}/`chs("subset")`.${AOV}.$F4.exr' parms = { # Render frame range "trange": 1, # Redshift ROP settings - "RS_outputFileNamePrefix": prefix, - "RS_outputMultilayerMode": 0, # no multi-layered exr + "RS_outputFileNamePrefix": filepath, + "RS_outputMultilayerMode": "1", # no multi-layered exr "RS_outputBeautyAOVSuffix": "beauty", } + + if self.selected_nodes: + # set up the render camera from the selected node + camera = None + for node in self.selected_nodes: + if node.type().name() == "cam": + camera = node.path() + parms.update({ + "RS_renderCamera": camera or ""}) instance_node.setParms(parms) # Lock some Avalon attributes to_lock = ["family", "id"] self.lock_parameters(instance_node, to_lock) + + def remove_instances(self, instances): + for instance in instances: + node = instance.data.get("instance_node") + + ipr_node = hou.node(f"{node}_IPR") + if ipr_node: + ipr_node.destroy() + + return super(CreateRedshiftROP, self).remove_instances(instances) + + def get_pre_create_attr_defs(self): + attrs = super(CreateRedshiftROP, self).get_pre_create_attr_defs() + image_format_enum = [ + "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png", + "rad", "rat", "rta", "sgi", "tga", "tif", + ] + + return attrs + [ + EnumDef("image_format", + 
image_format_enum,
                    default=self.ext,
                    label="Image Format Options")
        ]
diff --git a/openpype/hosts/houdini/plugins/create/create_review.py b/openpype/hosts/houdini/plugins/create/create_review.py
new file mode 100644
index 0000000000..ab06b30c35
--- /dev/null
+++ b/openpype/hosts/houdini/plugins/create/create_review.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating openGL reviews."""
+from openpype.hosts.houdini.api import plugin
+from openpype.lib import EnumDef, BoolDef, NumberDef
+
+
+class CreateReview(plugin.HoudiniCreator):
+    """Review with OpenGL ROP"""
+
+    identifier = "io.openpype.creators.houdini.review"
+    label = "Review"
+    family = "review"
+    icon = "video-camera"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        import hou
+
+        instance_data.pop("active", None)
+        instance_data.update({"node_type": "opengl"})
+        instance_data["imageFormat"] = pre_create_data.get("imageFormat")
+        instance_data["keepImages"] = pre_create_data.get("keepImages")
+
+        instance = super(CreateReview, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
+
+        instance_node = hou.node(instance.get("instance_node"))
+
+        frame_range = hou.playbar.frameRange()
+
+        filepath = "{root}/{subset}/{subset}.$F4.{ext}".format(
+            root=hou.text.expandString("$HIP/pyblish"),
+            subset="`chs(\"subset\")`",  # keep dynamic link to subset
+            ext=pre_create_data.get("imageFormat") or "png"
+        )
+
+        parms = {
+            "picture": filepath,
+
+            "trange": 1,
+
+            # Unlike many other ROP nodes the opengl node does not default
+            # to expression of $FSTART and $FEND so we preserve that behavior
+            # but do set the range to the frame range of the playbar
+            "f1": frame_range[0],
+            "f2": frame_range[1],
+        }
+
+        override_resolution = pre_create_data.get("override_resolution")
+        if override_resolution:
+            parms.update({
+                "tres": override_resolution,
+                "res1": pre_create_data.get("resx"),
+                "res2": pre_create_data.get("resy"),
+                "aspect": pre_create_data.get("aspect"),
+            })
+
+        if self.selected_nodes:
+            # Use the first camera found in the selection as the camera;
+            # other node types are set as forced objects
+            camera = None
+            force_objects = []
+            for node in self.selected_nodes:
+                path = node.path()
+                if node.type().name() == "cam":
+                    if camera:
+                        continue
+                    camera = path
+                else:
+                    force_objects.append(path)
+
+            if not camera:
+                self.log.warning("No camera found in selection.")
+
+            parms.update({
+                "camera": camera or "",
+                "scenepath": "/obj",
+                "forceobjects": " ".join(force_objects),
+                "vobjects": ""  # clear candidate objects from '*' value
+            })
+
+        instance_node.setParms(parms)
+
+        to_lock = ["id", "family"]
+
+        self.lock_parameters(instance_node, to_lock)
+
+    def get_pre_create_attr_defs(self):
+        attrs = super(CreateReview, self).get_pre_create_attr_defs()
+
+        image_format_enum = [
+            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+            "rad", "rat", "rta", "sgi", "tga", "tif",
+        ]
+
+        return attrs + [
+            BoolDef("keepImages",
+                    label="Keep Image Sequences",
+                    default=False),
+            EnumDef("imageFormat",
+                    image_format_enum,
+                    default="png",
+                    label="Image Format Options"),
+            BoolDef("override_resolution",
+                    label="Override resolution",
+                    tooltip="When disabled the resolution set on the camera "
+                            "is used instead.",
+                    default=True),
+            NumberDef("resx",
+                      label="Resolution Width",
+                      default=1280,
+                      minimum=2,
+                      decimals=0),
+            NumberDef("resy",
+                      label="Resolution Height",
+                      default=720,
+                      minimum=2,
+                      decimals=0),
+            NumberDef("aspect",
+                      label="Aspect Ratio",
+                      default=1.0,
minimum=0.0001, + decimals=3) + ] diff --git a/openpype/hosts/houdini/plugins/create/create_usd.py b/openpype/hosts/houdini/plugins/create/create_usd.py index 51ed8237c5..e05d254863 100644 --- a/openpype/hosts/houdini/plugins/create/create_usd.py +++ b/openpype/hosts/houdini/plugins/create/create_usd.py @@ -3,6 +3,8 @@ from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance +import hou + class CreateUSD(plugin.HoudiniCreator): """Universal Scene Description""" @@ -13,7 +15,6 @@ class CreateUSD(plugin.HoudiniCreator): enabled = False def create(self, subset_name, instance_data, pre_create_data): - import hou # noqa instance_data.pop("active", None) instance_data.update({"node_type": "usd"}) @@ -43,3 +44,9 @@ class CreateUSD(plugin.HoudiniCreator): "id", ] self.lock_parameters(instance_node, to_lock) + + def get_network_categories(self): + return [ + hou.ropNodeTypeCategory(), + hou.lopNodeTypeCategory() + ] diff --git a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py index 1a5011745f..c015cebd49 100644 --- a/openpype/hosts/houdini/plugins/create/create_vbd_cache.py +++ b/openpype/hosts/houdini/plugins/create/create_vbd_cache.py @@ -3,6 +3,8 @@ from openpype.hosts.houdini.api import plugin from openpype.pipeline import CreatedInstance +import hou + class CreateVDBCache(plugin.HoudiniCreator): """OpenVDB from Geometry ROP""" @@ -34,3 +36,9 @@ class CreateVDBCache(plugin.HoudiniCreator): parms["soppath"] = self.selected_nodes[0].path() instance_node.setParms(parms) + + def get_network_categories(self): + return [ + hou.ropNodeTypeCategory(), + hou.sopNodeTypeCategory() + ] diff --git a/openpype/hosts/houdini/plugins/create/create_vray_rop.py b/openpype/hosts/houdini/plugins/create/create_vray_rop.py new file mode 100644 index 0000000000..1de9be4ed6 --- /dev/null +++ b/openpype/hosts/houdini/plugins/create/create_vray_rop.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +"""Creator plugin to create VRay ROP.""" +import hou + +from openpype.hosts.houdini.api import plugin +from openpype.pipeline import CreatedInstance +from openpype.lib import EnumDef, BoolDef + + +class CreateVrayROP(plugin.HoudiniCreator): + """VRay ROP""" + + identifier = "io.openpype.creators.houdini.vray_rop" + label = "VRay ROP" + family = "vray_rop" + icon = "magic" + defaults = ["master"] + + ext = "exr" + + def create(self, subset_name, instance_data, pre_create_data): + + instance_data.pop("active", None) + instance_data.update({"node_type": "vray_renderer"}) + # Add chunk size attribute + instance_data["chunkSize"] = 10 + # Submit for job publishing + instance_data["farm"] = True + + instance = super(CreateVrayROP, self).create( + subset_name, + instance_data, + pre_create_data) # type: CreatedInstance + + instance_node = hou.node(instance.get("instance_node")) + + # Add IPR for Vray + basename = instance_node.name() + try: + ipr_rop = instance_node.parent().createNode( + "vray", node_name=basename + "_IPR" + ) + except hou.OperationFailed: + raise plugin.OpenPypeCreatorError( + "Cannot create Vray render node. " + "Make sure Vray installed and enabled!" 
+            )
+
+        ipr_rop.setPosition(instance_node.position() + hou.Vector2(0, -1))
+        ipr_rop.parm("rop").set(instance_node.path())
+
+        parms = {
+            "trange": 1,
+            "SettingsEXR_bits_per_channel": "16"   # half precision
+        }
+
+        if self.selected_nodes:
+            # set up the render camera from the selected node
+            camera = None
+            for node in self.selected_nodes:
+                if node.type().name() == "cam":
+                    camera = node.path()
+            parms.update({
+                "render_camera": camera or ""
+            })
+
+        # Enable render element
+        ext = pre_create_data.get("image_format")
+        instance_data["RenderElement"] = pre_create_data.get("render_element_enabled")  # noqa
+        if pre_create_data.get("render_element_enabled", True):
+            # Vray has its own tag for AOV file output
+            filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
+                renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+                subset_name=subset_name,
+                fmt="${aov}.$F4.{ext}".format(aov="AOV",
+                                              ext=ext)
+            )
+            re_rop = instance_node.parent().createNode(
+                "vray_render_channels",
+                node_name=basename + "_render_element"
+            )
+            # move the render element node next to the vray renderer node
+            re_rop.setPosition(instance_node.position() + hou.Vector2(0, 1))
+            re_path = re_rop.path()
+            parms.update({
+                "use_render_channels": 1,
+                "SettingsOutput_img_file_path": filepath,
+                "render_network_render_channels": re_path
+            })
+
+        else:
+            filepath = "{renders_dir}{subset_name}/{subset_name}.{fmt}".format(
+                renders_dir=hou.text.expandString("$HIP/pyblish/renders/"),
+                subset_name=subset_name,
+                fmt="$F4.{ext}".format(ext=ext)
+            )
+            parms.update({
+                "use_render_channels": 0,
+                "SettingsOutput_img_file_path": filepath
+            })
+
+        custom_res = pre_create_data.get("override_resolution")
+        if custom_res:
+            parms.update({"override_camerares": 1})
+
+        instance_node.setParms(parms)
+
+        # lock parameters from AVALON
+        to_lock = ["family", "id"]
+        self.lock_parameters(instance_node, to_lock)
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            node = instance.data.get("instance_node")
+            # for the extra render node from the plugins
+            # such as vray and redshift
+            ipr_node = hou.node("{}{}".format(node, "_IPR"))
+            if ipr_node:
+                ipr_node.destroy()
+            re_node = hou.node("{}{}".format(node,
+                                             "_render_element"))
+            if re_node:
+                re_node.destroy()
+
+        return super(CreateVrayROP, self).remove_instances(instances)
+
+    def get_pre_create_attr_defs(self):
+        attrs = super(CreateVrayROP, self).get_pre_create_attr_defs()
+        image_format_enum = [
+            "bmp", "cin", "exr", "jpg", "pic", "pic.gz", "png",
+            "rad", "rat", "rta", "sgi", "tga", "tif",
+        ]
+
+        return attrs + [
+            EnumDef("image_format",
+                    image_format_enum,
+                    default=self.ext,
+                    label="Image Format Options"),
+            BoolDef("override_resolution",
+                    label="Override Camera Resolution",
+                    tooltip="Override the current camera "
+                            "resolution, recommended for IPR.",
+                    default=False),
+            BoolDef("render_element_enabled",
+                    label="Render Element",
+                    tooltip="Create Render Element Node "
+                            "if enabled",
+                    default=False)
+        ]
diff --git a/openpype/hosts/houdini/plugins/create/create_workfile.py b/openpype/hosts/houdini/plugins/create/create_workfile.py
index 0c6d840810..1a8537adcd 100644
--- a/openpype/hosts/houdini/plugins/create/create_workfile.py
+++ b/openpype/hosts/houdini/plugins/create/create_workfile.py
@@ -14,7 +14,7 @@ class CreateWorkfile(plugin.HoudiniCreatorBase, AutoCreator):
     identifier = 
"io.openpype.creators.houdini.workfile" label = "Workfile" family = "workfile" - icon = "document" + icon = "fa5.file" default_variant = "Main" diff --git a/openpype/hosts/houdini/plugins/load/load_alembic.py b/openpype/hosts/houdini/plugins/load/load_alembic.py index 96e666b255..c6f0ebf2f9 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic.py @@ -104,3 +104,6 @@ class AbcLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_alembic_archive.py b/openpype/hosts/houdini/plugins/load/load_alembic_archive.py index b960073e12..47d2e1b896 100644 --- a/openpype/hosts/houdini/plugins/load/load_alembic_archive.py +++ b/openpype/hosts/houdini/plugins/load/load_alembic_archive.py @@ -73,3 +73,6 @@ class AbcArchiveLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_bgeo.py b/openpype/hosts/houdini/plugins/load/load_bgeo.py index b298d423bc..86e8675c02 100644 --- a/openpype/hosts/houdini/plugins/load/load_bgeo.py +++ b/openpype/hosts/houdini/plugins/load/load_bgeo.py @@ -106,3 +106,6 @@ class BgeoLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_camera.py b/openpype/hosts/houdini/plugins/load/load_camera.py index 059ad11a76..6365508f4e 100644 --- a/openpype/hosts/houdini/plugins/load/load_camera.py +++ b/openpype/hosts/houdini/plugins/load/load_camera.py @@ -192,3 +192,6 @@ class CameraLoader(load.LoaderPlugin): new_node.moveToGoodPosition() return new_node + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_image.py b/openpype/hosts/houdini/plugins/load/load_image.py index c78798e58a..26bc569c53 100644 --- a/openpype/hosts/houdini/plugins/load/load_image.py +++ b/openpype/hosts/houdini/plugins/load/load_image.py @@ -125,3 +125,6 @@ class ImageLoader(load.LoaderPlugin): prefix, padding, suffix = first_fname.rsplit(".", 2) fname = ".".join([prefix, "$F{}".format(len(padding)), suffix]) return os.path.join(root, fname).replace("\\", "/") + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_usd_layer.py b/openpype/hosts/houdini/plugins/load/load_usd_layer.py index 2e5079925b..1f0ec25128 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_layer.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_layer.py @@ -79,3 +79,6 @@ class USDSublayerLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/load/load_usd_reference.py b/openpype/hosts/houdini/plugins/load/load_usd_reference.py index c4371db39b..f66d05395e 100644 --- a/openpype/hosts/houdini/plugins/load/load_usd_reference.py +++ b/openpype/hosts/houdini/plugins/load/load_usd_reference.py @@ -79,3 +79,6 @@ class USDReferenceLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git 
a/openpype/hosts/houdini/plugins/load/load_vdb.py b/openpype/hosts/houdini/plugins/load/load_vdb.py index c558a7a0e7..87900502c5 100644 --- a/openpype/hosts/houdini/plugins/load/load_vdb.py +++ b/openpype/hosts/houdini/plugins/load/load_vdb.py @@ -102,3 +102,6 @@ class VdbLoader(load.LoaderPlugin): node = container["node"] node.destroy() + + def switch(self, container, representation): + self.update(container, representation) diff --git a/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py new file mode 100644 index 0000000000..614785487f --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_arnold_rop.py @@ -0,0 +1,135 @@ +import os +import re + +import hou +import pyblish.api + +from openpype.hosts.houdini.api import colorspace +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, get_color_management_preferences) + + +class CollectArnoldROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Arnold ROP Render Products + + Collects the instance.data["files"] for the render products. + + Provides: + instance -> files + + """ + + label = "Arnold ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["arnold_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "ar_picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name(prefix=default_prefix, + suffix=None) + render_products.append(beauty_product) + + files_by_aov = { + "": self.generate_expected_files(instance, beauty_product) + } + + num_aovs = rop.evalParm("ar_aovs") + for index in range(1, num_aovs + 1): + # Skip disabled AOVs + if not rop.evalParm("ar_enable_aovP{}".format(index)): + continue + + if rop.evalParm("ar_aov_exr_enable_layer_name{}".format(index)): + label = rop.evalParm("ar_aov_exr_layer_name{}".format(index)) + else: + label = evalParmNoFrame(rop, "ar_aov_label{}".format(index)) + + aov_product = self.get_render_product_name(default_prefix, + suffix=label) + render_products.append(aov_product) + files_by_aov[label] = self.generate_expected_files(instance, + aov_product) + + for product in render_products: + self.log.debug("Found render product: {}".format(product)) + + instance.data["files"] = list(render_products) + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + """Return the output filename using the AOV prefix and suffix""" + + # When AOV is explicitly defined in prefix we just swap it out + # directly with the AOV suffix to embed it. 
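+        # For example, a prefix of "renders/shot.%AOV%.$F4.exr" combined
+        # with the suffix "diffuse" resolves to
+        # "renders/shot.diffuse.$F4.exr" (paths here are illustrative).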
+ # Note: ${AOV} seems to be evaluated in the parameter as %AOV% + if "%AOV%" in prefix: + # It seems that when some special separator characters are present + # before the %AOV% token that Redshift will secretly remove it if + # there is no suffix for the current product, for example: + # foo_%AOV% -> foo.exr + pattern = "%AOV%" if suffix else "[._-]?%AOV%" + product_name = re.sub(pattern, + suffix, + prefix, + flags=re.IGNORECASE) + else: + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = prefix_base + "." + suffix + ext + else: + product_name = prefix + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_current_file.py b/openpype/hosts/houdini/plugins/publish/collect_current_file.py index 9cca07fdc7..7b55778803 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/collect_current_file.py @@ -1,19 +1,17 @@ import os import hou -from openpype.pipeline import legacy_io import pyblish.api -class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin): +class CollectHoudiniCurrentFile(pyblish.api.ContextPlugin): """Inject the current working file into context""" - order = pyblish.api.CollectorOrder - 0.01 + order = pyblish.api.CollectorOrder - 0.1 label = "Houdini Current File" hosts = ["houdini"] - family = ["workfile"] - def process(self, instance): + def process(self, context): """Inject the current working file""" current_file = hou.hipFile.path() @@ -21,7 +19,7 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin): # By default, Houdini will even point a new scene to a path. # However if the file is not saved at all and does not exist, # we assume the user never set it. - filepath = "" + current_file = "" elif os.path.basename(current_file) == "untitled.hip": # Due to even a new file being called 'untitled.hip' we are unable @@ -35,26 +33,5 @@ class CollectHoudiniCurrentFile(pyblish.api.InstancePlugin): "saved correctly." 
) - instance.context.data["currentFile"] = current_file - - folder, file = os.path.split(current_file) - filename, ext = os.path.splitext(file) - - instance.data.update({ - "setMembers": [current_file], - "frameStart": instance.context.data['frameStart'], - "frameEnd": instance.context.data['frameEnd'], - "handleStart": instance.context.data['handleStart'], - "handleEnd": instance.context.data['handleEnd'] - }) - - instance.data['representations'] = [{ - 'name': ext.lstrip("."), - 'ext': ext.lstrip("."), - 'files': file, - "stagingDir": folder, - }] - - self.log.info('Collected instance: {}'.format(file)) - self.log.info('Scene path: {}'.format(current_file)) - self.log.info('staging Dir: {}'.format(folder)) + context.data["currentFile"] = current_file + self.log.info('Current workfile path: {}'.format(current_file)) diff --git a/openpype/hosts/houdini/plugins/publish/collect_frames.py b/openpype/hosts/houdini/plugins/publish/collect_frames.py index 531cdf1249..91a3d9d170 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_frames.py +++ b/openpype/hosts/houdini/plugins/publish/collect_frames.py @@ -8,19 +8,16 @@ import pyblish.api from openpype.hosts.houdini.api import lib - class CollectFrames(pyblish.api.InstancePlugin): """Collect all frames which would be saved from the ROP nodes""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.01 label = "Collect Frames" - families = ["vdbcache", "imagesequence", "ass", "redshiftproxy"] + families = ["vdbcache", "imagesequence", "ass", "redshiftproxy", "review"] def process(self, instance): ropnode = hou.node(instance.data["instance_node"]) - frame_data = lib.get_frame_data(ropnode) - instance.data.update(frame_data) start_frame = instance.data.get("frameStart", None) end_frame = instance.data.get("frameEnd", None) @@ -34,8 +31,10 @@ class CollectFrames(pyblish.api.InstancePlugin): self.log.warning("Using current frame: {}".format(hou.frame())) output = output_parm.eval() - _, ext = lib.splitext(output, - allowed_multidot_extensions=[".ass.gz"]) + _, ext = lib.splitext( + output, + allowed_multidot_extensions=[".ass.gz"] + ) file_name = os.path.basename(output) result = file_name diff --git a/openpype/hosts/houdini/plugins/publish/collect_inputs.py b/openpype/hosts/houdini/plugins/publish/collect_inputs.py index 9ee0248bd9..e92a42f2e8 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_inputs.py +++ b/openpype/hosts/houdini/plugins/publish/collect_inputs.py @@ -1,5 +1,3 @@ -from bson.objectid import ObjectId - import pyblish.api from openpype.pipeline import registered_host @@ -106,7 +104,7 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): # If no valid output node is set then ignore it as validation # will be checking those cases. self.log.debug( - "No output node found, skipping " "collecting of inputs.." + "No output node found, skipping collecting of inputs.." 
) return @@ -117,7 +115,6 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): # Collect containers for the given set of nodes containers = collect_input_containers(nodes) - inputs = [ObjectId(c["representation"]) for c in containers] + inputs = [c["representation"] for c in containers] instance.data["inputRepresentations"] = inputs - - self.log.info("Collected inputs: %s" % inputs) + self.log.debug("Collected inputs: %s" % inputs) diff --git a/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py new file mode 100644 index 0000000000..584343cd64 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_instance_frame_data.py @@ -0,0 +1,56 @@ +import hou + +import pyblish.api + + +class CollectInstanceNodeFrameRange(pyblish.api.InstancePlugin): + """Collect time range frame data for the instance node.""" + + order = pyblish.api.CollectorOrder + 0.001 + label = "Instance Node Frame Range" + hosts = ["houdini"] + + def process(self, instance): + + node_path = instance.data.get("instance_node") + node = hou.node(node_path) if node_path else None + if not node_path or not node: + self.log.debug("No instance node found for instance: " + "{}".format(instance)) + return + + frame_data = self.get_frame_data(node) + if not frame_data: + return + + self.log.info("Collected time data: {}".format(frame_data)) + instance.data.update(frame_data) + + def get_frame_data(self, node): + """Get the frame data: start frame, end frame and steps + Args: + node(hou.Node) + + Returns: + dict + + """ + + data = {} + + if node.parm("trange") is None: + self.log.debug("Node has no 'trange' parameter: " + "{}".format(node.path())) + return data + + if node.evalParm("trange") == 0: + # Ignore 'render current frame' + self.log.debug("Node '{}' has 'Render current frame' set. " + "Time range data ignored.".format(node.path())) + return data + + data["frameStart"] = node.evalParm("f1") + data["frameEnd"] = node.evalParm("f2") + data["byFrameStep"] = node.evalParm("f3") + + return data diff --git a/openpype/hosts/houdini/plugins/publish/collect_instances.py b/openpype/hosts/houdini/plugins/publish/collect_instances.py index bb85630552..3772c9e705 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_instances.py +++ b/openpype/hosts/houdini/plugins/publish/collect_instances.py @@ -55,7 +55,9 @@ class CollectInstances(pyblish.api.ContextPlugin): has_family = node.evalParm("family") assert has_family, "'%s' is missing 'family'" % node.name() - self.log.info("processing {}".format(node)) + self.log.info( + "Processing legacy instance node {}".format(node.path()) + ) data = lib.read(node) # Check bypass state and reverse @@ -68,16 +70,10 @@ class CollectInstances(pyblish.api.ContextPlugin): if "active" in data: data["publish"] = data["active"] - data.update(self.get_frame_data(node)) - # Create nice name if the instance has a frame range. 
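# As a concrete illustration with hypothetical values (not from a real
# scene): given data = {"name": "pointcacheMain", "asset": "sh010"}, the
# two lines below produce the instance label "pointcacheMain (sh010)".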
label = data.get("name", node.name()) label += " (%s)" % data["asset"] # include asset in name - if "frameStart" in data and "frameEnd" in data: - frames = "[{frameStart} - {frameEnd}]".format(**data) - label = "{} {}".format(label, frames) - instance = context.create_instance(label) # Include `families` using `family` data @@ -116,6 +112,6 @@ class CollectInstances(pyblish.api.ContextPlugin): data["frameStart"] = node.evalParm("f1") data["frameEnd"] = node.evalParm("f2") - data["steps"] = node.evalParm("f3") + data["byFrameStep"] = node.evalParm("f3") return data diff --git a/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py new file mode 100644 index 0000000000..eabb1128d8 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_karma_rop.py @@ -0,0 +1,104 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectKarmaROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Karma Render Products + + Collects the instance.data["files"] for the multipart render product. + + Provides: + instance -> files + + """ + + label = "Karma ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["karma_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name( + prefix=default_prefix, suffix=None + ) + render_products.append(beauty_product) + + files_by_aov = { + "beauty": self.generate_expected_files(instance, + beauty_product) + } + + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + for product in render_products: + self.log.debug("Found render product: %s" % product) + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + product_name = prefix + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = "{}.{}{}".format(prefix_base, suffix, ext) + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff 
--git a/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py new file mode 100644 index 0000000000..c4460f5350 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_mantra_rop.py @@ -0,0 +1,127 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectMantraROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Mantra Render Products + + Collects the instance.data["files"] for the render products. + + Provides: + instance -> files + + """ + + label = "Mantra ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["mantra_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "vm_picture") + render_products = [] + + # Default beauty AOV + beauty_product = self.get_render_product_name( + prefix=default_prefix, suffix=None + ) + render_products.append(beauty_product) + + files_by_aov = { + "beauty": self.generate_expected_files(instance, + beauty_product) + } + + aov_numbers = rop.evalParm("vm_numaux") + if aov_numbers > 0: + # get the filenames of the AOVs + for i in range(1, aov_numbers + 1): + var = rop.evalParm("vm_variable_plane%d" % i) + if var: + aov_name = "vm_filename_plane%d" % i + aov_boolean = "vm_usefile_plane%d" % i + aov_enabled = rop.evalParm(aov_boolean) + has_aov_path = rop.evalParm(aov_name) + if has_aov_path and aov_enabled == 1: + aov_prefix = evalParmNoFrame(rop, aov_name) + aov_product = self.get_render_product_name( + prefix=aov_prefix, suffix=None + ) + render_products.append(aov_product) + + files_by_aov[var] = self.generate_expected_files(instance, aov_product) # noqa + + for product in render_products: + self.log.debug("Found render product: %s" % product) + + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_render_product_name(self, prefix, suffix): + product_name = prefix + if suffix: + # Add ".{suffix}" before the extension + prefix_base, ext = os.path.splitext(prefix) + product_name = prefix_base + "." 
+ suffix + ext + + return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py index f1d73d7523..dbb15ab88f 100644 --- a/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py +++ b/openpype/hosts/houdini/plugins/publish/collect_redshift_rop.py @@ -4,52 +4,13 @@ import os import hou import pyblish.api - -def get_top_referenced_parm(parm): - - processed = set() # disallow infinite loop - while True: - if parm.path() in processed: - raise RuntimeError("Parameter references result in cycle.") - - processed.add(parm.path()) - - ref = parm.getReferencedParm() - if ref.path() == parm.path(): - # It returns itself when it doesn't reference - # another parameter - return ref - else: - parm = ref - - -def evalParmNoFrame(node, parm, pad_character="#"): - - parameter = node.parm(parm) - assert parameter, "Parameter does not exist: %s.%s" % (node, parm) - - # If the parameter has a parameter reference, then get that - # parameter instead as otherwise `unexpandedString()` fails. - parameter = get_top_referenced_parm(parameter) - - # Substitute out the frame numbering with padded characters - try: - raw = parameter.unexpandedString() - except hou.Error as exc: - print("Failed: %s" % parameter) - raise RuntimeError(exc) - - def replace(match): - padding = 1 - n = match.group(2) - if n and int(n): - padding = int(n) - return pad_character * padding - - expression = re.sub(r"(\$F([0-9]*))", replace, raw) - - with hou.ScriptEvalContext(parameter): - return hou.expandStringAtFrame(expression, 0) +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): @@ -87,6 +48,9 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): prefix=default_prefix, suffix=beauty_suffix ) render_products.append(beauty_product) + files_by_aov = { + "_": self.generate_expected_files(instance, + beauty_product)} num_aovs = rop.evalParm("RS_aov") for index in range(num_aovs): @@ -104,11 +68,29 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): aov_product = self.get_render_product_name(aov_prefix, aov_suffix) render_products.append(aov_product) + files_by_aov[aov_suffix] = self.generate_expected_files(instance, + aov_product) # noqa + for product in render_products: self.log.debug("Found render product: %s" % product) filenames = list(render_products) instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + + # 
update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] def get_render_product_name(self, prefix, suffix): """Return the output filename using the AOV prefix and suffix""" @@ -133,3 +115,27 @@ class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin): product_name = prefix return product_name + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_review_data.py b/openpype/hosts/houdini/plugins/publish/collect_review_data.py new file mode 100644 index 0000000000..3efb75e66c --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_review_data.py @@ -0,0 +1,55 @@ +import hou +import pyblish.api + + +class CollectHoudiniReviewData(pyblish.api.InstancePlugin): + """Collect Review Data.""" + + label = "Collect Review Data" + order = pyblish.api.CollectorOrder + 0.1 + hosts = ["houdini"] + families = ["review"] + + def process(self, instance): + + # This fixes the burnin having the incorrect start/end timestamps + # because without this it would take it from the context instead + # which isn't the actual frame range that this instance renders. + instance.data["handleStart"] = 0 + instance.data["handleEnd"] = 0 + instance.data["fps"] = instance.context.data["fps"] + + # Enable ftrack functionality + instance.data.setdefault("families", []).append('ftrack') + + # Get the camera from the rop node to collect the focal length + ropnode_path = instance.data["instance_node"] + ropnode = hou.node(ropnode_path) + + camera_path = ropnode.parm("camera").eval() + camera_node = hou.node(camera_path) + if not camera_node: + self.log.warning("No valid camera node found on review node: " + "{}".format(camera_path)) + return + + # Collect focal length. 
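# A standalone sketch of the per-frame sampling done below, assuming a
# hypothetical camera path and a live Houdini session:
#
#     import hou
#
#     cam = hou.node("/obj/cam1")  # hypothetical path
#     parm = cam.parm("focal")
#     if parm.isTimeDependent():
#         # One sample per frame, e.g. frames 1001-1005 inclusive
#         focal = [parm.evalAsFloatAtFrame(f) for f in range(1001, 1006)]
#     else:
#         focal = parm.evalAsFloat()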
+ focal_length_parm = camera_node.parm("focal") + if not focal_length_parm: + self.log.warning("No 'focal' (focal length) parameter found on " + "camera: {}".format(camera_path)) + return + + if focal_length_parm.isTimeDependent(): + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + 1 + focal_length = [ + focal_length_parm.evalAsFloatAtFrame(t) + for t in range(int(start), int(end)) + ] + else: + focal_length = focal_length_parm.evalAsFloat() + + # Store focal length in `burninDataMembers` + burnin_members = instance.data.setdefault("burninDataMembers", {}) + burnin_members["focalLength"] = focal_length diff --git a/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py new file mode 100644 index 0000000000..2a6be6b9f1 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_rop_frame_range.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +"""Collector plugin for frames data on ROP instances.""" +import hou # noqa +import pyblish.api +from openpype.hosts.houdini.api import lib + + +class CollectRopFrameRange(pyblish.api.InstancePlugin): + """Collect all frames which would be saved from the ROP nodes""" + + order = pyblish.api.CollectorOrder + label = "Collect RopNode Frame Range" + + def process(self, instance): + + node_path = instance.data.get("instance_node") + if node_path is None: + # Instance without instance node like a workfile instance + return + + ropnode = hou.node(node_path) + frame_data = lib.get_frame_data(ropnode) + + if "frameStart" in frame_data and "frameEnd" in frame_data: + + # Log artist friendly message about the collected frame range + message = ( + "Frame range {0[frameStart]} - {0[frameEnd]}" + ).format(frame_data) + if frame_data.get("step", 1.0) != 1.0: + message += " with step {0[step]}".format(frame_data) + self.log.info(message) + + instance.data.update(frame_data) + + # Add frame range to label if the instance has a frame range. + label = instance.data.get("label", instance.data["name"]) + instance.data["label"] = ( + "{0} [{1[frameStart]} - {1[frameEnd]}]".format(label, + frame_data) + ) diff --git a/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py new file mode 100644 index 0000000000..d4fe37f993 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_vray_rop.py @@ -0,0 +1,129 @@ +import re +import os + +import hou +import pyblish.api + +from openpype.hosts.houdini.api.lib import ( + evalParmNoFrame, + get_color_management_preferences +) +from openpype.hosts.houdini.api import ( + colorspace +) + + +class CollectVrayROPRenderProducts(pyblish.api.InstancePlugin): + """Collect Vray Render Products + + Collects the instance.data["files"] for the render products. 
+ + Provides: + instance -> files + + """ + + label = "VRay ROP Render Products" + order = pyblish.api.CollectorOrder + 0.4 + hosts = ["houdini"] + families = ["vray_rop"] + + def process(self, instance): + + rop = hou.node(instance.data.get("instance_node")) + + # Collect chunkSize + chunk_size_parm = rop.parm("chunkSize") + if chunk_size_parm: + chunk_size = int(chunk_size_parm.eval()) + instance.data["chunkSize"] = chunk_size + self.log.debug("Chunk Size: %s" % chunk_size) + + default_prefix = evalParmNoFrame(rop, "SettingsOutput_img_file_path") + render_products = [] + # TODO: add render elements if render element + + beauty_product = self.get_beauty_render_product(default_prefix) + render_products.append(beauty_product) + files_by_aov = { + "RGB Color": self.generate_expected_files(instance, + beauty_product)} + + if instance.data.get("RenderElement", True): + render_element = self.get_render_element_name(rop, default_prefix) + if render_element: + for aov, renderpass in render_element.items(): + render_products.append(renderpass) + files_by_aov[aov] = self.generate_expected_files(instance, renderpass) # noqa + + for product in render_products: + self.log.debug("Found render product: %s" % product) + filenames = list(render_products) + instance.data["files"] = filenames + instance.data["renderProducts"] = colorspace.ARenderProduct() + + # For now by default do NOT try to publish the rendered output + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] # stub required data + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["expectedFiles"].append(files_by_aov) + self.log.debug("expectedFiles:{}".format(files_by_aov)) + + # update the colorspace data + colorspace_data = get_color_management_preferences() + instance.data["colorspaceConfig"] = colorspace_data["config"] + instance.data["colorspaceDisplay"] = colorspace_data["display"] + instance.data["colorspaceView"] = colorspace_data["view"] + + def get_beauty_render_product(self, prefix, suffix=""): + """Return the beauty output filename if render element enabled + """ + aov_parm = ".{}".format(suffix) + beauty_product = None + if aov_parm in prefix: + beauty_product = prefix.replace(aov_parm, "") + else: + beauty_product = prefix + + return beauty_product + + def get_render_element_name(self, node, prefix, suffix=""): + """Return the output filename using the AOV prefix and suffix + """ + render_element_dict = {} + # need a rewrite + re_path = node.evalParm("render_network_render_channels") + if re_path: + node_children = hou.node(re_path).children() + for element in node_children: + if element.shaderName() != "vray:SettingsRenderChannels": + aov = str(element) + render_product = prefix.replace(suffix, aov) + render_element_dict[aov] = render_product + return render_element_dict + + def generate_expected_files(self, instance, path): + """Create expected files in instance data""" + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + def replace(match): + return "%0{}d".format(len(match.group())) + + file = re.sub("#+", replace, file) + + if "%" not in file: + return path + + expected_files = [] + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + for i in range(int(start), (int(end) + 1)): + expected_files.append( + os.path.join(dir, (file % i)).replace("\\", "/")) + + return expected_files diff --git a/openpype/hosts/houdini/plugins/publish/collect_workfile.py 
b/openpype/hosts/houdini/plugins/publish/collect_workfile.py new file mode 100644 index 0000000000..aa533bcf1b --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/collect_workfile.py @@ -0,0 +1,35 @@ +import os + +import pyblish.api + + +class CollectWorkfile(pyblish.api.InstancePlugin): + """Inject workfile representation into instance""" + + order = pyblish.api.CollectorOrder - 0.01 + label = "Houdini Workfile Data" + hosts = ["houdini"] + families = ["workfile"] + + def process(self, instance): + + current_file = instance.context.data["currentFile"] + folder, file = os.path.split(current_file) + filename, ext = os.path.splitext(file) + + instance.data.update({ + "setMembers": [current_file], + "frameStart": instance.context.data['frameStart'], + "frameEnd": instance.context.data['frameEnd'], + "handleStart": instance.context.data['handleStart'], + "handleEnd": instance.context.data['handleEnd'] + }) + + instance.data['representations'] = [{ + 'name': ext.lstrip("."), + 'ext': ext.lstrip("."), + 'files': file, + "stagingDir": folder, + }] + + self.log.debug('Collected workfile instance: {}'.format(file)) diff --git a/openpype/hosts/houdini/plugins/publish/extract_opengl.py b/openpype/hosts/houdini/plugins/publish/extract_opengl.py new file mode 100644 index 0000000000..6c36dec5f5 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/extract_opengl.py @@ -0,0 +1,51 @@ +import os + +import pyblish.api + +from openpype.pipeline import publish +from openpype.hosts.houdini.api.lib import render_rop + +import hou + + +class ExtractOpenGL(publish.Extractor): + + order = pyblish.api.ExtractorOrder - 0.01 + label = "Extract OpenGL" + families = ["review"] + hosts = ["houdini"] + + def process(self, instance): + ropnode = hou.node(instance.data.get("instance_node")) + + output = ropnode.evalParm("picture") + staging_dir = os.path.normpath(os.path.dirname(output)) + instance.data["stagingDir"] = staging_dir + file_name = os.path.basename(output) + + self.log.info("Extracting '%s' to '%s'" % (file_name, + staging_dir)) + + render_rop(ropnode) + + output = instance.data["frames"] + + tags = ["review"] + if not instance.data.get("keepImages"): + tags.append("delete") + + representation = { + "name": instance.data["imageFormat"], + "ext": instance.data["imageFormat"], + "files": output, + "stagingDir": staging_dir, + "frameStart": instance.data["frameStart"], + "frameEnd": instance.data["frameEnd"], + "tags": tags, + "preview": True, + "camera_name": instance.data.get("review_camera") + } + + if "representations" not in instance.data: + instance.data["representations"] = [] + instance.data["representations"].append(representation) diff --git a/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml b/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml deleted file mode 100644 index 0f92560bf7..0000000000 --- a/openpype/hosts/houdini/plugins/publish/help/validate_vdb_input_node.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - -Scene setting - -## Invalid input node - -VDB input must have the same number of VDBs, points, primitives and vertices as output. 
- - - ### __Detailed Info__ (optional) - -A VDB is an inherited type of Prim, holds the following data: - - Primitives: 1 - - Points: 1 - - Vertices: 1 - - VDBs: 1 - - - \ No newline at end of file diff --git a/openpype/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml b/openpype/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml new file mode 100644 index 0000000000..eb83bfffe3 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/help/validate_vdb_output_node.xml @@ -0,0 +1,28 @@ + + + +Invalid VDB + +## Invalid VDB output + +All primitives of the output geometry must be VDBs; no other primitive +types are allowed. That means that regardless of the number of VDBs in the +geometry it will have an equal number of VDBs, points, primitives and +vertices, since each VDB primitive is one point, one vertex and one VDB. + +This validation only checks the geometry on the first frame of the export +frame range. + + + + + +### Detailed Info + +ROP node `{rop_path}` is set to export SOP path `{sop_path}`. + +{message} + + + + \ No newline at end of file diff --git a/openpype/hosts/houdini/plugins/publish/increment_current_file.py b/openpype/hosts/houdini/plugins/publish/increment_current_file.py index 16d9ef9aec..2493b28bc1 100644 --- a/openpype/hosts/houdini/plugins/publish/increment_current_file.py +++ b/openpype/hosts/houdini/plugins/publish/increment_current_file.py @@ -2,7 +2,10 @@ import pyblish.api from openpype.lib import version_up from openpype.pipeline import registered_host +from openpype.action import get_errored_plugins_from_data from openpype.hosts.houdini.api import HoudiniHost +from openpype.pipeline.publish import KnownPublishError + class IncrementCurrentFile(pyblish.api.ContextPlugin): """Increment the current file. @@ -14,17 +17,32 @@ class IncrementCurrentFile(pyblish.api.ContextPlugin): label = "Increment current file" order = pyblish.api.IntegratorOrder + 9.0 hosts = ["houdini"] - families = ["workfile"] + families = ["workfile", + "redshift_rop", + "arnold_rop", + "mantra_rop", + "karma_rop", + "usdrender"] optional = True def process(self, context): + errored_plugins = get_errored_plugins_from_data(context) + if any( + plugin.__name__ == "HoudiniSubmitPublishDeadline" + for plugin in errored_plugins + ): + raise KnownPublishError( + "Skipping incrementing current file because " + "submission to Deadline failed." + ) + + # Filename must not have changed since collecting host = registered_host() # type: HoudiniHost current_file = host.current_file() assert ( context.data["currentFile"] == current_file - ), "Collected filename from current scene name." + ), "Collected filename does not match the current scene name."
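# For reference, a simplified stand-in for what the version bump below
# amounts to (openpype.lib.version_up handles more naming schemes; the
# regex and the sample path here are illustrative assumptions):
#
#     import re
#
#     def naive_version_up(filepath):
#         def bump(match):
#             new_version = int(match.group(1)) + 1
#             return "_v{:0{}d}".format(new_version, len(match.group(1)))
#         return re.sub(r"_v(\d+)", bump, filepath, count=1)
#
#     naive_version_up("shot010_v003.hip")  # -> "shot010_v004.hip"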
new_filepath = version_up(current_file) host.save_workfile(new_filepath) diff --git a/openpype/hosts/houdini/plugins/publish/save_scene.py b/openpype/hosts/houdini/plugins/publish/save_scene.py index d6e07ccab0..703d3e4895 100644 --- a/openpype/hosts/houdini/plugins/publish/save_scene.py +++ b/openpype/hosts/houdini/plugins/publish/save_scene.py @@ -20,7 +20,7 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): ) if host.has_unsaved_changes(): - self.log.info("Saving current file {}...".format(current_file)) + self.log.info("Saving current file: {}".format(current_file)) host.save_workfile(current_file) else: self.log.debug("No unsaved changes, skipping file save..") diff --git a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py index bafb206bd3..b0cf4cdc58 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_alembic_input_node.py @@ -22,7 +22,7 @@ class ValidateAlembicInputNode(pyblish.api.InstancePlugin): invalid = self.get_invalid(instance) if invalid: raise PublishValidationError( - ("Primitive types found that are not supported" + ("Primitive types found that are not supported " "for Alembic output."), title=self.label ) diff --git a/openpype/hosts/houdini/plugins/publish/validate_scene_review.py b/openpype/hosts/houdini/plugins/publish/validate_scene_review.py new file mode 100644 index 0000000000..a44b7e1597 --- /dev/null +++ b/openpype/hosts/houdini/plugins/publish/validate_scene_review.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from openpype.pipeline import PublishValidationError +import hou + + +class ValidateSceneReview(pyblish.api.InstancePlugin): + """Validate some scene settings before publishing the review: + 1. Scene Path + 2. Resolution + """ + + order = pyblish.api.ValidatorOrder + families = ["review"] + hosts = ["houdini"] + label = "Scene Settings for Review" + + def process(self, instance): + + report = [] + instance_node = hou.node(instance.data.get("instance_node")) + + invalid = self.get_invalid_scene_path(instance_node) + if invalid: + report.append(invalid) + + invalid = self.get_invalid_camera_path(instance_node) + if invalid: + report.append(invalid) + + invalid = self.get_invalid_resolution(instance_node) + if invalid: + report.extend(invalid) + + if report: + raise PublishValidationError( + "\n\n".join(report), + title=self.label) + + def get_invalid_scene_path(self, rop_node): + scene_path_parm = rop_node.parm("scenepath") + scene_path_node = scene_path_parm.evalAsNode() + if not scene_path_node: + path = scene_path_parm.evalAsString() + return "Scene path does not exist: '{}'".format(path) + + def get_invalid_camera_path(self, rop_node): + camera_path_parm = rop_node.parm("camera") + camera_node = camera_path_parm.evalAsNode() + path = camera_path_parm.evalAsString() + if not camera_node: + return "Camera path does not exist: '{}'".format(path) + type_name = camera_node.type().name() + if type_name != "cam": + return "Camera path is not a camera: '{}' (type: {})".format( + path, type_name + ) + + def get_invalid_resolution(self, rop_node): + + # The resolution setting is only used when Override Camera Resolution + # is enabled. So we skip validation if it is disabled.
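# An equivalent standalone check, with a hypothetical ROP path for
# illustration:
#
#     import hou
#
#     rop = hou.node("/out/opengl1")  # hypothetical path
#     if rop.parm("tres").eval():     # override enabled
#         width = rop.parm("res1").eval()
#         height = rop.parm("res2").eval()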
+ override = rop_node.parm("tres").eval() + if not override: + return + + invalid = [] + res_width = rop_node.parm("res1").eval() + res_height = rop_node.parm("res2").eval() + if res_width == 0: + invalid.append("Override Resolution width is set to zero.") + if res_height == 0: + invalid.append("Override Resolution height is set to zero") + + return invalid diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py deleted file mode 100644 index 1f9ccc9c42..0000000000 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_input_node.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -import pyblish.api -from openpype.pipeline import ( - PublishValidationError -) - - -class ValidateVDBInputNode(pyblish.api.InstancePlugin): - """Validate that the node connected to the output node is of type VDB. - - Regardless of the amount of VDBs create the output will need to have an - equal amount of VDBs, points, primitives and vertices - - A VDB is an inherited type of Prim, holds the following data: - - Primitives: 1 - - Points: 1 - - Vertices: 1 - - VDBs: 1 - - """ - - order = pyblish.api.ValidatorOrder + 0.1 - families = ["vdbcache"] - hosts = ["houdini"] - label = "Validate Input Node (VDB)" - - def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - self, - "Node connected to the output node is not of type VDB", - title=self.label - ) - - @classmethod - def get_invalid(cls, instance): - - node = instance.data["output_node"] - - prims = node.geometry().prims() - nr_of_prims = len(prims) - - nr_of_points = len(node.geometry().points()) - if nr_of_points != nr_of_prims: - cls.log.error("The number of primitives and points do not match") - return [instance] - - for prim in prims: - if prim.numVertices() != 1: - cls.log.error("Found primitive with more than 1 vertex!") - return [instance] diff --git a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py index f9f88b3bf9..674782179c 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py +++ b/openpype/hosts/houdini/plugins/publish/validate_vdb_output_node.py @@ -1,14 +1,73 @@ # -*- coding: utf-8 -*- +import contextlib + import pyblish.api import hou -from openpype.pipeline import PublishValidationError + +from openpype.pipeline import PublishXmlValidationError +from openpype.hosts.houdini.api.action import SelectInvalidAction + + +def group_consecutive_numbers(nums): + """ + Args: + nums (list): List of sorted integer numbers. 
+ + Yields: + str: Group ranges as {start}-{end} if more than one number in the range + else it yields {end} + + """ + start = None + end = None + + def _result(a, b): + if a == b: + return "{}".format(a) + else: + return "{}-{}".format(a, b) + + for num in nums: + if start is None: + start = num + end = num + elif num == end + 1: + end = num + else: + yield _result(start, end) + start = num + end = num + if start is not None: + yield _result(start, end) + + +@contextlib.contextmanager +def update_mode_context(mode): + original = hou.updateModeSetting() + try: + hou.setUpdateMode(mode) + yield + finally: + hou.setUpdateMode(original) + + +def get_geometry_at_frame(sop_node, frame, force=True): + """Return geometry at frame but force a cooked value.""" + with update_mode_context(hou.updateMode.AutoUpdate): + sop_node.cook(force=force, frame_range=(frame, frame)) + return sop_node.geometryAtFrame(frame) class ValidateVDBOutputNode(pyblish.api.InstancePlugin): """Validate that the node connected to the output node is of type VDB. - Regardless of the amount of VDBs create the output will need to have an - equal amount of VDBs, points, primitives and vertices + All primitives of the output geometry must be VDBs, no other primitive + types are allowed. That means that regardless of the amount of VDBs in the + geometry it will have an equal amount of VDBs, points, primitives and + vertices since each VDB primitive is one point, one vertex and one VDB. + + This validation only checks the geometry on the first frame of the export + frame range for optimization purposes. A VDB is an inherited type of Prim, holds the following data: - Primitives: 1 @@ -22,54 +81,95 @@ class ValidateVDBOutputNode(pyblish.api.InstancePlugin): families = ["vdbcache"] hosts = ["houdini"] label = "Validate Output Node (VDB)" + actions = [SelectInvalidAction] def process(self, instance): - invalid = self.get_invalid(instance) - if invalid: - raise PublishValidationError( - "Node connected to the output node is not" " of type VDB!", - title=self.label + invalid_nodes, message = self.get_invalid_with_message(instance) + if invalid_nodes: + + # instance_node is str, but output_node is hou.Node so we convert + output = instance.data.get("output_node") + output_path = output.path() if output else None + + raise PublishXmlValidationError( + self, + "Invalid VDB content: {}".format(message), + formatting_data={ + "message": message, + "rop_path": instance.data.get("instance_node"), + "sop_path": output_path + } ) @classmethod - def get_invalid(cls, instance): + def get_invalid_with_message(cls, instance): - node = instance.data["output_node"] + node = instance.data.get("output_node") if node is None: - cls.log.error( + instance_node = instance.data.get("instance_node") + error = ( "SOP path is not correctly set on " - "ROP node '%s'." % instance.data.get("instance_node") + "ROP node `{}`.".format(instance_node) ) - return [instance] + return [hou.node(instance_node), error] frame = instance.data.get("frameStart", 0) - geometry = node.geometryAtFrame(frame) + geometry = get_geometry_at_frame(node, frame) if geometry is None: # No geometry data on this node, maybe the node hasn't cooked? - cls.log.error( - "SOP node has no geometry data. " - "Is it cooked? %s" % node.path() + error = ( + "SOP node `{}` has no geometry data. 
" + "Was it unable to cook?".format(node.path()) ) - return [node] + return [node, error] - prims = geometry.prims() - nr_of_prims = len(prims) + num_prims = geometry.intrinsicValue("primitivecount") + num_points = geometry.intrinsicValue("pointcount") + if num_prims == 0 and num_points == 0: + # Since we are only checking the first frame it doesn't mean there + # won't be VDB prims in a few frames. As such we'll assume for now + # the user knows what he or she is doing + cls.log.warning( + "SOP node `{}` has no primitives on start frame {}. " + "Validation is skipped and it is assumed elsewhere in the " + "frame range VDB prims and only VDB prims will exist." + "".format(node.path(), int(frame)) + ) + return [None, None] - # All primitives must be hou.VDB - invalid_prim = False - for prim in prims: - if not isinstance(prim, hou.VDB): - cls.log.error("Found non-VDB primitive: %s" % prim) - invalid_prim = True - if invalid_prim: - return [instance] + num_vdb_prims = geometry.countPrimType(hou.primType.VDB) + cls.log.debug("Detected {} VDB primitives".format(num_vdb_prims)) + if num_prims != num_vdb_prims: + # There's at least one primitive that is not a VDB. + # Search them and report them to the artist. + prims = geometry.prims() + invalid_prims = [prim for prim in prims + if not isinstance(prim, hou.VDB)] + if invalid_prims: + # Log prim numbers as consecutive ranges so logging isn't very + # slow for large number of primitives + error = ( + "Found non-VDB primitives for `{}`. " + "Primitive indices {} are not VDB primitives.".format( + node.path(), + ", ".join(group_consecutive_numbers( + prim.number() for prim in invalid_prims + )) + ) + ) + return [node, error] - nr_of_points = len(geometry.points()) - if nr_of_points != nr_of_prims: - cls.log.error("The number of primitives and points do not match") - return [instance] + if num_points != num_vdb_prims: + # We have points unrelated to the VDB primitives. + error = ( + "The number of primitives and points do not match in '{}'. 
" + "This likely means you have unconnected points, which we do " + "not allow in the VDB output.".format(node.path())) + return [node, error] - for prim in prims: - if prim.numVertices() != 1: - cls.log.error("Found primitive with more than 1 vertex!") - return [instance] + return [None, None] + + @classmethod + def get_invalid(cls, instance): + nodes, _ = cls.get_invalid_with_message(instance) + return nodes diff --git a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py index 7707cc2dba..543c8e1407 100644 --- a/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py +++ b/openpype/hosts/houdini/plugins/publish/validate_workfile_paths.py @@ -28,18 +28,37 @@ class ValidateWorkfilePaths( if not self.is_active(instance.data): return invalid = self.get_invalid() - self.log.info( - "node types to check: {}".format(", ".join(self.node_types))) - self.log.info( - "prohibited vars: {}".format(", ".join(self.prohibited_vars)) + self.log.debug( + "Checking node types: {}".format(", ".join(self.node_types))) + self.log.debug( + "Searching prohibited vars: {}".format( + ", ".join(self.prohibited_vars) + ) ) - if invalid: - for param in invalid: - self.log.error( - "{}: {}".format(param.path(), param.unexpandedString())) - raise PublishValidationError( - "Invalid paths found", title=self.label) + if invalid: + all_container_vars = set() + for param in invalid: + value = param.unexpandedString() + contained_vars = [ + var for var in self.prohibited_vars + if var in value + ] + all_container_vars.update(contained_vars) + + self.log.error( + "Parm {} contains prohibited vars {}: {}".format( + param.path(), + ", ".join(contained_vars), + value) + ) + + message = ( + "Prohibited vars {} found in parameter values".format( + ", ".join(all_container_vars) + ) + ) + raise PublishValidationError(message, title=self.label) @classmethod def get_invalid(cls): @@ -63,7 +82,7 @@ class ValidateWorkfilePaths( def repair(cls, instance): invalid = cls.get_invalid() for param in invalid: - cls.log.info("processing: {}".format(param.path())) + cls.log.info("Processing: {}".format(param.path())) cls.log.info("Replacing {} for {}".format( param.unexpandedString(), hou.text.expandString(param.unexpandedString()))) diff --git a/openpype/hosts/houdini/startup/MainMenuCommon.xml b/openpype/hosts/houdini/startup/MainMenuCommon.xml index c08114b71b..47a4653d5d 100644 --- a/openpype/hosts/houdini/startup/MainMenuCommon.xml +++ b/openpype/hosts/houdini/startup/MainMenuCommon.xml @@ -10,7 +10,7 @@ import hou from openpype.tools.utils import host_tools parent = hou.qt.mainWindow() -host_tools.show_creator(parent) +host_tools.show_publisher(parent, tab="create") ]]> @@ -30,7 +30,7 @@ host_tools.show_loader(parent=parent, use_context=True) import hou from openpype.tools.utils import host_tools parent = hou.qt.mainWindow() -host_tools.show_publisher(parent) +host_tools.show_publisher(parent, tab="publish") ]]> @@ -66,8 +66,8 @@ host_tools.show_workfiles(parent) ]]> - - + + dict: + """Get the current assets frame range and handles. + + Returns: + dict: with frame start, frame end, handle start, handle end. 
+ """ + # Set frame start/end + asset = get_current_project_asset() + frame_start = asset["data"].get("frameStart") + frame_end = asset["data"].get("frameEnd") + + if frame_start is None or frame_end is None: + return + + handle_start = asset["data"].get("handleStart", 0) + handle_end = asset["data"].get("handleEnd", 0) + return { + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end + } + + +def reset_frame_range(fps: bool = True): + """Set frame range to the current asset. + From the 3ds Max documentation: + + animationRange: A System Global variable which lets you get and + set an Interval value that defines the start and end frames + of the Active Time Segment. + frameRate: A System Global variable which lets you get + and set an Integer value that defines the current + scene frame rate in frames-per-second. + """ + if fps: + data_fps = get_current_project(fields=["data.fps"]) + fps_number = float(data_fps["data"]["fps"]) + rt.frameRate = fps_number + frame_range = get_frame_range() + frame_start_handle = frame_range["frameStart"] - int( + frame_range["handleStart"] + ) + frame_end_handle = frame_range["frameEnd"] + int(frame_range["handleEnd"]) + frange_cmd = ( + f"animationRange = interval {frame_start_handle} {frame_end_handle}" + ) + rt.execute(frange_cmd) + set_render_frame_range(frame_start_handle, frame_end_handle) + + +def set_context_setting(): + """Apply the project settings from the project definition + + Settings can be overwritten by an asset if the asset.data contains + any information regarding those settings. + + Examples of settings: + frame range + resolution + + Returns: + None + """ + reset_scene_resolution() + reset_frame_range() + + +def get_max_version(): + """Get the 3ds Max release year, as used for Deadline submission. + + rt.maxversion() returns an array such as + #(25000, 62, 0, 25, 0, 0, 997, 2023, ""), + where index 7 holds the release year. + + Returns: + int: 3ds Max release year, e.g. 2023. + """ + max_info = rt.maxversion() + return max_info[7] diff --git a/openpype/hosts/max/api/lib_renderproducts.py b/openpype/hosts/max/api/lib_renderproducts.py new file mode 100644 index 0000000000..94b0aeb913 --- /dev/null +++ b/openpype/hosts/max/api/lib_renderproducts.py @@ -0,0 +1,182 @@ +# Render Element Example : For scanline render, VRay +# https://help.autodesk.com/view/MAXDEV/2022/ENU/?guid=GUID-E8F75D47-B998-4800-A3A5-610E22913CFC +# arnold +# https://help.autodesk.com/view/ARNOL/ENU/?guid=arnold_for_3ds_max_ax_maxscript_commands_ax_renderview_commands_html +import os + +from pymxs import runtime as rt + +from openpype.hosts.max.api.lib import get_current_renderer +from openpype.pipeline import legacy_io +from openpype.settings import get_project_settings + + +class RenderProducts(object): + + def __init__(self, project_settings=None): + self._project_settings = project_settings or get_project_settings( + legacy_io.Session["AVALON_PROJECT"]) + + def get_beauty(self, container): + render_dir = os.path.dirname(rt.rendOutputFilename) + + output_file = os.path.join(render_dir, container) + + setting = self._project_settings + img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa + + start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + + return { + "beauty": self.get_expected_beauty( + output_file, start_frame, end_frame, img_fmt + ) + } + + def get_aovs(self, container): + render_dir = os.path.dirname(rt.rendOutputFilename) + + output_file = os.path.join(render_dir, + container) + + setting = self._project_settings + img_fmt = setting["max"]["RenderSettings"]["image_format"] # noqa +
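# For illustration, the per-frame naming these helpers produce, shown
# with hypothetical values (mirrors get_expected_beauty, defined later
# in this file):
#
#     def expected_beauty(folder, start, end, fmt):
#         return ["{}.{:04d}.{}".format(folder, f, fmt).replace("\\", "/")
#                 for f in range(start, end)]
#
#     expected_beauty("C:/proj/renders/shot010", 1, 3, "exr")
#     # -> ["C:/proj/renders/shot010.0001.exr",
#     #     "C:/proj/renders/shot010.0002.exr"]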
+ start_frame = int(rt.rendStart) + end_frame = int(rt.rendEnd) + 1 + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + render_dict = {} + + if renderer in [ + "ART_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_render_elements( + output_file, name, start_frame, + end_frame, img_fmt) + }) + elif renderer == "Redshift_Renderer": + render_name = self.get_render_elements_name() + if render_name: + rs_aov_files = rt.Execute("renderers.current.separateAovFiles") + # this doesn't work, always returns False + # rs_AovFiles = rt.RedShift_Renderer().separateAovFiles + if img_fmt == "exr" and not rs_aov_files: + for name in render_name: + if name == "RsCryptomatte": + render_dict.update({ + name: self.get_expected_render_elements( + output_file, name, start_frame, + end_frame, img_fmt) + }) + else: + for name in render_name: + render_dict.update({ + name: self.get_expected_render_elements( + output_file, name, start_frame, + end_frame, img_fmt) + }) + + elif renderer == "Arnold": + render_name = self.get_arnold_product_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_arnold_product( + output_file, name, start_frame, end_frame, img_fmt) + }) + elif renderer in [ + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3" + ]: + if img_fmt != "exr": + render_name = self.get_render_elements_name() + if render_name: + for name in render_name: + render_dict.update({ + name: self.get_expected_render_elements( + output_file, name, start_frame, + end_frame, img_fmt) # noqa + }) + + return render_dict + + def get_expected_beauty(self, folder, start_frame, end_frame, fmt): + beauty_frame_range = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + beauty_output = f"{folder}.{frame}.{fmt}" + beauty_output = beauty_output.replace("\\", "/") + beauty_frame_range.append(beauty_output) + + return beauty_frame_range + + def get_arnold_product_name(self): + """Get all the Arnold AOVs name""" + aov_name = [] + + amw = rt.MaxtoAOps.AOVsManagerWindow() + aov_mgr = rt.renderers.current.AOVManager + # Check if there is any aov group set in AOV manager + aov_group_num = len(aov_mgr.drivers) + if aov_group_num < 1: + return + for i in range(aov_group_num): + # get the specific AOV group + aov_name.extend(aov.name for aov in aov_mgr.drivers[i].aov_list) + # close the AOVs manager window + amw.close() + + return aov_name + + def get_expected_arnold_product(self, folder, name, + start_frame, end_frame, fmt): + """Get all the expected Arnold AOVs""" + aov_list = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + render_element = f"{folder}_{name}.{frame}.{fmt}" + render_element = render_element.replace("\\", "/") + aov_list.append(render_element) + + return aov_list + + def get_render_elements_name(self): + """Get all the render element names for general """ + render_name = [] + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 1: + return + # get render elements from the renders + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + if renderlayer_name.enabled: + target, renderpass = str(renderlayer_name).split(":") + render_name.append(renderpass) + + return render_name + + def 
get_expected_render_elements(self, folder, name, + start_frame, end_frame, fmt): + """Get all the expected render element output files. """ + render_elements = [] + for f in range(start_frame, end_frame): + frame = "%04d" % f + render_element = f"{folder}_{name}.{frame}.{fmt}" + render_element = render_element.replace("\\", "/") + render_elements.append(render_element) + + return render_elements + + def image_format(self): + return self._project_settings["max"]["RenderSettings"]["image_format"] # noqa diff --git a/openpype/hosts/max/api/lib_rendersettings.py b/openpype/hosts/max/api/lib_rendersettings.py new file mode 100644 index 0000000000..91e4a5bf9b --- /dev/null +++ b/openpype/hosts/max/api/lib_rendersettings.py @@ -0,0 +1,171 @@ +import os +from pymxs import runtime as rt +from openpype.lib import Logger +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io +from openpype.pipeline.context_tools import get_current_project_asset + +from openpype.hosts.max.api.lib import ( + set_render_frame_range, + get_current_renderer, + get_default_render_folder +) + + +class RenderSettings(object): + + log = Logger.get_logger("RenderSettings") + + _aov_chars = { + "dot": ".", + "dash": "-", + "underscore": "_" + } + + def __init__(self, project_settings=None): + """ + Set up the naming convention for the render + elements for the deadline submission + """ + + self._project_settings = project_settings + if not self._project_settings: + self._project_settings = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + def set_render_camera(self, selection): + for sel in selection: + # to avoid Attribute Error from pymxs wrapper + found = False + if rt.classOf(sel) in rt.Camera.classes: + found = True + rt.viewport.setCamera(sel) + break + if not found: + raise RuntimeError("Camera not found") + + def render_output(self, container): + folder = rt.maxFilePath + # hard-coded, should be customized in the setting + file = rt.maxFileName + folder = folder.replace("\\", "/") + # hard-coded, set the renderoutput path + setting = self._project_settings + render_folder = get_default_render_folder(setting) + filename, ext = os.path.splitext(file) + output_dir = os.path.join(folder, + render_folder, + filename) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + # hard-coded, should be customized in the setting + context = get_current_project_asset() + + # get project resolution + width = context["data"].get("resolutionWidth") + height = context["data"].get("resolutionHeight") + # Set Frame Range + frame_start = context["data"].get("frame_start") + frame_end = context["data"].get("frame_end") + set_render_frame_range(frame_start, frame_end) + # get the production render + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output = os.path.join(output_dir, container) + try: + aov_separator = self._aov_chars[( + self._project_settings["maya"] + ["RenderSettings"] + ["aov_separator"] + )] + except KeyError: + aov_separator = "." 
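# For illustration, with hypothetical values output =
# "C:/proj/renders/shot010/renderMain" and img_fmt = "exr", the template
# below yields "C:/proj/renders/shot010/renderMain..exr"; 3ds Max then
# inserts the frame number at the double dot when rendering, e.g.
# "renderMain.0001.exr", which matches the "{folder}.{frame}.{fmt}"
# pattern that RenderProducts expects. The replace() call only has an
# effect if the output name itself contains a literal "{aov_separator}"
# token.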
+ output_filename = "{0}..{1}".format(output, img_fmt) + output_filename = output_filename.replace("{aov_separator}", + aov_separator) + rt.rendOutputFilename = output_filename + if renderer == "VUE_File_Renderer": + return + # TODO: Finish the arnold render setup + if renderer == "Arnold": + self.arnold_setup() + + if renderer in [ + "ART_Renderer", + "Redshift_Renderer", + "V_Ray_6_Hotfix_3", + "V_Ray_GPU_6_Hotfix_3", + "Default_Scanline_Renderer", + "Quicksilver_Hardware_Renderer", + ]: + self.render_element_layer(output, width, height, img_fmt) + + rt.rendSaveFile = True + + if rt.renderSceneDialog.isOpen(): + rt.renderSceneDialog.close() + + def arnold_setup(self): + # get Arnold RenderView run in the background + # for setting up renderable camera + arv = rt.MAXToAOps.ArnoldRenderView() + render_camera = rt.viewport.GetCamera() + arv.setOption("Camera", str(render_camera)) + + # TODO: add AOVs and extension + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + setup_cmd = ( + f""" + amw = MaxtoAOps.AOVsManagerWindow() + amw.close() + aovmgr = renderers.current.AOVManager + aovmgr.drivers = #() + img_fmt = "{img_fmt}" + if img_fmt == "png" then driver = ArnoldPNGDriver() + if img_fmt == "jpg" then driver = ArnoldJPEGDriver() + if img_fmt == "exr" then driver = ArnoldEXRDriver() + if img_fmt == "tif" then driver = ArnoldTIFFDriver() + if img_fmt == "tiff" then driver = ArnoldTIFFDriver() + append aovmgr.drivers driver + aovmgr.drivers[1].aov_list = #() + """) + + rt.execute(setup_cmd) + arv.close() + + def render_element_layer(self, dir, width, height, ext): + """For Renderers with render elements""" + rt.renderWidth = width + rt.renderHeight = height + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + renderlayer_name = render_elem.GetRenderElement(i) + target, renderpass = str(renderlayer_name).split(":") + aov_name = "{0}_{1}..{2}".format(dir, renderpass, ext) + render_elem.SetRenderElementFileName(i, aov_name) + + def get_render_output(self, container, output_dir): + output = os.path.join(output_dir, container) + img_fmt = self._project_settings["max"]["RenderSettings"]["image_format"] # noqa + output_filename = "{0}..{1}".format(output, img_fmt) + return output_filename + + def get_render_element(self): + orig_render_elem = [] + render_elem = rt.maxOps.GetCurRenderElementMgr() + render_elem_num = render_elem.NumRenderElements() + if render_elem_num < 0: + return + + for i in range(render_elem_num): + render_element = render_elem.GetRenderElementFilename(i) + orig_render_elem.append(render_element) + + return orig_render_elem diff --git a/openpype/hosts/max/api/menu.py b/openpype/hosts/max/api/menu.py index 02d8315af6..066cc90039 100644 --- a/openpype/hosts/max/api/menu.py +++ b/openpype/hosts/max/api/menu.py @@ -4,6 +4,7 @@ from qtpy import QtWidgets, QtCore from pymxs import runtime as rt from openpype.tools.utils import host_tools +from openpype.hosts.max.api import lib class OpenPypeMenu(object): @@ -107,6 +108,17 @@ class OpenPypeMenu(object): workfiles_action = QtWidgets.QAction("Work Files...", openpype_menu) workfiles_action.triggered.connect(self.workfiles_callback) openpype_menu.addAction(workfiles_action) + + openpype_menu.addSeparator() + + res_action = QtWidgets.QAction("Set Resolution", openpype_menu) + res_action.triggered.connect(self.resolution_callback) + openpype_menu.addAction(res_action) + + frame_action = 
QtWidgets.QAction("Set Frame Range", openpype_menu) + frame_action.triggered.connect(self.frame_range_callback) + openpype_menu.addAction(frame_action) + return openpype_menu def load_callback(self): @@ -119,7 +131,7 @@ class OpenPypeMenu(object): def manage_callback(self): """Callback to show Scene Manager/Inventory tool.""" - host_tools.show_subset_manager(parent=self.main_widget) + host_tools.show_scene_inventory(parent=self.main_widget) def library_callback(self): """Callback to show Library Loader tool.""" @@ -128,3 +140,11 @@ class OpenPypeMenu(object): def workfiles_callback(self): """Callback to show Workfiles tool.""" host_tools.show_workfiles(parent=self.main_widget) + + def resolution_callback(self): + """Callback to reset scene resolution""" + return lib.reset_scene_resolution() + + def frame_range_callback(self): + """Callback to reset frame range""" + return lib.reset_frame_range() diff --git a/openpype/hosts/max/api/pipeline.py b/openpype/hosts/max/api/pipeline.py index f3cdf245fb..03b85a4066 100644 --- a/openpype/hosts/max/api/pipeline.py +++ b/openpype/hosts/max/api/pipeline.py @@ -2,10 +2,11 @@ """Pipeline tools for OpenPype Houdini integration.""" import os import logging +from operator import attrgetter import json -from openpype.host import HostBase, IWorkfileHost, ILoadHost, INewPublisher +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost import pyblish.api from openpype.pipeline import ( register_creator_plugin_path, @@ -27,7 +28,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher): +class MaxHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): name = "max" menu = None @@ -49,6 +50,12 @@ class MaxHost(HostBase, IWorkfileHost, ILoadHost, INewPublisher): self._has_been_setup = True + def context_setting(): + return lib.set_context_setting() + + rt.callbacks.addScript(rt.Name('systemPostNew'), + context_setting) + def has_unsaved_changes(self): # TODO: how to get it from 3dsmax? 
        return True
 
@@ -141,5 +148,25 @@ def ls() -> list:
         if rt.getUserProp(obj, "id") == AVALON_CONTAINER_ID
     ]
 
-    for container in sorted(containers, key=lambda name: container.name):
+    for container in sorted(containers, key=attrgetter("name")):
         yield lib.read(container)
+
+
+def containerise(name: str, nodes: list, context, loader=None, suffix="_CON"):
+    data = {
+        "schema": "openpype:container-2.0",
+        "id": AVALON_CONTAINER_ID,
+        "name": name,
+        "namespace": "",
+        "loader": loader,
+        "representation": context["representation"]["_id"],
+    }
+
+    container_name = f"{name}{suffix}"
+    container = rt.container(name=container_name)
+    for node in nodes:
+        node.Parent = container
+
+    if not lib.imprint(container_name, data):
+        print(f"imprinting of {container_name} failed.")
+    return container
diff --git a/openpype/hosts/max/api/plugin.py b/openpype/hosts/max/api/plugin.py
index 4788bfd383..b54568b360 100644
--- a/openpype/hosts/max/api/plugin.py
+++ b/openpype/hosts/max/api/plugin.py
@@ -78,12 +78,12 @@ class MaxCreator(Creator, MaxCreatorBase):
 
         self._add_instance_to_context(created_instance)
 
     def update_instances(self, update_list):
-        for created_inst, _changes in update_list:
+        for created_inst, changes in update_list:
             instance_node = created_inst.get("instance_node")
 
             new_values = {
-                key: new_value
-                for key, (_old_value, new_value) in _changes.items()
+                key: changes[key].new_value
+                for key in changes.changed_keys
             }
             imprint(
                 instance_node,
@@ -101,7 +101,9 @@ class MaxCreator(Creator, MaxCreatorBase):
             instance_node = rt.getNodeByName(
                 instance.data.get("instance_node"))
             if instance_node:
-                rt.delete(rt.getNodeByName(instance_node))
+                rt.select(instance_node)
+                rt.execute('for o in selection do for c in o.children do c.parent = undefined')  # noqa
+                rt.delete(instance_node)
 
             self._remove_instance_from_context(instance)
diff --git a/openpype/hosts/max/hooks/force_startup_script.py b/openpype/hosts/max/hooks/force_startup_script.py
new file mode 100644
index 0000000000..4fcf4fef21
--- /dev/null
+++ b/openpype/hosts/max/hooks/force_startup_script.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""Pre-launch to force 3ds max startup script."""
+from openpype.lib import PreLaunchHook
+import os
+
+
+class ForceStartupScript(PreLaunchHook):
+    """Force 3ds max to load the OpenPype startup script.
+
+    Note that this works in combination with the 3dsmax startup script,
+    which translates the environment back to PYTHONPATH for cases when
+    3dsmax drops the PYTHONPATH environment variable.
+
+    Hook `GlobalHostDataHook` must be executed before this hook.
+    """
+    app_groups = ["3dsmax"]
+    order = 11
+
+    def execute(self):
+        startup_args = [
+            "-U",
+            "MAXScript",
+            f"{os.getenv('OPENPYPE_ROOT')}\\openpype\\hosts\\max\\startup\\startup.ms"]  # noqa
+        self.launch_context.launch_args.append(startup_args)
diff --git a/openpype/hosts/max/hooks/inject_python.py b/openpype/hosts/max/hooks/inject_python.py
new file mode 100644
index 0000000000..d9753ccbd8
--- /dev/null
+++ b/openpype/hosts/max/hooks/inject_python.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+"""Pre-launch hook to inject python environment."""
+from openpype.lib import PreLaunchHook
+import os
+
+
+class InjectPythonPath(PreLaunchHook):
+    """Inject OpenPype environment to 3dsmax.
+
+    Note that this works in combination with the 3dsmax startup script,
+    which translates the environment back to PYTHONPATH for cases when
+    3dsmax drops the PYTHONPATH environment variable.
+
+    Hook `GlobalHostDataHook` must be executed before this hook.
+    """
+    app_groups = ["3dsmax"]
+
+    def execute(self):
+        self.launch_context.env["MAX_PYTHONPATH"] = os.environ["PYTHONPATH"]
diff --git a/openpype/hosts/max/plugins/create/create_camera.py b/openpype/hosts/max/plugins/create/create_camera.py
new file mode 100644
index 0000000000..91d0d4d3dc
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_camera.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating camera."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateCamera(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.camera"
+    label = "Camera"
+    family = "camera"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        sel_obj = list(rt.selection)
+        instance = super(CreateCamera, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+        container = rt.getNodeByName(instance.data.get("instance_node"))
+        # TODO: Disable "Add to Containers?" Panel
+        # parent the selected cameras into the container
+        for obj in sel_obj:
+            obj.parent = container
+        # for additional work on the node:
+        # instance_node = rt.getNodeByName(instance.get("instance_node"))
diff --git a/openpype/hosts/max/plugins/create/create_maxScene.py b/openpype/hosts/max/plugins/create/create_maxScene.py
new file mode 100644
index 0000000000..7900336f32
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_maxScene.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating raw max scene."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateMaxScene(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.maxScene"
+    label = "Max Scene"
+    family = "maxScene"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        sel_obj = list(rt.selection)
+        instance = super(CreateMaxScene, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+        container = rt.getNodeByName(instance.data.get("instance_node"))
+        # TODO: Disable "Add to Containers?" Panel
+        # parent the selected objects into the container
+        for obj in sel_obj:
+            obj.parent = container
+        # for additional work on the node:
+        # instance_node = rt.getNodeByName(instance.get("instance_node"))
diff --git a/openpype/hosts/max/plugins/create/create_model.py b/openpype/hosts/max/plugins/create/create_model.py
new file mode 100644
index 0000000000..e7ae3af9db
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_model.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for model."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateModel(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.model"
+    label = "Model"
+    family = "model"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        instance = super(CreateModel, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+        container = rt.getNodeByName(instance.data.get("instance_node"))
+        # TODO: Disable "Add to Containers?" Panel
+        # parent the selected objects into the container
+        # (empty list keeps the loop a no-op when nothing is selected)
+        sel_obj = []
+        if self.selected_nodes:
+            sel_obj = list(self.selected_nodes)
+        for obj in sel_obj:
+            obj.parent = container
+        # for additional work on the node:
+        # instance_node = rt.getNodeByName(instance.get("instance_node"))
diff --git a/openpype/hosts/max/plugins/create/create_pointcloud.py b/openpype/hosts/max/plugins/create/create_pointcloud.py
new file mode 100644
index 0000000000..c83acac3df
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_pointcloud.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating point cloud."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreatePointCloud(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.pointcloud"
+    label = "Point Cloud"
+    family = "pointcloud"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        sel_obj = list(rt.selection)
+        instance = super(CreatePointCloud, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+        container = rt.getNodeByName(instance.data.get("instance_node"))
+        # TODO: Disable "Add to Containers?" Panel
+        # parent the selected objects into the container
+        for obj in sel_obj:
+            obj.parent = container
+        # for additional work on the node:
+        # instance_node = rt.getNodeByName(instance.get("instance_node"))
diff --git a/openpype/hosts/max/plugins/create/create_redshift_proxy.py b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
new file mode 100644
index 0000000000..698ea82b69
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_redshift_proxy.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating Redshift proxy."""
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+
+
+class CreateRedshiftProxy(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.redshiftproxy"
+    label = "Redshift Proxy"
+    family = "redshiftproxy"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        _ = super(CreateRedshiftProxy, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
diff --git a/openpype/hosts/max/plugins/create/create_render.py b/openpype/hosts/max/plugins/create/create_render.py
new file mode 100644
index 0000000000..5ad895b86e
--- /dev/null
+++ b/openpype/hosts/max/plugins/create/create_render.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""Creator plugin for creating renders."""
+import os
+from openpype.hosts.max.api import plugin
+from openpype.pipeline import CreatedInstance
+from openpype.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class CreateRender(plugin.MaxCreator):
+    identifier = "io.openpype.creators.max.render"
+    label = "Render"
+    family = "maxrender"
+    icon = "gear"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        from pymxs import runtime as rt
+        sel_obj = list(rt.selection)
+        file = rt.maxFileName
+        filename, _ = os.path.splitext(file)
+        instance_data["AssetName"] = filename
+
+        instance = super(CreateRender, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)  # type: CreatedInstance
+        container_name = instance.data.get("instance_node")
+        container = rt.getNodeByName(container_name)
+        # TODO: Disable "Add to Containers?"
Panel + # parent the selected cameras into the container + for obj in sel_obj: + obj.parent = container + # for additional work on the node: + # instance_node = rt.getNodeByName(instance.get("instance_node")) + + # make sure the render dialog is closed + # for the update of resolution + # Changing the Render Setup dialog settings should be done + # with the actual Render Setup dialog in a closed state. + + # set viewport camera for rendering(mandatory for deadline) + RenderSettings().set_render_camera(sel_obj) + # set output paths for rendering(mandatory for deadline) + RenderSettings().render_output(container_name) diff --git a/openpype/hosts/max/plugins/load/load_camera_fbx.py b/openpype/hosts/max/plugins/load/load_camera_fbx.py new file mode 100644 index 0000000000..0c5dd762cf --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_camera_fbx.py @@ -0,0 +1,61 @@ +import os +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib + + +class FbxLoader(load.LoaderPlugin): + """Fbx Loader""" + + families = ["camera"] + representations = ["fbx"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + filepath = os.path.normpath(self.fname) + rt.FBXImporterSetParam("Animation", True) + rt.FBXImporterSetParam("Camera", True) + rt.FBXImporterSetParam("AxisConversionMethod", True) + rt.FBXImporterSetParam("Preserveinstances", True) + rt.importFile( + filepath, + rt.name("noPrompt"), + using=rt.FBXIMP) + + container = rt.getNodeByName(f"{name}") + if not container: + container = rt.container() + container.name = f"{name}" + + for selection in rt.getCurrentSelection(): + selection.Parent = container + + return containerise( + name, [container], context, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + + fbx_objects = self.get_container_children(node) + for fbx_object in fbx_objects: + fbx_object.source = path + + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_max_scene.py b/openpype/hosts/max/plugins/load/load_max_scene.py new file mode 100644 index 0000000000..4b19cd671f --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_max_scene.py @@ -0,0 +1,64 @@ +import os +from openpype.pipeline import ( + load, get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib + + +class MaxSceneLoader(load.LoaderPlugin): + """Max Scene Loader""" + + families = ["camera", + "maxScene", + "model"] + + representations = ["max"] + order = -8 + icon = "code-fork" + color = "green" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + path = os.path.normpath(self.fname) + # import the max scene by using "merge file" + path = path.replace('\\', '/') + + merge_before = { + c for c in rt.rootNode.Children + if rt.classOf(c) == rt.Container + } + rt.mergeMaxFile(path) + + merge_after = { + c for c in rt.rootNode.Children + if rt.classOf(c) == rt.Container + } + max_containers = 
merge_after.difference(merge_before) + + if len(max_containers) != 1: + self.log.error("Something failed when loading.") + + max_container = max_containers.pop() + + return containerise( + name, [max_container], context, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + max_objects = node.Children + for max_object in max_objects: + max_object.source = path + + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model.py b/openpype/hosts/max/plugins/load/load_model.py new file mode 100644 index 0000000000..5f1ae3378e --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_model.py @@ -0,0 +1,105 @@ +import os +from openpype.pipeline import load, get_representation_path +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib +from openpype.hosts.max.api.lib import maintained_selection + + +class ModelAbcLoader(load.LoaderPlugin): + """Loading model with the Alembic loader.""" + + families = ["model"] + label = "Load Model(Alembic)" + representations = ["abc"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + file_path = os.path.normpath(self.fname) + + abc_before = { + c + for c in rt.rootNode.Children + if rt.classOf(c) == rt.AlembicContainer + } + + rt.AlembicImport.ImportToRoot = False + rt.AlembicImport.CustomAttributes = True + rt.AlembicImport.UVs = True + rt.AlembicImport.VertexColors = True + rt.importFile(file_path, rt.name("noPrompt")) + + abc_after = { + c + for c in rt.rootNode.Children + if rt.classOf(c) == rt.AlembicContainer + } + + # This should yield new AlembicContainer node + abc_containers = abc_after.difference(abc_before) + + if len(abc_containers) != 1: + self.log.error("Something failed when loading.") + + abc_container = abc_containers.pop() + + return containerise( + name, [abc_container], context, loader=self.__class__.__name__ + ) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + rt.select(node.Children) + + for alembic in rt.selection: + abc = rt.getNodeByName(alembic.name) + rt.select(abc.Children) + for abc_con in rt.selection: + container = rt.getNodeByName(abc_con.name) + container.source = path + rt.select(container.Children) + for abc_obj in rt.selection: + alembic_obj = rt.getNodeByName(abc_obj.name) + alembic_obj.source = path + + with maintained_selection(): + rt.select(node) + + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) + + @staticmethod + def get_container_children(parent, type_name): + from pymxs import runtime as rt + + def list_children(node): + children = [] + for c in node.Children: + children.append(c) + children += list_children(c) + return children + + filtered = [] + for child in 
list_children(parent): + class_type = str(rt.classOf(child.baseObject)) + if class_type == type_name: + filtered.append(child) + + return filtered diff --git a/openpype/hosts/max/plugins/load/load_model_fbx.py b/openpype/hosts/max/plugins/load/load_model_fbx.py new file mode 100644 index 0000000000..61101c482d --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_model_fbx.py @@ -0,0 +1,67 @@ +import os +from openpype.pipeline import load, get_representation_path +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib +from openpype.hosts.max.api.lib import maintained_selection + + +class FbxModelLoader(load.LoaderPlugin): + """Fbx Model Loader""" + + families = ["model"] + representations = ["fbx"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + filepath = os.path.normpath(self.fname) + rt.FBXImporterSetParam("Animation", False) + rt.FBXImporterSetParam("Cameras", False) + rt.FBXImporterSetParam("Preserveinstances", True) + rt.importFile(filepath, rt.name("noPrompt"), using=rt.FBXIMP) + + container = rt.getNodeByName(f"{name}") + if not container: + container = rt.container() + container.name = f"{name}" + + for selection in rt.getCurrentSelection(): + selection.Parent = container + + return containerise( + name, [container], context, loader=self.__class__.__name__ + ) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + rt.select(node.Children) + + rt.FBXImporterSetParam("Animation", False) + rt.FBXImporterSetParam("Cameras", False) + rt.FBXImporterSetParam("AxisConversionMethod", True) + rt.FBXImporterSetParam("UpAxis", "Y") + rt.FBXImporterSetParam("Preserveinstances", True) + rt.importFile(path, rt.name("noPrompt"), using=rt.FBXIMP) + + with maintained_selection(): + rt.select(node) + + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model_obj.py b/openpype/hosts/max/plugins/load/load_model_obj.py new file mode 100644 index 0000000000..c55e462111 --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_model_obj.py @@ -0,0 +1,68 @@ +import os +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib +from openpype.hosts.max.api.lib import maintained_selection + + +class ObjLoader(load.LoaderPlugin): + """Obj Loader""" + + families = ["model"] + representations = ["obj"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + filepath = os.path.normpath(self.fname) + self.log.debug(f"Executing command to import..") + + rt.execute(f'importFile @"{filepath}" #noPrompt using:ObjImp') + # create "missing" container for obj import + container = rt.container() + container.name = f"{name}" + + # get current selection + for selection in rt.getCurrentSelection(): + selection.Parent = container + + asset = rt.getNodeByName(f"{name}") + + return containerise( + name, [asset], 
context, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node_name = container["instance_node"] + node = rt.getNodeByName(node_name) + + instance_name, _ = node_name.split("_") + container = rt.getNodeByName(instance_name) + for n in container.Children: + rt.delete(n) + + rt.execute(f'importFile @"{path}" #noPrompt using:ObjImp') + # get current selection + for selection in rt.getCurrentSelection(): + selection.Parent = container + + with maintained_selection(): + rt.select(node) + + lib.imprint(node_name, { + "representation": str(representation["_id"]) + }) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_model_usd.py b/openpype/hosts/max/plugins/load/load_model_usd.py new file mode 100644 index 0000000000..143f91f40b --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_model_usd.py @@ -0,0 +1,78 @@ +import os +from openpype.pipeline import ( + load, get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib +from openpype.hosts.max.api.lib import maintained_selection + + +class ModelUSDLoader(load.LoaderPlugin): + """Loading model with the USD loader.""" + + families = ["model"] + label = "Load Model(USD)" + representations = ["usda"] + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + # asset_filepath + filepath = os.path.normpath(self.fname) + import_options = rt.USDImporter.CreateOptions() + base_filename = os.path.basename(filepath) + filename, ext = os.path.splitext(base_filename) + log_filepath = filepath.replace(ext, "txt") + + rt.LogPath = log_filepath + rt.LogLevel = rt.name('info') + rt.USDImporter.importFile(filepath, + importOptions=import_options) + + asset = rt.getNodeByName(f"{name}") + + return containerise( + name, [asset], context, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node_name = container["instance_node"] + node = rt.getNodeByName(node_name) + for n in node.Children: + for r in n.Children: + rt.delete(r) + rt.delete(n) + instance_name, _ = node_name.split("_") + + import_options = rt.USDImporter.CreateOptions() + base_filename = os.path.basename(path) + _, ext = os.path.splitext(base_filename) + log_filepath = path.replace(ext, "txt") + + rt.LogPath = log_filepath + rt.LogLevel = rt.name('info') + rt.USDImporter.importFile(path, + importOptions=import_options) + + asset = rt.getNodeByName(f"{instance_name}") + asset.Parent = node + + with maintained_selection(): + rt.select(node) + + lib.imprint(node_name, { + "representation": str(representation["_id"]) + }) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_pointcache.py b/openpype/hosts/max/plugins/load/load_pointcache.py index 285d84b7b6..5fb9772f87 100644 --- a/openpype/hosts/max/plugins/load/load_pointcache.py +++ b/openpype/hosts/max/plugins/load/load_pointcache.py @@ -5,15 +5,15 @@ Because of limited api, alembics can be only 
loaded, but not easily updated. """ import os -from openpype.pipeline import ( - load -) +from openpype.pipeline import load, get_representation_path +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib class AbcLoader(load.LoaderPlugin): """Alembic loader.""" - families = ["model", "animation", "pointcache"] + families = ["camera", "animation", "pointcache"] label = "Load Alembic" representations = ["abc"] order = -10 @@ -26,21 +26,17 @@ class AbcLoader(load.LoaderPlugin): file_path = os.path.normpath(self.fname) abc_before = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } - abc_export_cmd = (f""" -AlembicImport.ImportToRoot = false - -importFile @"{file_path}" #noPrompt - """) - - self.log.debug(f"Executing command: {abc_export_cmd}") - rt.execute(abc_export_cmd) + rt.AlembicImport.ImportToRoot = False + rt.importFile(file_path, rt.name("noPrompt")) abc_after = { - c for c in rt.rootNode.Children + c + for c in rt.rootNode.Children if rt.classOf(c) == rt.AlembicContainer } @@ -52,14 +48,49 @@ importFile @"{file_path}" #noPrompt abc_container = abc_containers.pop() - container_name = f"{name}_CON" - container = rt.container(name=container_name) - abc_container.Parent = container + return containerise( + name, [abc_container], context, loader=self.__class__.__name__ + ) - return container + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + + alembic_objects = self.get_container_children(node, "AlembicObject") + for alembic_object in alembic_objects: + alembic_object.source = path + + lib.imprint( + container["instance_node"], + {"representation": str(representation["_id"])}, + ) + + def switch(self, container, representation): + self.update(container, representation) def remove(self, container): from pymxs import runtime as rt - node = container["node"] + node = rt.getNodeByName(container["instance_node"]) rt.delete(node) + + @staticmethod + def get_container_children(parent, type_name): + from pymxs import runtime as rt + + def list_children(node): + children = [] + for c in node.Children: + children.append(c) + children += list_children(c) + return children + + filtered = [] + for child in list_children(parent): + class_type = str(rt.classOf(child.baseObject)) + if class_type == type_name: + filtered.append(child) + + return filtered diff --git a/openpype/hosts/max/plugins/load/load_pointcloud.py b/openpype/hosts/max/plugins/load/load_pointcloud.py new file mode 100644 index 0000000000..27bc88b4f3 --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_pointcloud.py @@ -0,0 +1,51 @@ +import os +from openpype.pipeline import ( + load, get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib + + +class PointCloudLoader(load.LoaderPlugin): + """Point Cloud Loader""" + + families = ["pointcloud"] + representations = ["prt"] + order = -8 + icon = "code-fork" + color = "green" + + def load(self, context, name=None, namespace=None, data=None): + """load point cloud by tyCache""" + from pymxs import runtime as rt + + filepath = os.path.normpath(self.fname) + obj = rt.tyCache() + obj.filename = filepath + + prt_container = rt.getNodeByName(f"{obj.name}") + + return containerise( + name, [prt_container], context, loader=self.__class__.__name__) + + def update(self, container, 
representation): + """update the container""" + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + + prt_objects = self.get_container_children(node) + for prt_object in prt_objects: + prt_object.source = path + + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def remove(self, container): + """remove the container""" + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/load/load_redshift_proxy.py b/openpype/hosts/max/plugins/load/load_redshift_proxy.py new file mode 100644 index 0000000000..31692f6367 --- /dev/null +++ b/openpype/hosts/max/plugins/load/load_redshift_proxy.py @@ -0,0 +1,63 @@ +import os +import clique + +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.max.api.pipeline import containerise +from openpype.hosts.max.api import lib + + +class RedshiftProxyLoader(load.LoaderPlugin): + """Load rs files with Redshift Proxy""" + + label = "Load Redshift Proxy" + families = ["redshiftproxy"] + representations = ["rs"] + order = -9 + icon = "code-fork" + color = "white" + + def load(self, context, name=None, namespace=None, data=None): + from pymxs import runtime as rt + + filepath = self.filepath_from_context(context) + rs_proxy = rt.RedshiftProxy() + rs_proxy.file = filepath + files_in_folder = os.listdir(os.path.dirname(filepath)) + collections, remainder = clique.assemble(files_in_folder) + if collections: + rs_proxy.is_sequence = True + + container = rt.container() + container.name = name + rs_proxy.Parent = container + + asset = rt.getNodeByName(name) + + return containerise( + name, [asset], context, loader=self.__class__.__name__) + + def update(self, container, representation): + from pymxs import runtime as rt + + path = get_representation_path(representation) + node = rt.getNodeByName(container["instance_node"]) + for children in node.Children: + children_node = rt.getNodeByName(children.name) + for proxy in children_node.Children: + proxy.file = path + + lib.imprint(container["instance_node"], { + "representation": str(representation["_id"]) + }) + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from pymxs import runtime as rt + + node = rt.getNodeByName(container["instance_node"]) + rt.delete(node) diff --git a/openpype/hosts/max/plugins/publish/collect_render.py b/openpype/hosts/max/plugins/publish/collect_render.py new file mode 100644 index 0000000000..db5c84fad9 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/collect_render.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +"""Collect Render""" +import os +import pyblish.api + +from pymxs import runtime as rt +from openpype.pipeline import get_current_asset_name +from openpype.hosts.max.api import colorspace +from openpype.hosts.max.api.lib import get_max_version, get_current_renderer +from openpype.hosts.max.api.lib_renderproducts import RenderProducts +from openpype.client import get_last_version_by_subset_name + + +class CollectRender(pyblish.api.InstancePlugin): + """Collect Render for Deadline""" + + order = pyblish.api.CollectorOrder + 0.01 + label = "Collect 3dsmax Render Layers" + hosts = ['max'] + families = ["maxrender"] + + def process(self, instance): + context = instance.context + folder = rt.maxFilePath + file = rt.maxFileName + current_file = 
os.path.join(folder, file) + filepath = current_file.replace("\\", "/") + + context.data['currentFile'] = current_file + asset = get_current_asset_name() + + files_by_aov = RenderProducts().get_beauty(instance.name) + folder = folder.replace("\\", "/") + aovs = RenderProducts().get_aovs(instance.name) + files_by_aov.update(aovs) + + if "expectedFiles" not in instance.data: + instance.data["expectedFiles"] = list() + instance.data["files"] = list() + instance.data["expectedFiles"].append(files_by_aov) + instance.data["files"].append(files_by_aov) + + img_format = RenderProducts().image_format() + project_name = context.data["projectName"] + asset_doc = context.data["assetEntity"] + asset_id = asset_doc["_id"] + version_doc = get_last_version_by_subset_name(project_name, + instance.name, + asset_id) + self.log.debug("version_doc: {0}".format(version_doc)) + version_int = 1 + if version_doc: + version_int += int(version_doc["name"]) + + self.log.debug(f"Setting {version_int} to context.") + context.data["version"] = version_int + # OCIO config not support in + # most of the 3dsmax renderers + # so this is currently hard coded + # TODO: add options for redshift/vray ocio config + instance.data["colorspaceConfig"] = "" + instance.data["colorspaceDisplay"] = "sRGB" + instance.data["colorspaceView"] = "ACES 1.0 SDR-video" + instance.data["renderProducts"] = colorspace.ARenderProduct() + instance.data["publishJobState"] = "Suspended" + instance.data["attachTo"] = [] + renderer_class = get_current_renderer() + renderer = str(renderer_class).split(":")[0] + # also need to get the render dir for conversion + data = { + "asset": asset, + "subset": str(instance.name), + "publish": True, + "maxversion": str(get_max_version()), + "imageFormat": img_format, + "family": 'maxrender', + "families": ['maxrender'], + "renderer": renderer, + "source": filepath, + "plugin": "3dsmax", + "frameStart": int(rt.rendStart), + "frameEnd": int(rt.rendEnd), + "version": version_int, + "farm": True + } + instance.data.update(data) + + # TODO: this should be unified with maya and its "multipart" flag + # on instance. 
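+        # NOTE: Redshift can either embed all AOVs in one multichannel
+        # file or write one file per AOV; "separateAovFiles" mirrors that
+        # renderer option so downstream submission knows which layout of
+        # expected files to look for.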
+ if renderer == "Redshift_Renderer": + instance.data.update( + {"separateAovFiles": rt.Execute( + "renderers.current.separateAovFiles")}) + + self.log.info("data: {0}".format(data)) diff --git a/openpype/hosts/max/plugins/publish/extract_camera_abc.py b/openpype/hosts/max/plugins/publish/extract_camera_abc.py new file mode 100644 index 0000000000..6b3bb178a3 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_camera_abc.py @@ -0,0 +1,63 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection, get_all_children + + +class ExtractCameraAlembic(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Camera with AlembicExport + """ + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract Alembic Camera" + hosts = ["max"] + families = ["camera"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + start = float(instance.data.get("frameStartHandle", 1)) + end = float(instance.data.get("frameEndHandle", 1)) + + container = instance.data["instance_node"] + + self.log.info("Extracting Camera ...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.abc".format(**instance.data) + path = os.path.join(stagingdir, filename) + + # We run the render + self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir)) + + rt.AlembicExport.ArchiveType = rt.name("ogawa") + rt.AlembicExport.CoordinateSystem = rt.name("maya") + rt.AlembicExport.StartFrame = start + rt.AlembicExport.EndFrame = end + rt.AlembicExport.CustomAttributes = True + + with maintained_selection(): + # select and export + rt.select(get_all_children(rt.getNodeByName(container))) + rt.exportFile( + path, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.AlembicExport, + ) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "abc", + "ext": "abc", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/openpype/hosts/max/plugins/publish/extract_camera_fbx.py b/openpype/hosts/max/plugins/publish/extract_camera_fbx.py new file mode 100644 index 0000000000..4b4b349e19 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_camera_fbx.py @@ -0,0 +1,60 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection, get_all_children + + +class ExtractCameraFbx(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Camera with FbxExporter + """ + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Extract Fbx Camera" + hosts = ["max"] + families = ["camera"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + container = instance.data["instance_node"] + + self.log.info("Extracting Camera ...") + stagingdir = self.staging_dir(instance) + filename = "{name}.fbx".format(**instance.data) + + filepath = os.path.join(stagingdir, filename) + self.log.info("Writing fbx file '%s' to '%s'" % (filename, filepath)) + + rt.FBXExporterSetParam("Animation", True) + rt.FBXExporterSetParam("Cameras", True) + rt.FBXExporterSetParam("AxisConversionMethod", "Animation") + 
rt.FBXExporterSetParam("UpAxis", "Y") + rt.FBXExporterSetParam("Preserveinstances", True) + + with maintained_selection(): + # select and export + rt.select(get_all_children(rt.getNodeByName(container))) + rt.exportFile( + filepath, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.FBXEXP, + ) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "fbx", + "ext": "fbx", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, filepath) + ) diff --git a/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py b/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py new file mode 100644 index 0000000000..f0c2aff7f3 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_max_scene_raw.py @@ -0,0 +1,50 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import get_all_children + + +class ExtractMaxSceneRaw(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Raw Max Scene with SaveSelected + """ + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Extract Max Scene (Raw)" + hosts = ["max"] + families = ["camera", "maxScene", "model"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + container = instance.data["instance_node"] + + # publish the raw scene for camera + self.log.info("Extracting Raw Max Scene ...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.max".format(**instance.data) + + max_path = os.path.join(stagingdir, filename) + self.log.info("Writing max file '%s' to '%s'" % (filename, max_path)) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + nodes = get_all_children(rt.getNodeByName(container)) + rt.saveNodes(nodes, max_path, quiet=True) + + self.log.info("Performing Extraction ...") + + representation = { + "name": "max", + "ext": "max", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, max_path) + ) diff --git a/openpype/hosts/max/plugins/publish/extract_model.py b/openpype/hosts/max/plugins/publish/extract_model.py new file mode 100644 index 0000000000..4c7c98e2cc --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_model.py @@ -0,0 +1,64 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection, get_all_children + + +class ExtractModel(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Geometry in Alembic Format + """ + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract Geometry (Alembic)" + hosts = ["max"] + families = ["model"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + container = instance.data["instance_node"] + + self.log.info("Extracting Geometry ...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.abc".format(**instance.data) + filepath = os.path.join(stagingdir, filename) + + # We run the render + self.log.info("Writing alembic '%s' to '%s'" % (filename, stagingdir)) + + rt.AlembicExport.ArchiveType = rt.name("ogawa") + 
rt.AlembicExport.CoordinateSystem = rt.name("maya") + rt.AlembicExport.CustomAttributes = True + rt.AlembicExport.UVs = True + rt.AlembicExport.VertexColors = True + rt.AlembicExport.PreserveInstances = True + + with maintained_selection(): + # select and export + rt.select(get_all_children(rt.getNodeByName(container))) + rt.exportFile( + filepath, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.AlembicExport, + ) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "abc", + "ext": "abc", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, filepath) + ) diff --git a/openpype/hosts/max/plugins/publish/extract_model_fbx.py b/openpype/hosts/max/plugins/publish/extract_model_fbx.py new file mode 100644 index 0000000000..e6ccb24cdd --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_model_fbx.py @@ -0,0 +1,63 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection, get_all_children + + +class ExtractModelFbx(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Geometry in FBX Format + """ + + order = pyblish.api.ExtractorOrder - 0.05 + label = "Extract FBX" + hosts = ["max"] + families = ["model"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + container = instance.data["instance_node"] + + self.log.info("Extracting Geometry ...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.fbx".format(**instance.data) + filepath = os.path.join(stagingdir, filename) + self.log.info("Writing FBX '%s' to '%s'" % (filepath, stagingdir)) + + rt.FBXExporterSetParam("Animation", False) + rt.FBXExporterSetParam("Cameras", False) + rt.FBXExporterSetParam("Lights", False) + rt.FBXExporterSetParam("PointCache", False) + rt.FBXExporterSetParam("AxisConversionMethod", "Animation") + rt.FBXExporterSetParam("UpAxis", "Y") + rt.FBXExporterSetParam("Preserveinstances", True) + + with maintained_selection(): + # select and export + rt.select(get_all_children(rt.getNodeByName(container))) + rt.exportFile( + filepath, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.FBXEXP, + ) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "fbx", + "ext": "fbx", + "files": filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, filepath) + ) diff --git a/openpype/hosts/max/plugins/publish/extract_model_obj.py b/openpype/hosts/max/plugins/publish/extract_model_obj.py new file mode 100644 index 0000000000..ed3d68c990 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_model_obj.py @@ -0,0 +1,56 @@ +import os +import pyblish.api +from openpype.pipeline import publish, OptionalPyblishPluginMixin +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection, get_all_children + + +class ExtractModelObj(publish.Extractor, OptionalPyblishPluginMixin): + """ + Extract Geometry in OBJ Format + """ + + order = pyblish.api.ExtractorOrder - 0.05 + label = "Extract OBJ" + hosts = ["max"] + families = ["model"] + 
optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + container = instance.data["instance_node"] + + self.log.info("Extracting Geometry ...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.obj".format(**instance.data) + filepath = os.path.join(stagingdir, filename) + self.log.info("Writing OBJ '%s' to '%s'" % (filepath, stagingdir)) + + with maintained_selection(): + # select and export + rt.select(get_all_children(rt.getNodeByName(container))) + rt.exportFile( + filepath, + rt.name("noPrompt"), + selectedOnly=True, + using=rt.ObjExp, + ) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "obj", + "ext": "obj", + "files": filename, + "stagingDir": stagingdir, + } + + instance.data["representations"].append(representation) + self.log.info( + "Extracted instance '%s' to: %s" % (instance.name, filepath) + ) diff --git a/openpype/hosts/max/plugins/publish/extract_model_usd.py b/openpype/hosts/max/plugins/publish/extract_model_usd.py new file mode 100644 index 0000000000..0bed2d855e --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_model_usd.py @@ -0,0 +1,114 @@ +import os +import pyblish.api +from openpype.pipeline import ( + publish, + OptionalPyblishPluginMixin +) +from pymxs import runtime as rt +from openpype.hosts.max.api import ( + maintained_selection +) + + +class ExtractModelUSD(publish.Extractor, + OptionalPyblishPluginMixin): + """ + Extract Geometry in USDA Format + """ + + order = pyblish.api.ExtractorOrder - 0.05 + label = "Extract Geometry (USD)" + hosts = ["max"] + families = ["model"] + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + + container = instance.data["instance_node"] + + self.log.info("Extracting Geometry ...") + + stagingdir = self.staging_dir(instance) + asset_filename = "{name}.usda".format(**instance.data) + asset_filepath = os.path.join(stagingdir, + asset_filename) + self.log.info("Writing USD '%s' to '%s'" % (asset_filepath, + stagingdir)) + + log_filename = "{name}.txt".format(**instance.data) + log_filepath = os.path.join(stagingdir, + log_filename) + self.log.info("Writing log '%s' to '%s'" % (log_filepath, + stagingdir)) + + # get the nodes which need to be exported + export_options = self.get_export_options(log_filepath) + with maintained_selection(): + # select and export + node_list = self.get_node_list(container) + rt.USDExporter.ExportFile(asset_filepath, + exportOptions=export_options, + contentSource=rt.name("selected"), + nodeList=node_list) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'usda', + 'ext': 'usda', + 'files': asset_filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + log_representation = { + 'name': 'txt', + 'ext': 'txt', + 'files': log_filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(log_representation) + + self.log.info("Extracted instance '%s' to: %s" % (instance.name, + asset_filepath)) + + def get_node_list(self, container): + """ + Get the target nodes which are + the children of the container + """ + node_list = [] + + container_node = rt.getNodeByName(container) + target_node = container_node.Children + rt.select(target_node) + for sel in rt.selection: + node_list.append(sel) + + 
return node_list
+
+    def get_export_options(self, log_path):
+        """Set Export Options for USD Exporter"""
+
+        export_options = rt.USDExporter.createOptions()
+
+        export_options.Meshes = True
+        export_options.Shapes = False
+        export_options.Lights = False
+        export_options.Cameras = False
+        export_options.Materials = False
+        export_options.MeshFormat = rt.name('fromScene')
+        export_options.FileFormat = rt.name('ascii')
+        export_options.UpAxis = rt.name('y')
+        export_options.LogLevel = rt.name('info')
+        export_options.LogPath = log_path
+        export_options.PreserveEdgeOrientation = True
+        export_options.TimeMode = rt.name('current')
+
+        rt.USDexporter.UIOptions = export_options
+
+        return export_options
diff --git a/openpype/hosts/max/plugins/publish/extract_pointcache.py b/openpype/hosts/max/plugins/publish/extract_pointcache.py
index 904c1656da..8658cecb1b 100644
--- a/openpype/hosts/max/plugins/publish/extract_pointcache.py
+++ b/openpype/hosts/max/plugins/publish/extract_pointcache.py
@@ -41,17 +41,14 @@
 import os
 import pyblish.api
 from openpype.pipeline import publish
 from pymxs import runtime as rt
-from openpype.hosts.max.api import (
-    maintained_selection,
-    get_all_children
-)
+from openpype.hosts.max.api import maintained_selection, get_all_children
 
 
 class ExtractAlembic(publish.Extractor):
     order = pyblish.api.ExtractorOrder
     label = "Extract Pointcache"
     hosts = ["max"]
-    families = ["pointcache", "camera"]
+    families = ["pointcache"]
 
     def process(self, instance):
         start = float(instance.data.get("frameStartHandle", 1))
@@ -66,35 +63,30 @@ class ExtractAlembic(publish.Extractor):
         path = os.path.join(parent_dir, file_name)
 
         # We run the render
-        self.log.info("Writing alembic '%s' to '%s'" % (file_name,
-                                                        parent_dir))
+        self.log.info("Writing alembic '%s' to '%s'" % (file_name, parent_dir))
 
-        abc_export_cmd = (
-            f"""
-AlembicExport.ArchiveType = #ogawa
-AlembicExport.CoordinateSystem = #maya
-AlembicExport.StartFrame = {start}
-AlembicExport.EndFrame = {end}
-
-exportFile @"{path}" #noPrompt selectedOnly:on using:AlembicExport
-
-        """)
-
-        self.log.debug(f"Executing command: {abc_export_cmd}")
+        rt.AlembicExport.ArchiveType = rt.name("ogawa")
+        rt.AlembicExport.CoordinateSystem = rt.name("maya")
+        rt.AlembicExport.StartFrame = start
+        rt.AlembicExport.EndFrame = end
 
         with maintained_selection():
             # select and export
             rt.select(get_all_children(rt.getNodeByName(container)))
-            rt.execute(abc_export_cmd)
+            rt.exportFile(
+                path,
+                rt.name("noPrompt"),
+                selectedOnly=True,
+                using=rt.AlembicExport,
+            )
 
         if "representations" not in instance.data:
             instance.data["representations"] = []
 
         representation = {
-            'name': 'abc',
-            'ext': 'abc',
-            'files': file_name,
+            "name": "abc",
+            "ext": "abc",
+            "files": file_name,
             "stagingDir": parent_dir,
         }
         instance.data["representations"].append(representation)
diff --git a/openpype/hosts/max/plugins/publish/extract_pointcloud.py b/openpype/hosts/max/plugins/publish/extract_pointcloud.py
new file mode 100644
index 0000000000..e8d58ab713
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/extract_pointcloud.py
@@ -0,0 +1,207 @@
+import os
+import pyblish.api
+from openpype.pipeline import publish
+from pymxs import runtime as rt
+from openpype.hosts.max.api import (
+    maintained_selection
+)
+from openpype.settings import get_project_settings
+from openpype.pipeline import legacy_io
+
+
+def get_setting(project_setting=None):
+    # only query the project settings when none were passed in
+    if project_setting is None:
+        project_setting = get_project_settings(
+            legacy_io.Session["AVALON_PROJECT"]
+        )
+    return project_setting["max"]["PointCloud"]
+
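+# A rough sketch of the MAXScript statements this extractor builds and
+# runs through rt.execute() for one tyFlow "Export Particles" operator
+# (node and event names below are illustrative only):
+#
+#   $tyFlow001.Event_001.export_particles.frameStart=1001
+#   $tyFlow001.Event_001.export_particles.frameEnd=1050
+#   $tyFlow001.Event_001.export_particles.PRTFilename="C:/staging/pointcloudMain.prt"
+#   $tyFlow001.Event_001.export_particles.PRTPartitionsMode=2
+#   $tyFlow001.Event_001.export_particles.PRTChannels_Position=True
+#   $tyFlow001.Event_001.export_particles.exportPRT()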
+class ExtractPointCloud(publish.Extractor): + """ + Extract PRT format with tyFlow operators + + Notes: + Currently only works for the default partition setting + + Args: + export_particle(): sets up all job arguments for attributes + to be exported in MAXscript + + get_operators(): get the export_particle operator + + get_custom_attr(): get all custom channel attributes from Openpype + setting and sets it as job arguments before exporting + + get_files(): get the files with tyFlow naming convention + before publishing + + partition_output_name(): get the naming with partition settings. + get_partition(): get partition value + + """ + + order = pyblish.api.ExtractorOrder - 0.2 + label = "Extract Point Cloud" + hosts = ["max"] + families = ["pointcloud"] + + def process(self, instance): + start = int(instance.context.data.get("frameStart")) + end = int(instance.context.data.get("frameEnd")) + container = instance.data["instance_node"] + self.log.info("Extracting PRT...") + + stagingdir = self.staging_dir(instance) + filename = "{name}.prt".format(**instance.data) + path = os.path.join(stagingdir, filename) + + with maintained_selection(): + job_args = self.export_particle(container, + start, + end, + path) + for job in job_args: + rt.execute(job) + + self.log.info("Performing Extraction ...") + if "representations" not in instance.data: + instance.data["representations"] = [] + + self.log.info("Writing PRT with TyFlow Plugin...") + filenames = self.get_files(container, path, start, end) + self.log.debug("filenames: {0}".format(filenames)) + + partition = self.partition_output_name(container) + + representation = { + 'name': 'prt', + 'ext': 'prt', + 'files': filenames if len(filenames) > 1 else filenames[0], + "stagingDir": stagingdir, + "outputName": partition # partition value + } + instance.data["representations"].append(representation) + self.log.info("Extracted instance '%s' to: %s" % (instance.name, + path)) + + def export_particle(self, + container, + start, + end, + filepath): + job_args = [] + opt_list = self.get_operators(container) + for operator in opt_list: + start_frame = "{0}.frameStart={1}".format(operator, + start) + job_args.append(start_frame) + end_frame = "{0}.frameEnd={1}".format(operator, + end) + job_args.append(end_frame) + filepath = filepath.replace("\\", "/") + prt_filename = '{0}.PRTFilename="{1}"'.format(operator, + filepath) + + job_args.append(prt_filename) + # Partition + mode = "{0}.PRTPartitionsMode=2".format(operator) + job_args.append(mode) + + additional_args = self.get_custom_attr(operator) + for args in additional_args: + job_args.append(args) + + prt_export = "{0}.exportPRT()".format(operator) + job_args.append(prt_export) + + return job_args + + def get_operators(self, container): + """Get Export Particles Operator""" + + opt_list = [] + node = rt.getNodebyName(container) + selection_list = list(node.Children) + for sel in selection_list: + obj = sel.baseobject + # TODO: to see if it can be used maxscript instead + anim_names = rt.getsubanimnames(obj) + for anim_name in anim_names: + sub_anim = rt.getsubanim(obj, anim_name) + boolean = rt.isProperty(sub_anim, "Export_Particles") + event_name = sub_anim.name + if boolean: + opt = "${0}.{1}.export_particles".format(sel.name, + event_name) + opt_list.append(opt) + + return opt_list + + def get_custom_attr(self, operator): + """Get Custom Attributes""" + + custom_attr_list = [] + attr_settings = get_setting()["attribute"] + for key, value in attr_settings.items(): + custom_attr = 
"{0}.PRTChannels_{1}=True".format(operator, + value) + self.log.debug( + "{0} will be added as custom attribute".format(key) + ) + custom_attr_list.append(custom_attr) + + return custom_attr_list + + def get_files(self, + container, + path, + start_frame, + end_frame): + """ + Note: + Set the filenames accordingly to the tyFlow file + naming extension for the publishing purpose + + Actual File Output from tyFlow: + __partof..prt + e.g. tyFlow_cloth_CCCS_blobbyFill_001__part1of1_00004.prt + """ + filenames = [] + filename = os.path.basename(path) + orig_name, ext = os.path.splitext(filename) + partition_count, partition_start = self.get_partition(container) + for frame in range(int(start_frame), int(end_frame) + 1): + actual_name = "{}__part{:03}of{}_{:05}".format(orig_name, + partition_start, + partition_count, + frame) + actual_filename = path.replace(orig_name, actual_name) + filenames.append(os.path.basename(actual_filename)) + + return filenames + + def partition_output_name(self, container): + """ + Notes: + Partition output name set for mapping + the published file output + + todo: + Customizes the setting for the output + """ + partition_count, partition_start = self.get_partition(container) + partition = "_part{:03}of{}".format(partition_start, + partition_count) + + return partition + + def get_partition(self, container): + """ + Get Partition Value + """ + opt_list = self.get_operators(container) + for operator in opt_list: + count = rt.execute(f'{operator}.PRTPartitionsCount') + start = rt.execute(f'{operator}.PRTPartitionsFrom') + + return count, start diff --git a/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py new file mode 100644 index 0000000000..3b44099609 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/extract_redshift_proxy.py @@ -0,0 +1,62 @@ +import os +import pyblish.api +from openpype.pipeline import publish +from pymxs import runtime as rt +from openpype.hosts.max.api import maintained_selection + + +class ExtractRedshiftProxy(publish.Extractor): + """ + Extract Redshift Proxy with rsProxy + """ + + order = pyblish.api.ExtractorOrder - 0.1 + label = "Extract RedShift Proxy" + hosts = ["max"] + families = ["redshiftproxy"] + + def process(self, instance): + container = instance.data["instance_node"] + start = int(instance.context.data.get("frameStart")) + end = int(instance.context.data.get("frameEnd")) + + self.log.info("Extracting Redshift Proxy...") + stagingdir = self.staging_dir(instance) + rs_filename = "{name}.rs".format(**instance.data) + rs_filepath = os.path.join(stagingdir, rs_filename) + rs_filepath = rs_filepath.replace("\\", "/") + + rs_filenames = self.get_rsfiles(instance, start, end) + + with maintained_selection(): + # select and export + con = rt.getNodeByName(container) + rt.select(con.Children) + # Redshift rsProxy command + # rsProxy fp selected compress connectivity startFrame endFrame + # camera warnExisting transformPivotToOrigin + rt.rsProxy(rs_filepath, 1, 0, 0, start, end, 0, 1, 1) + + self.log.info("Performing Extraction ...") + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'rs', + 'ext': 'rs', + 'files': rs_filenames if len(rs_filenames) > 1 else rs_filenames[0], # noqa + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + self.log.info("Extracted instance '%s' to: %s" % (instance.name, + stagingdir)) + + def get_rsfiles(self, instance, startFrame, 
endFrame): + rs_filenames = [] + rs_name = instance.data["name"] + for frame in range(startFrame, endFrame + 1): + rs_filename = "%s.%04d.rs" % (rs_name, frame) + rs_filenames.append(rs_filename) + + return rs_filenames diff --git a/openpype/hosts/max/plugins/publish/increment_workfile_version.py b/openpype/hosts/max/plugins/publish/increment_workfile_version.py new file mode 100644 index 0000000000..3dec214f77 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/increment_workfile_version.py @@ -0,0 +1,19 @@ +import pyblish.api +from openpype.lib import version_up +from pymxs import runtime as rt + + +class IncrementWorkfileVersion(pyblish.api.ContextPlugin): + """Increment current workfile version.""" + + order = pyblish.api.IntegratorOrder + 0.9 + label = "Increment Workfile Version" + hosts = ["max"] + families = ["workfile"] + + def process(self, context): + path = context.data["currentFile"] + filepath = version_up(path) + + rt.saveMaxFile(filepath) + self.log.info("Incrementing file version") diff --git a/openpype/hosts/max/plugins/publish/save_scene.py b/openpype/hosts/max/plugins/publish/save_scene.py new file mode 100644 index 0000000000..a40788ab41 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/save_scene.py @@ -0,0 +1,21 @@ +import pyblish.api +import os + + +class SaveCurrentScene(pyblish.api.ContextPlugin): + """Save current scene + + """ + + label = "Save current file" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["max"] + families = ["maxrender", "workfile"] + + def process(self, context): + from pymxs import runtime as rt + folder = rt.maxFilePath + file = rt.maxFileName + current = os.path.join(folder, file) + assert context.data["currentFile"] == current + rt.saveMaxFile(current) diff --git a/openpype/hosts/max/plugins/publish/validate_camera_contents.py b/openpype/hosts/max/plugins/publish/validate_camera_contents.py new file mode 100644 index 0000000000..c81e28a61f --- /dev/null +++ b/openpype/hosts/max/plugins/publish/validate_camera_contents.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from openpype.pipeline import PublishValidationError +from pymxs import runtime as rt + + +class ValidateCameraContent(pyblish.api.InstancePlugin): + """Validates Camera instance contents. 
+
+    A Camera instance may only hold a SINGLE camera's transform.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["camera"]
+    hosts = ["max"]
+    label = "Camera Contents"
+    camera_type = ["$Free_Camera", "$Target_Camera",
+                   "$Physical_Camera", "$Target"]
+
+    def process(self, instance):
+        invalid = self.get_invalid(instance)
+        if invalid:
+            raise PublishValidationError("Camera instance must only include"
+                                         " camera (and camera target)")
+
+    def get_invalid(self, instance):
+        """Get nodes that are not valid camera content."""
+        invalid = list()
+        container = instance.data["instance_node"]
+        self.log.info("Validating camera content for "
+                      "{}".format(container))
+
+        con = rt.getNodeByName(container)
+        selection_list = list(con.Children)
+        for sel in selection_list:
+            # to avoid Attribute Error from pymxs wrapper
+            sel_tmp = str(sel)
+            found = False
+            for cam in self.camera_type:
+                if sel_tmp.startswith(cam):
+                    found = True
+                    break
+            if not found:
+                self.log.error("Camera not found")
+                invalid.append(sel)
+        return invalid
diff --git a/openpype/hosts/max/plugins/publish/validate_deadline_publish.py b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
new file mode 100644
index 0000000000..b2f0e863f4
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_deadline_publish.py
@@ -0,0 +1,43 @@
+import os
+import pyblish.api
+from pymxs import runtime as rt
+from openpype.pipeline.publish import (
+    RepairAction,
+    ValidateContentsOrder,
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+from openpype.hosts.max.api.lib_rendersettings import RenderSettings
+
+
+class ValidateDeadlinePublish(pyblish.api.InstancePlugin,
+                              OptionalPyblishPluginMixin):
+    """Validates that the render output path contains the current
+    Max scene name, so repeated submissions do not overwrite each other.
+    """
+
+    order = ValidateContentsOrder
+    families = ["maxrender"]
+    hosts = ["max"]
+    label = "Render Output for Deadline"
+    optional = True
+    actions = [RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+        file = rt.maxFileName
+        filename, ext = os.path.splitext(file)
+        if filename not in rt.rendOutputFilename:
+            raise PublishValidationError(
+                "Render output folder "
+                "doesn't match the max scene name! "
+                "Use the Repair action to "
+                "fix the file path."
+            )
+
+    @classmethod
+    def repair(cls, instance):
+        container = instance.data.get("instance_node")
+        RenderSettings().render_output(container)
+        cls.log.debug("Reset the render output folder...")
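The Max validators added in this PR share one skeleton: an InstancePlugin that bails out early when the artist disables it, collects offending nodes, raises PublishValidationError so the publisher UI can report them, and exposes a classmethod repair() that RepairAction invokes. A minimal sketch; the class name, family, and messages are placeholders:

    import pyblish.api
    from openpype.pipeline import OptionalPyblishPluginMixin
    from openpype.pipeline.publish import (
        RepairAction,
        PublishValidationError
    )


    class ValidateSomething(pyblish.api.InstancePlugin,
                            OptionalPyblishPluginMixin):
        """Skeleton of the validator pattern used by these plugins."""

        order = pyblish.api.ValidatorOrder
        hosts = ["max"]
        families = ["maxrender"]
        label = "Validate Something"
        optional = True
        actions = [RepairAction]

        def process(self, instance):
            # optional plugins bail out when disabled by the artist
            if not self.is_active(instance.data):
                return
            invalid = self.get_invalid(instance)
            if invalid:
                raise PublishValidationError(
                    "Explain what is wrong and that Repair can fix it.")

        def get_invalid(self, instance):
            return []  # collect and return offending nodes here

        @classmethod
        def repair(cls, instance):
            # invoked by RepairAction; restore a valid scene state
            pass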
+ """ + + label = "Validate Frame Range" + order = ValidateContentsOrder + families = ["maxrender"] + hosts = ["max"] + optional = True + actions = [RepairAction] + + def process(self, instance): + if not self.is_active(instance.data): + self.log.info("Skipping validation...") + return + context = instance.context + + frame_start = int(context.data.get("frameStart")) + frame_end = int(context.data.get("frameEnd")) + + inst_frame_start = int(instance.data.get("frameStart")) + inst_frame_end = int(instance.data.get("frameEnd")) + + errors = [] + if frame_start != inst_frame_start: + errors.append( + f"Start frame ({inst_frame_start}) on instance does not match " # noqa + f"with the start frame ({frame_start}) set on the asset data. ") # noqa + if frame_end != inst_frame_end: + errors.append( + f"End frame ({inst_frame_end}) on instance does not match " + f"with the end frame ({frame_start}) from the asset data. ") + + if errors: + errors.append("You can use repair action to fix it.") + raise PublishValidationError("\n".join(errors)) + + @classmethod + def repair(cls, instance): + rt.rendStart = instance.context.data.get("frameStart") + rt.rendEnd = instance.context.data.get("frameEnd") diff --git a/openpype/hosts/max/plugins/publish/validate_model_contents.py b/openpype/hosts/max/plugins/publish/validate_model_contents.py new file mode 100644 index 0000000000..dd782674ff --- /dev/null +++ b/openpype/hosts/max/plugins/publish/validate_model_contents.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from openpype.pipeline import PublishValidationError +from pymxs import runtime as rt + + +class ValidateModelContent(pyblish.api.InstancePlugin): + """Validates Model instance contents. + + A model instance may only hold either geometry-related + object(excluding Shapes) or editable meshes. + """ + + order = pyblish.api.ValidatorOrder + families = ["model"] + hosts = ["max"] + label = "Model Contents" + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise PublishValidationError("Model instance must only include" + "Geometry and Editable Mesh") + + def get_invalid(self, instance): + """ + Get invalid nodes if the instance is not camera + """ + invalid = list() + container = instance.data["instance_node"] + self.log.info("Validating look content for " + "{}".format(container)) + + con = rt.getNodeByName(container) + selection_list = list(con.Children) or rt.getCurrentSelection() + for sel in selection_list: + if rt.classOf(sel) in rt.Camera.classes: + invalid.append(sel) + if rt.classOf(sel) in rt.Light.classes: + invalid.append(sel) + if rt.classOf(sel) in rt.Shape.classes: + invalid.append(sel) + + return invalid diff --git a/openpype/hosts/max/plugins/publish/validate_no_max_content.py b/openpype/hosts/max/plugins/publish/validate_no_max_content.py new file mode 100644 index 0000000000..c20a1968ed --- /dev/null +++ b/openpype/hosts/max/plugins/publish/validate_no_max_content.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from openpype.pipeline import PublishValidationError +from pymxs import runtime as rt + + +class ValidateMaxContents(pyblish.api.InstancePlugin): + """Validates Max contents. + + Check if MaxScene container includes any contents underneath. 
+ """ + + order = pyblish.api.ValidatorOrder + families = ["camera", + "maxScene", + "maxrender"] + hosts = ["max"] + label = "Max Scene Contents" + + def process(self, instance): + container = rt.getNodeByName(instance.data["instance_node"]) + if not list(container.Children): + raise PublishValidationError("No content found in the container") diff --git a/openpype/hosts/max/plugins/publish/validate_pointcloud.py b/openpype/hosts/max/plugins/publish/validate_pointcloud.py new file mode 100644 index 0000000000..f654058648 --- /dev/null +++ b/openpype/hosts/max/plugins/publish/validate_pointcloud.py @@ -0,0 +1,191 @@ +import pyblish.api +from openpype.pipeline import PublishValidationError +from pymxs import runtime as rt +from openpype.settings import get_project_settings +from openpype.pipeline import legacy_io + + +def get_setting(project_setting=None): + project_setting = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + return (project_setting["max"]["PointCloud"]) + + +class ValidatePointCloud(pyblish.api.InstancePlugin): + """Validate that workfile was saved.""" + + order = pyblish.api.ValidatorOrder + families = ["pointcloud"] + hosts = ["max"] + label = "Validate Point Cloud" + + def process(self, instance): + """ + Notes: + + 1. Validate the container only include tyFlow objects + 2. Validate if tyFlow operator Export Particle exists + 3. Validate if the export mode of Export Particle is at PRT format + 4. Validate the partition count and range set as default value + Partition Count : 100 + Partition Range : 1 to 1 + 5. Validate if the custom attribute(s) exist as parameter(s) + of export_particle operator + + """ + invalid = self.get_tyFlow_object(instance) + if invalid: + raise PublishValidationError("Non tyFlow object " + "found: {}".format(invalid)) + invalid = self.get_tyFlow_operator(instance) + if invalid: + raise PublishValidationError("tyFlow ExportParticle operator " + "not found: {}".format(invalid)) + + invalid = self.validate_export_mode(instance) + if invalid: + raise PublishValidationError("The export mode is not at PRT") + + invalid = self.validate_partition_value(instance) + if invalid: + raise PublishValidationError("tyFlow Partition setting is " + "not at the default value") + invalid = self.validate_custom_attribute(instance) + if invalid: + raise PublishValidationError("Custom Attribute not found " + ":{}".format(invalid)) + + def get_tyFlow_object(self, instance): + invalid = [] + container = instance.data["instance_node"] + self.log.info("Validating tyFlow container " + "for {}".format(container)) + + con = rt.getNodeByName(container) + selection_list = list(con.Children) + for sel in selection_list: + sel_tmp = str(sel) + if rt.classOf(sel) in [rt.tyFlow, + rt.Editable_Mesh]: + if "tyFlow" not in sel_tmp: + invalid.append(sel) + else: + invalid.append(sel) + + return invalid + + def get_tyFlow_operator(self, instance): + invalid = [] + container = instance.data["instance_node"] + self.log.info("Validating tyFlow object " + "for {}".format(container)) + + con = rt.getNodeByName(container) + selection_list = list(con.Children) + bool_list = [] + for sel in selection_list: + obj = sel.baseobject + anim_names = rt.getsubanimnames(obj) + for anim_name in anim_names: + # get all the names of the related tyFlow nodes + sub_anim = rt.getsubanim(obj, anim_name) + # check if there is export particle operator + boolean = rt.isProperty(sub_anim, "Export_Particles") + bool_list.append(str(boolean)) + # if the export_particles property is not there + # it 
+            # means there is no "Export Particles" operator
+            if "True" not in bool_list:
+                self.log.error("Operator 'Export Particles' not found!")
+                invalid.append(sel)
+
+        return invalid
+
+    def validate_custom_attribute(self, instance):
+        invalid = []
+        container = instance.data["instance_node"]
+        self.log.info("Validating tyFlow custom "
+                      "attributes for {}".format(container))
+
+        con = rt.getNodeByName(container)
+        selection_list = list(con.Children)
+        for sel in selection_list:
+            obj = sel.baseobject
+            anim_names = rt.getsubanimnames(obj)
+            for anim_name in anim_names:
+                # get all the names of the related tyFlow nodes
+                sub_anim = rt.getsubanim(obj, anim_name)
+                # check if there is an export particle operator
+                boolean = rt.isProperty(sub_anim, "Export_Particles")
+                event_name = sub_anim.name
+                if boolean:
+                    opt = "${0}.{1}.export_particles".format(sel.name,
+                                                             event_name)
+                    attributes = get_setting()["attribute"]
+                    for key, value in attributes.items():
+                        custom_attr = "{0}.PRTChannels_{1}".format(opt,
+                                                                   value)
+                        try:
+                            rt.execute(custom_attr)
+                        except RuntimeError:
+                            invalid.append(key)
+
+        return invalid
+
+    def validate_partition_value(self, instance):
+        invalid = []
+        container = instance.data["instance_node"]
+        self.log.info("Validating tyFlow partition "
+                      "value for {}".format(container))
+
+        con = rt.getNodeByName(container)
+        selection_list = list(con.Children)
+        for sel in selection_list:
+            obj = sel.baseobject
+            anim_names = rt.getsubanimnames(obj)
+            for anim_name in anim_names:
+                # get all the names of the related tyFlow nodes
+                sub_anim = rt.getsubanim(obj, anim_name)
+                # check if there is an export particle operator
+                boolean = rt.isProperty(sub_anim, "Export_Particles")
+                event_name = sub_anim.name
+                if boolean:
+                    opt = "${0}.{1}.export_particles".format(sel.name,
+                                                             event_name)
+                    count = rt.execute(f'{opt}.PRTPartitionsCount')
+                    if count != 100:
+                        invalid.append(count)
+                    start = rt.execute(f'{opt}.PRTPartitionsFrom')
+                    if start != 1:
+                        invalid.append(start)
+                    end = rt.execute(f'{opt}.PRTPartitionsTo')
+                    if end != 1:
+                        invalid.append(end)
+
+        return invalid
+
+    def validate_export_mode(self, instance):
+        invalid = []
+        container = instance.data["instance_node"]
+        self.log.info("Validating tyFlow export "
+                      "mode for {}".format(container))
+
+        con = rt.getNodeByName(container)
+        selection_list = list(con.Children)
+        for sel in selection_list:
+            obj = sel.baseobject
+            anim_names = rt.getsubanimnames(obj)
+            for anim_name in anim_names:
+                # get all the names of the related tyFlow nodes
+                sub_anim = rt.getsubanim(obj, anim_name)
+                # check if there is an export particle operator
+                boolean = rt.isProperty(sub_anim, "Export_Particles")
+                event_name = sub_anim.name
+                if boolean:
+                    opt = "${0}.{1}.export_particles".format(sel.name,
+                                                             event_name)
+                    export_mode = rt.execute(f'{opt}.exportMode')
+                    if export_mode != 1:
+                        invalid.append(export_mode)
+
+        return invalid
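ExtractPointCloud and all four checks of ValidatePointCloud above repeat the same subanim scan; for illustration, a sketch of how it could be shared, using only the pymxs calls the plugins already rely on (the helper and container names are ours):

    from pymxs import runtime as rt


    def iter_export_particle_operators(container):
        """Yield "$<node>.<event>.export_particles" for every tyFlow
        event under the container with an Export Particles operator."""
        node = rt.getNodeByName(container)
        for child in node.Children:
            obj = child.baseobject
            for anim_name in rt.getsubanimnames(obj):
                sub_anim = rt.getsubanim(obj, anim_name)
                if rt.isProperty(sub_anim, "Export_Particles"):
                    yield "${}.{}.export_particles".format(
                        child.name, sub_anim.name)


    # e.g. the validate_export_mode() check, rewritten on the helper:
    invalid = [
        opt for opt in iter_export_particle_operators("pointcloudMain")
        if rt.execute(opt + ".exportMode") != 1
    ]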
diff --git a/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
new file mode 100644
index 0000000000..bc82f82f3b
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_renderer_redshift_proxy.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+from pymxs import runtime as rt
+from openpype.pipeline.publish import RepairAction
+from openpype.hosts.max.api.lib import get_current_renderer
+
+
+class ValidateRendererRedshiftProxy(pyblish.api.InstancePlugin):
+    """Validates that Redshift is the current renderer when creating
+    a Redshift proxy.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    families = ["redshiftproxy"]
+    hosts = ["max"]
+    label = "Redshift Renderer"
+    actions = [RepairAction]
+
+    def process(self, instance):
+        invalid = self.get_redshift_renderer(instance)
+        if invalid:
+            raise PublishValidationError("Please install Redshift for 3dsMax"
+                                         " before using the Redshift proxy instance")  # noqa
+        invalid = self.get_current_renderer(instance)
+        if invalid:
+            raise PublishValidationError("The Redshift proxy extraction "
+                                         "was skipped because the current renderer is not Redshift")  # noqa
+
+    def get_redshift_renderer(self, instance):
+        invalid = list()
+        max_renderers_list = str(rt.RendererClass.classes)
+        if "Redshift_Renderer" not in max_renderers_list:
+            invalid.append(max_renderers_list)
+
+        return invalid
+
+    def get_current_renderer(self, instance):
+        invalid = list()
+        renderer_class = get_current_renderer()
+        current_renderer = str(renderer_class).split(":")[0]
+        if current_renderer != "Redshift_Renderer":
+            invalid.append(current_renderer)
+
+        return invalid
+
+    @classmethod
+    def repair(cls, instance):
+        for Renderer in rt.RendererClass.classes:
+            renderer = Renderer()
+            if "Redshift_Renderer" in str(renderer):
+                rt.renderers.production = renderer
+                break
diff --git a/openpype/hosts/max/plugins/publish/validate_resolution_setting.py b/openpype/hosts/max/plugins/publish/validate_resolution_setting.py
new file mode 100644
index 0000000000..5fcb843b20
--- /dev/null
+++ b/openpype/hosts/max/plugins/publish/validate_resolution_setting.py
@@ -0,0 +1,65 @@
+import pyblish.api
+from openpype.pipeline import (
+    PublishValidationError,
+    OptionalPyblishPluginMixin
+)
+from openpype.pipeline.publish import RepairAction
+from pymxs import runtime as rt
+from openpype.hosts.max.api.lib import reset_scene_resolution
+
+from openpype.pipeline.context_tools import (
+    get_current_project_asset,
+    get_current_project
+)
+
+
+class ValidateResolutionSetting(pyblish.api.InstancePlugin,
+                                OptionalPyblishPluginMixin):
+    """Validate that the render resolution matches the resolution
+    set on the asset or shot in the database.
+    """
+
+    order = pyblish.api.ValidatorOrder - 0.01
+    families = ["maxrender"]
+    hosts = ["max"]
+    label = "Validate Resolution Setting"
+    optional = True
+    actions = [RepairAction]
+
+    def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+        width, height = self.get_db_resolution(instance)
+        current_width = rt.renderWidth
+        current_height = rt.renderHeight
+        if current_width != width and current_height != height:
+            raise PublishValidationError("Resolution Setting "
+                                         "not matching resolution "
+                                         "set on asset or shot.")
+        if current_width != width:
+            raise PublishValidationError("Width in Resolution Setting "
+                                         "not matching resolution set "
+                                         "on asset or shot.")
+
+        if current_height != height:
+            raise PublishValidationError("Height in Resolution Setting "
+                                         "not matching resolution set "
+                                         "on asset or shot.")
+
+    def get_db_resolution(self, instance):
+        data = ["data.resolutionWidth", "data.resolutionHeight"]
+        project_resolution = get_current_project(fields=data)
+        project_resolution_data = project_resolution["data"]
+        asset_resolution = get_current_project_asset(fields=data)
+        asset_resolution_data = asset_resolution["data"]
+        # Asset resolution overrides the project-wide resolution
+        project_width = int(
+            project_resolution_data.get("resolutionWidth", 1920))
+        project_height = int(
+            project_resolution_data.get("resolutionHeight", 1080))
+        width = int(
+            asset_resolution_data.get("resolutionWidth", project_width))
+        height = int(
+            asset_resolution_data.get("resolutionHeight",
project_height)) + + return width, height + + @classmethod + def repair(cls, instance): + reset_scene_resolution() diff --git a/openpype/hosts/max/plugins/publish/validate_usd_plugin.py b/openpype/hosts/max/plugins/publish/validate_usd_plugin.py new file mode 100644 index 0000000000..747147020a --- /dev/null +++ b/openpype/hosts/max/plugins/publish/validate_usd_plugin.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +import pyblish.api +from openpype.pipeline import PublishValidationError +from pymxs import runtime as rt + + +class ValidateUSDPlugin(pyblish.api.InstancePlugin): + """Validates if USD plugin is installed or loaded in Max + """ + + order = pyblish.api.ValidatorOrder - 0.01 + families = ["model"] + hosts = ["max"] + label = "USD Plugin" + + def process(self, instance): + plugin_mgr = rt.pluginManager + plugin_count = plugin_mgr.pluginDllCount + plugin_info = self.get_plugins(plugin_mgr, + plugin_count) + usd_import = "usdimport.dli" + if usd_import not in plugin_info: + raise PublishValidationError("USD Plugin {}" + " not found".format(usd_import)) + usd_export = "usdexport.dle" + if usd_export not in plugin_info: + raise PublishValidationError("USD Plugin {}" + " not found".format(usd_export)) + + def get_plugins(self, manager, count): + plugin_info_list = list() + for p in range(1, count + 1): + plugin_info = manager.pluginDllName(p) + plugin_info_list.append(plugin_info) + + return plugin_info_list diff --git a/openpype/hosts/max/startup/startup.ms b/openpype/hosts/max/startup/startup.ms index aee40eb6bc..b80ead4b74 100644 --- a/openpype/hosts/max/startup/startup.ms +++ b/openpype/hosts/max/startup/startup.ms @@ -2,8 +2,11 @@ ( local sysPath = dotNetClass "System.IO.Path" local sysDir = dotNetClass "System.IO.Directory" - local localScript = getThisScriptFilename() + local localScript = getThisScriptFilename() local startup = sysPath.Combine (sysPath.GetDirectoryName localScript) "startup.py" + local pythonpath = systemTools.getEnvVariable "MAX_PYTHONPATH" + systemTools.setEnvVariable "PYTHONPATH" pythonpath + python.ExecuteFile startup ) \ No newline at end of file diff --git a/openpype/hosts/max/startup/startup.py b/openpype/hosts/max/startup/startup.py index 37bcef5db1..0d3135a16f 100644 --- a/openpype/hosts/max/startup/startup.py +++ b/openpype/hosts/max/startup/startup.py @@ -1,4 +1,13 @@ # -*- coding: utf-8 -*- +import os +import sys + +# this might happen in some 3dsmax version where PYTHONPATH isn't added +# to sys.path automatically +for path in os.environ["PYTHONPATH"].split(os.pathsep): + if path and path not in sys.path: + sys.path.append(path) + from openpype.hosts.max.api import MaxHost from openpype.pipeline import install_host diff --git a/openpype/hosts/maya/api/__init__.py b/openpype/hosts/maya/api/__init__.py index a6c5f50e1a..0948282f57 100644 --- a/openpype/hosts/maya/api/__init__.py +++ b/openpype/hosts/maya/api/__init__.py @@ -26,7 +26,6 @@ from .workio import ( ) from .lib import ( - export_alembic, lsattr, lsattrs, read, @@ -58,7 +57,6 @@ __all__ = [ "work_root", # Utility functions - "export_alembic", "lsattr", "lsattrs", "read", diff --git a/openpype/hosts/maya/api/commands.py b/openpype/hosts/maya/api/commands.py index 4a36406632..3e31875fd8 100644 --- a/openpype/hosts/maya/api/commands.py +++ b/openpype/hosts/maya/api/commands.py @@ -57,68 +57,6 @@ def edit_shader_definitions(): window.show() -def reset_frame_range(): - """Set frame range to current asset""" - # Set FPS first - fps = {15: 'game', - 24: 'film', - 25: 'pal', - 30: 'ntsc', - 48: 
'show', - 50: 'palf', - 60: 'ntscf', - 23.98: '23.976fps', - 23.976: '23.976fps', - 29.97: '29.97fps', - 47.952: '47.952fps', - 47.95: '47.952fps', - 59.94: '59.94fps', - 44100: '44100fps', - 48000: '48000fps' - }.get(float(legacy_io.Session.get("AVALON_FPS", 25)), "pal") - - cmds.currentUnit(time=fps) - - # Set frame start/end - project_name = legacy_io.active_project() - asset_name = legacy_io.Session["AVALON_ASSET"] - asset = get_asset_by_name(project_name, asset_name) - - frame_start = asset["data"].get("frameStart") - frame_end = asset["data"].get("frameEnd") - # Backwards compatibility - if frame_start is None or frame_end is None: - frame_start = asset["data"].get("edit_in") - frame_end = asset["data"].get("edit_out") - - if frame_start is None or frame_end is None: - cmds.warning("No edit information found for %s" % asset_name) - return - - handles = asset["data"].get("handles") or 0 - handle_start = asset["data"].get("handleStart") - if handle_start is None: - handle_start = handles - - handle_end = asset["data"].get("handleEnd") - if handle_end is None: - handle_end = handles - - frame_start -= int(handle_start) - frame_end += int(handle_end) - - cmds.playbackOptions(minTime=frame_start) - cmds.playbackOptions(maxTime=frame_end) - cmds.playbackOptions(animationStartTime=frame_start) - cmds.playbackOptions(animationEndTime=frame_end) - cmds.playbackOptions(minTime=frame_start) - cmds.playbackOptions(maxTime=frame_end) - cmds.currentTime(frame_start) - - cmds.setAttr("defaultRenderGlobals.startFrame", frame_start) - cmds.setAttr("defaultRenderGlobals.endFrame", frame_end) - - def _resolution_from_document(doc): if not doc or "data" not in doc: print("Entered document is not valid. \"{}\"".format(str(doc))) @@ -131,7 +69,7 @@ def _resolution_from_document(doc): resolution_width = doc["data"].get("resolution_width") resolution_height = doc["data"].get("resolution_height") - # Make sure both width and heigh are set + # Make sure both width and height are set if resolution_width is None or resolution_height is None: cmds.warning( "No resolution information found for \"{}\"".format(doc["name"]) diff --git a/openpype/hosts/maya/api/customize.py b/openpype/hosts/maya/api/customize.py index f66858dfb6..f4c4d6ed88 100644 --- a/openpype/hosts/maya/api/customize.py +++ b/openpype/hosts/maya/api/customize.py @@ -11,6 +11,7 @@ import maya.mel as mel from openpype import resources from openpype.tools.utils import host_tools from .lib import get_main_window +from ..tools import show_look_assigner log = logging.getLogger(__name__) @@ -112,7 +113,7 @@ def override_toolbox_ui(): annotation="Look Manager", label="Look Manager", image=os.path.join(icons, "lookmanager.png"), - command=host_tools.show_look_assigner, + command=show_look_assigner, width=icon_size, height=icon_size, parent=parent diff --git a/openpype/hosts/maya/api/lib.py b/openpype/hosts/maya/api/lib.py index dd5da275e8..b02d3c9b39 100644 --- a/openpype/hosts/maya/api/lib.py +++ b/openpype/hosts/maya/api/lib.py @@ -1,10 +1,11 @@ """Standalone helper functions""" import os +from pprint import pformat import sys import platform import uuid -import math +import re import json import logging @@ -14,7 +15,7 @@ from math import ceil from six import string_types from maya import cmds, mel -import maya.api.OpenMaya as om +from maya.api import OpenMaya from openpype.client import ( get_project, @@ -32,8 +33,17 @@ from openpype.pipeline import ( load_container, registered_host, ) -from openpype.pipeline.context_tools import 
get_current_project_asset -from .commands import reset_frame_range +from openpype.pipeline.create import ( + legacy_create, + get_legacy_creator_by_name, +) +from openpype.pipeline.context_tools import ( + get_current_asset_name, + get_current_project_asset, + get_current_project_name, + get_current_task_name +) +from openpype.lib.profiles_filtering import filter_profiles self = sys.modules[__name__] @@ -113,6 +123,18 @@ FLOAT_FPS = {23.98, 23.976, 29.97, 47.952, 59.94} RENDERLIKE_INSTANCE_FAMILIES = ["rendering", "vrayscene"] +DISPLAY_LIGHTS_VALUES = [ + "project_settings", "default", "all", "selected", "flat", "none" +] +DISPLAY_LIGHTS_LABELS = [ + "Use Project Settings", + "Default Lighting", + "All Lights", + "Selected Lights", + "Flat Lighting", + "No Lights" +] + def get_main_window(): """Acquire Maya's main window""" @@ -169,6 +191,44 @@ def maintained_selection(): cmds.select(clear=True) +def get_custom_namespace(custom_namespace): + """Return unique namespace. + + The input namespace can contain a single group + of '#' number tokens to indicate where the namespace's + unique index should go. The amount of tokens defines + the zero padding of the number, e.g ### turns into 001. + + Warning: Note that a namespace will always be + prefixed with a _ if it starts with a digit + + Example: + >>> get_custom_namespace("myspace_##_") + # myspace_01_ + >>> get_custom_namespace("##_myspace") + # _01_myspace + >>> get_custom_namespace("myspace##") + # myspace01 + + """ + split = re.split("([#]+)", custom_namespace, 1) + + if len(split) == 3: + base, padding, suffix = split + padding = "%0{}d".format(len(padding)) + else: + base = split[0] + padding = "%02d" # default padding + suffix = "" + + return unique_namespace( + base, + format=padding, + prefix="_" if not base or base[0].isdigit() else "", + suffix=suffix + ) + + def unique_namespace(namespace, format="%02d", prefix="", suffix=""): """Return unique namespace @@ -254,11 +314,6 @@ def read(node): return data -def _get_mel_global(name): - """Return the value of a mel global variable""" - return mel.eval("$%s = $%s;" % (name, name)) - - def matrix_equals(a, b, tolerance=1e-10): """ Compares two matrices with an imperfection tolerance @@ -289,73 +344,6 @@ def pairwise(iterable): return zip(a, a) -def export_alembic(nodes, - file, - frame_range=None, - write_uv=True, - write_visibility=True, - attribute_prefix=None): - """Wrap native MEL command with limited set of arguments - - Arguments: - nodes (list): Long names of nodes to cache - - file (str): Absolute path to output destination - - frame_range (tuple, optional): Start- and end-frame of cache, - default to current animation range. - - write_uv (bool, optional): Whether or not to include UVs, - default to True - - write_visibility (bool, optional): Turn on to store the visibility - state of objects in the Alembic file. Otherwise, all objects are - considered visible, default to True - - attribute_prefix (str, optional): Include all user-defined - attributes with this prefix. 
- - """ - - if frame_range is None: - frame_range = ( - cmds.playbackOptions(query=True, ast=True), - cmds.playbackOptions(query=True, aet=True) - ) - - options = [ - ("file", file), - ("frameRange", "%s %s" % frame_range), - ] + [("root", mesh) for mesh in nodes] - - if isinstance(attribute_prefix, string_types): - # Include all attributes prefixed with "mb" - # TODO(marcus): This would be a good candidate for - # external registration, so that the developer - # doesn't have to edit this function to modify - # the behavior of Alembic export. - options.append(("attrPrefix", str(attribute_prefix))) - - if write_uv: - options.append(("uvWrite", "")) - - if write_visibility: - options.append(("writeVisibility", "")) - - # Generate MEL command - mel_args = list() - for key, value in options: - mel_args.append("-{0} {1}".format(key, value)) - - mel_args_string = " ".join(mel_args) - mel_cmd = "AbcExport -j \"{0}\"".format(mel_args_string) - - # For debuggability, put the string passed to MEL in the Script editor. - print("mel.eval('%s')" % mel_cmd) - - return mel.eval(mel_cmd) - - def collect_animation_data(fps=False): """Get the basic animation data @@ -365,15 +353,22 @@ def collect_animation_data(fps=False): """ # get scene values as defaults - start = cmds.playbackOptions(query=True, animationStartTime=True) - end = cmds.playbackOptions(query=True, animationEndTime=True) + frame_start = cmds.playbackOptions(query=True, minTime=True) + frame_end = cmds.playbackOptions(query=True, maxTime=True) + frame_start_handle = cmds.playbackOptions( + query=True, animationStartTime=True + ) + frame_end_handle = cmds.playbackOptions(query=True, animationEndTime=True) + + handle_start = frame_start - frame_start_handle + handle_end = frame_end_handle - frame_end # build attributes data = OrderedDict() - data["frameStart"] = start - data["frameEnd"] = end - data["handleStart"] = 0 - data["handleEnd"] = 0 + data["frameStart"] = frame_start + data["frameEnd"] = frame_end + data["handleStart"] = handle_start + data["handleEnd"] = handle_end data["step"] = 1.0 if fps: @@ -475,9 +470,9 @@ def lsattrs(attrs): """ - dep_fn = om.MFnDependencyNode() - dag_fn = om.MFnDagNode() - selection_list = om.MSelectionList() + dep_fn = OpenMaya.MFnDependencyNode() + dag_fn = OpenMaya.MFnDagNode() + selection_list = OpenMaya.MSelectionList() first_attr = next(iter(attrs)) @@ -491,7 +486,7 @@ def lsattrs(attrs): matches = set() for i in range(selection_list.length()): node = selection_list.getDependNode(i) - if node.hasFn(om.MFn.kDagNode): + if node.hasFn(OpenMaya.MFn.kDagNode): fn_node = dag_fn.setObject(node) full_path_names = [path.fullPathName() for path in fn_node.getAllPaths()] @@ -691,15 +686,15 @@ class delete_after(object): cmds.delete(self._nodes) +def get_current_renderlayer(): + return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) + + def get_renderer(layer): with renderlayer(layer): return cmds.getAttr("defaultRenderGlobals.currentRenderer") -def get_current_renderlayer(): - return cmds.editRenderLayerGlobals(query=True, currentRenderLayer=True) - - @contextlib.contextmanager def no_undo(flush=False): """Disable the undo queue during the context @@ -940,11 +935,11 @@ def maintained_selection_api(): Warning: This is *not* added to the undo stack. 
""" - original = om.MGlobal.getActiveSelectionList() + original = OpenMaya.MGlobal.getActiveSelectionList() try: yield finally: - om.MGlobal.setActiveSelectionList(original) + OpenMaya.MGlobal.setActiveSelectionList(original) @contextlib.contextmanager @@ -1354,11 +1349,11 @@ def get_id(node): if node is None: return - sel = om.MSelectionList() + sel = OpenMaya.MSelectionList() sel.add(node) api_node = sel.getDependNode(0) - fn = om.MFnDependencyNode(api_node) + fn = OpenMaya.MFnDependencyNode(api_node) if not fn.hasAttribute("cbId"): return @@ -1440,25 +1435,69 @@ def set_id(node, unique_id, overwrite=False): cmds.setAttr(attr, unique_id, type="string") -# endregion ID -def get_reference_node(path): - """ - Get the reference node when the path is found being used in a reference +def get_attribute(plug, + asString=False, + expandEnvironmentVariables=False, + **kwargs): + """Maya getAttr with some fixes based on `pymel.core.general.getAttr()`. + + Like Pymel getAttr this applies some changes to `maya.cmds.getAttr` + - maya pointlessly returned vector results as a tuple wrapped in a list + (ex. '[(1,2,3)]'). This command unpacks the vector for you. + - when getting a multi-attr, maya would raise an error, but this will + return a list of values for the multi-attr + - added support for getting message attributes by returning the + connections instead + + Note that the asString + expandEnvironmentVariables argument naming + convention matches the `maya.cmds.getAttr` arguments so that it can + act as a direct replacement for it. + Args: - path (str): the file path to check + plug (str): Node's attribute plug as `node.attribute` + asString (bool): Return string value for enum attributes instead + of the index. Note that the return value can be dependent on the + UI language Maya is running in. + expandEnvironmentVariables (bool): Expand any environment variable and + (tilde characters on UNIX) found in string attributes which are + returned. + + Kwargs: + Supports the keyword arguments of `maya.cmds.getAttr` Returns: - node (str): name of the reference node in question - """ - try: - node = cmds.file(path, query=True, referenceNode=True) - except RuntimeError: - log.debug('File is not referenced : "{}"'.format(path)) - return + object: The value of the maya attribute. 
- reference_path = cmds.referenceQuery(path, filename=True) - if os.path.normpath(path) == os.path.normpath(reference_path): - return node + """ + attr_type = cmds.getAttr(plug, type=True) + if asString: + kwargs["asString"] = True + if expandEnvironmentVariables: + kwargs["expandEnvironmentVariables"] = True + try: + res = cmds.getAttr(plug, **kwargs) + except RuntimeError: + if attr_type == "message": + return cmds.listConnections(plug) + + node, attr = plug.split(".", 1) + children = cmds.attributeQuery(attr, node=node, listChildren=True) + if children: + return [ + get_attribute("{}.{}".format(node, child)) + for child in children + ] + + raise + + # Convert vector result wrapped in tuple + if isinstance(res, list) and len(res): + if isinstance(res[0], tuple) and len(res): + if attr_type in {'pointArray', 'vectorArray'}: + return res + return res[0] + + return res def set_attribute(attribute, value, node): @@ -1975,6 +2014,12 @@ def remove_other_uv_sets(mesh): cmds.removeMultiInstance(attr, b=True) +def get_node_parent(node): + """Return full path name for parent of node""" + parents = cmds.listRelatives(node, parent=True, fullPath=True) + return parents[0] if parents else None + + def get_id_from_sibling(node, history_only=True): """Return first node id in the history chain that matches this node. @@ -1998,10 +2043,6 @@ def get_id_from_sibling(node, history_only=True): """ - def _get_parent(node): - """Return full path name for parent of node""" - return cmds.listRelatives(node, parent=True, fullPath=True) - node = cmds.ls(node, long=True)[0] # Find all similar nodes in history @@ -2013,8 +2054,8 @@ def get_id_from_sibling(node, history_only=True): similar_nodes = [x for x in similar_nodes if x != node] # The node *must be* under the same parent - parent = _get_parent(node) - similar_nodes = [i for i in similar_nodes if _get_parent(i) == parent] + parent = get_node_parent(node) + similar_nodes = [i for i in similar_nodes if get_node_parent(i) == parent] # Check all of the remaining similar nodes and take the first one # with an id and assume it's the original. @@ -2062,8 +2103,6 @@ def get_id_from_sibling(node, history_only=True): return first_id - -# Project settings def set_scene_fps(fps, update=True): """Set FPS from project configuration @@ -2076,30 +2115,23 @@ def set_scene_fps(fps, update=True): """ - fps_mapping = {'15': 'game', - '24': 'film', - '25': 'pal', - '30': 'ntsc', - '48': 'show', - '50': 'palf', - '60': 'ntscf', - '23.98': '23.976fps', - '23.976': '23.976fps', - '29.97': '29.97fps', - '47.952': '47.952fps', - '47.95': '47.952fps', - '59.94': '59.94fps', - '44100': '44100fps', - '48000': '48000fps'} + fps_mapping = { + '15': 'game', + '24': 'film', + '25': 'pal', + '30': 'ntsc', + '48': 'show', + '50': 'palf', + '60': 'ntscf', + '23.976023976023978': '23.976fps', + '29.97002997002997': '29.97fps', + '47.952047952047955': '47.952fps', + '59.94005994005994': '59.94fps', + '44100': '44100fps', + '48000': '48000fps' + } - # pull from mapping - # this should convert float string to float and int to int - # so 25.0 is converted to 25, but 23.98 will be still float. 
- dec, ipart = math.modf(fps) - if dec == 0.0: - fps = int(ipart) - - unit = fps_mapping.get(str(fps), None) + unit = fps_mapping.get(str(convert_to_maya_fps(fps)), None) if unit is None: raise ValueError("Unsupported FPS value: `%s`" % fps) @@ -2166,6 +2198,109 @@ def set_scene_resolution(width, height, pixelAspect): cmds.setAttr("%s.pixelAspect" % control_node, pixelAspect) +def get_frame_range(include_animation_range=False): + """Get the current assets frame range and handles. + + Args: + include_animation_range (bool, optional): Whether to include + `animationStart` and `animationEnd` keys to define the outer + range of the timeline. It is excluded by default. + + Returns: + dict: Asset's expected frame range values. + + """ + + # Set frame start/end + project_name = get_current_project_name() + asset_name = get_current_asset_name() + asset = get_asset_by_name(project_name, asset_name) + + frame_start = asset["data"].get("frameStart") + frame_end = asset["data"].get("frameEnd") + + if frame_start is None or frame_end is None: + cmds.warning("No edit information found for %s" % asset_name) + return + + handle_start = asset["data"].get("handleStart") or 0 + handle_end = asset["data"].get("handleEnd") or 0 + + frame_range = { + "frameStart": frame_start, + "frameEnd": frame_end, + "handleStart": handle_start, + "handleEnd": handle_end + } + if include_animation_range: + # The animation range values are only included to define whether + # the Maya time slider should include the handles or not. + # Some usages of this function use the full dictionary to define + # instance attributes for which we want to exclude the animation + # keys. That is why these are excluded by default. + task_name = get_current_task_name() + settings = get_project_settings(project_name) + include_handles_settings = settings["maya"]["include_handles"] + current_task = asset.get("data").get("tasks").get(task_name) + + animation_start = frame_start + animation_end = frame_end + + include_handles = include_handles_settings["include_handles_default"] + for item in include_handles_settings["per_task_type"]: + if current_task["type"] in item["task_type"]: + include_handles = item["include_handles"] + break + if include_handles: + animation_start -= int(handle_start) + animation_end += int(handle_end) + + frame_range["animationStart"] = animation_start + frame_range["animationEnd"] = animation_end + + return frame_range + + +def reset_frame_range(playback=True, render=True, fps=True): + """Set frame range to current asset + + Args: + playback (bool, Optional): Whether to set the maya timeline playback + frame range. Defaults to True. + render (bool, Optional): Whether to set the maya render frame range. + Defaults to True. + fps (bool, Optional): Whether to set scene FPS. Defaults to True. 
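Usage sketch:

    # snap FPS, timeline and render range to the current asset
    reset_frame_range()

    # only update the render range, leaving timeline and FPS untouched
    reset_frame_range(playback=False, fps=False)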
+ """ + if fps: + fps = convert_to_maya_fps( + float(legacy_io.Session.get("AVALON_FPS", 25)) + ) + set_scene_fps(fps) + + frame_range = get_frame_range(include_animation_range=True) + if not frame_range: + # No frame range data found for asset + return + + frame_start = frame_range["frameStart"] + frame_end = frame_range["frameEnd"] + animation_start = frame_range["animationStart"] + animation_end = frame_range["animationEnd"] + + if playback: + cmds.playbackOptions( + minTime=frame_start, + maxTime=frame_end, + animationStartTime=animation_start, + animationEndTime=animation_end + ) + cmds.currentTime(frame_start) + + if render: + cmds.setAttr("defaultRenderGlobals.startFrame", frame_start) + cmds.setAttr("defaultRenderGlobals.endFrame", frame_end) + + def reset_scene_resolution(): """Apply the scene resolution from the project definition @@ -2217,7 +2352,9 @@ def set_context_settings(): asset_data = asset_doc.get("data", {}) # Set project fps - fps = asset_data.get("fps", project_data.get("fps", 25)) + fps = convert_to_maya_fps( + asset_data.get("fps", project_data.get("fps", 25)) + ) legacy_io.Session["AVALON_FPS"] = str(fps) set_scene_fps(fps) @@ -2239,15 +2376,12 @@ def validate_fps(): """ - fps = get_current_project_asset(fields=["data.fps"])["data"]["fps"] - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - current_fps = float_round(mel.eval('currentTimeUnitToFPS()'), 2) + expected_fps = convert_to_maya_fps( + get_current_project_asset(fields=["data.fps"])["data"]["fps"] + ) + current_fps = mel.eval('currentTimeUnitToFPS()') - fps_match = current_fps == fps + fps_match = current_fps == expected_fps if not fps_match and not IS_HEADLESS: from openpype.widgets import popup @@ -2256,14 +2390,19 @@ def validate_fps(): dialog = popup.PopupUpdateKeys(parent=parent) dialog.setModal(True) dialog.setWindowTitle("Maya scene does not match project FPS") - dialog.setMessage("Scene %i FPS does not match project %i FPS" % - (current_fps, fps)) + dialog.setMessage( + "Scene {} FPS does not match project {} FPS".format( + current_fps, expected_fps + ) + ) dialog.setButtonText("Fix") # Set new text for button (add optional argument for the popup?) toggle = dialog.widgets["toggle"] update = toggle.isChecked() - dialog.on_clicked_state.connect(lambda: set_scene_fps(fps, update)) + dialog.on_clicked_state.connect( + lambda: set_scene_fps(expected_fps, update) + ) dialog.show() @@ -2505,8 +2644,8 @@ def load_capture_preset(data=None): float(value[2]) / 255 ] disp_options[key] = value - else: - disp_options['displayGradient'] = True + elif key == "displayGradient": + disp_options[key] = value options['display_options'] = disp_options @@ -3101,75 +3240,6 @@ def iter_shader_edits(relationships, shader_nodes, nodes_by_id, label=None): def set_colorspace(): """Set Colorspace from project configuration """ - project_name = os.getenv("AVALON_PROJECT") - imageio = get_project_settings(project_name)["maya"]["imageio"] - - # Maya 2022+ introduces new OCIO v2 color management settings that - # can override the old color managenement preferences. OpenPype has - # separate settings for both so we fall back when necessary. 
- use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"] - required_maya_version = 2022 - maya_version = int(cmds.about(version=True)) - maya_supports_ocio_v2 = maya_version >= required_maya_version - if use_ocio_v2 and not maya_supports_ocio_v2: - # Fallback to legacy behavior with a warning - log.warning("Color Management Preference v2 is enabled but not " - "supported by current Maya version: {} (< {}). Falling " - "back to legacy settings.".format( - maya_version, required_maya_version) - ) - use_ocio_v2 = False - - if use_ocio_v2: - root_dict = imageio["colorManagementPreference_v2"] - else: - root_dict = imageio["colorManagementPreference"] - - if not isinstance(root_dict, dict): - msg = "set_colorspace(): argument should be dictionary" - log.error(msg) - - log.debug(">> root_dict: {}".format(root_dict)) - - # enable color management - cmds.colorManagementPrefs(e=True, cmEnabled=True) - cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True) - - # set config path - custom_ocio_config = False - if root_dict.get("configFilePath"): - unresolved_path = root_dict["configFilePath"] - ocio_paths = unresolved_path[platform.system().lower()] - - resolved_path = None - for ocio_p in ocio_paths: - resolved_path = str(ocio_p).format(**os.environ) - if not os.path.exists(resolved_path): - continue - - if resolved_path: - filepath = str(resolved_path).replace("\\", "/") - cmds.colorManagementPrefs(e=True, configFilePath=filepath) - cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=True) - log.debug("maya '{}' changed to: {}".format( - "configFilePath", resolved_path)) - custom_ocio_config = True - else: - cmds.colorManagementPrefs(e=True, cmConfigFileEnabled=False) - cmds.colorManagementPrefs(e=True, configFilePath="") - - # If no custom OCIO config file was set we make sure that Maya 2022+ - # either chooses between Maya's newer default v2 or legacy config based - # on OpenPype setting to use ocio v2 or not. - if maya_supports_ocio_v2 and not custom_ocio_config: - if use_ocio_v2: - # Use Maya 2022+ default OCIO v2 config - log.info("Setting default Maya OCIO v2 config") - cmds.colorManagementPrefs(edit=True, configFilePath="") - else: - # Set the Maya default config file path - log.info("Setting default Maya OCIO v1 legacy config") - cmds.colorManagementPrefs(edit=True, configFilePath="legacy") # set color spaces for rendering space and view transforms def _colormanage(**kwargs): @@ -3186,55 +3256,152 @@ def set_colorspace(): except RuntimeError as exc: log.error(exc) - if use_ocio_v2: - _colormanage(renderingSpaceName=root_dict["renderSpace"]) - _colormanage(displayName=root_dict["displayName"]) - _colormanage(viewName=root_dict["viewName"]) - else: - _colormanage(renderingSpaceName=root_dict["renderSpace"]) - if maya_supports_ocio_v2: - _colormanage(viewName=root_dict["viewTransform"]) - _colormanage(displayName="legacy") + project_name = os.getenv("AVALON_PROJECT") + imageio = get_project_settings(project_name)["maya"]["imageio"] + + # ocio compatibility variables + ocio_v2_maya_version = 2022 + maya_version = int(cmds.about(version=True)) + ocio_v2_support = use_ocio_v2 = maya_version >= ocio_v2_maya_version + + root_dict = {} + use_workfile_settings = imageio.get("workfile", {}).get("enabled") + + if use_workfile_settings: + # TODO: deprecated code from 3.15.5 - remove + # Maya 2022+ introduces new OCIO v2 color management settings that + # can override the old color management preferences. OpenPype has + # separate settings for both so we fall back when necessary. 
+ use_ocio_v2 = imageio["colorManagementPreference_v2"]["enabled"] + if use_ocio_v2 and not ocio_v2_support: + # Fallback to legacy behavior with a warning + log.warning( + "Color Management Preference v2 is enabled but not " + "supported by current Maya version: {} (< {}). Falling " + "back to legacy settings.".format( + maya_version, ocio_v2_maya_version) + ) + + if use_ocio_v2: + root_dict = imageio["colorManagementPreference_v2"] else: - _colormanage(viewTransformName=root_dict["viewTransform"]) + root_dict = imageio["colorManagementPreference"] + + if not isinstance(root_dict, dict): + msg = "set_colorspace(): argument should be dictionary" + log.error(msg) + + else: + root_dict = imageio["workfile"] + + log.debug(">> root_dict: {}".format(pformat(root_dict))) + + if root_dict: + # enable color management + cmds.colorManagementPrefs(e=True, cmEnabled=True) + cmds.colorManagementPrefs(e=True, ocioRulesEnabled=True) + + # backward compatibility + # TODO: deprecated code from 3.15.5 - refactor to use new settings + view_name = root_dict.get("viewTransform") + if view_name is None: + view_name = root_dict.get("viewName") + + if use_ocio_v2: + # Use Maya 2022+ default OCIO v2 config + log.info("Setting default Maya OCIO v2 config") + cmds.colorManagementPrefs(edit=True, configFilePath="") + + # set rendering space and view transform + _colormanage(renderingSpaceName=root_dict["renderSpace"]) + _colormanage(viewName=view_name) + _colormanage(displayName=root_dict["displayName"]) + else: + # Set the Maya default config file path + log.info("Setting default Maya OCIO v1 legacy config") + cmds.colorManagementPrefs(edit=True, configFilePath="legacy") + + # set rendering space and view transform + _colormanage(renderingSpaceName=root_dict["renderSpace"]) + _colormanage(viewTransformName=view_name) @contextlib.contextmanager def parent_nodes(nodes, parent=None): # type: (list, str) -> list """Context manager to un-parent provided nodes and return them back.""" - import pymel.core as pm # noqa - parent_node = None + def _as_mdagpath(node): + """Return MDagPath for node path.""" + if not node: + return + sel = OpenMaya.MSelectionList() + sel.add(node) + return sel.getDagPath(0) + + # We can only parent dag nodes so we ensure input contains only dag nodes + nodes = cmds.ls(nodes, type="dagNode", long=True) + if not nodes: + # opt-out early + yield + return + + parent_node_path = None delete_parent = False - if parent: if not cmds.objExists(parent): - parent_node = pm.createNode("transform", n=parent, ss=False) + parent_node = cmds.createNode("transform", + name=parent, + skipSelect=False) delete_parent = True else: - parent_node = pm.PyNode(parent) + parent_node = parent + parent_node_path = cmds.ls(parent_node, long=True)[0] + + # Store original parents node_parents = [] for node in nodes: - n = pm.PyNode(node) - try: - root = pm.listRelatives(n, parent=1)[0] - except IndexError: - root = None - node_parents.append((n, root)) + node_parent = get_node_parent(node) + node_parents.append((_as_mdagpath(node), _as_mdagpath(node_parent))) + try: - for node in node_parents: - if not parent: - node[0].setParent(world=True) + for node, node_parent in node_parents: + node_parent_path = node_parent.fullPathName() if node_parent else None # noqa + if node_parent_path == parent_node_path: + # Already a child + continue + + if parent_node_path: + cmds.parent(node.fullPathName(), parent_node_path) else: - node[0].setParent(parent_node) + cmds.parent(node.fullPathName(), world=True) + yield finally: - for node in 
node_parents: - if node[1]: - node[0].setParent(node[1]) + # Reparent to original parents + for node, original_parent in node_parents: + node_path = node.fullPathName() + if not node_path: + # Node must have been deleted + continue + + node_parent_path = get_node_parent(node_path) + + original_parent_path = None + if original_parent: + original_parent_path = original_parent.fullPathName() + if not original_parent_path: + # Original parent node must have been deleted + continue + + if node_parent_path != original_parent_path: + if not original_parent_path: + cmds.parent(node_path, world=True) + else: + cmds.parent(node_path, original_parent_path) + if delete_parent: - pm.delete(parent_node) + cmds.delete(parent_node_path) @contextlib.contextmanager @@ -3391,15 +3558,15 @@ def iter_visible_nodes_in_range(nodes, start, end): @memodict def get_visibility_mplug(node): """Return api 2.0 MPlug with cached memoize decorator""" - sel = om.MSelectionList() + sel = OpenMaya.MSelectionList() sel.add(node) dag = sel.getDagPath(0) - return om.MFnDagNode(dag).findPlug("visibility", True) + return OpenMaya.MFnDagNode(dag).findPlug("visibility", True) @contextlib.contextmanager def dgcontext(mtime): """MDGContext context manager""" - context = om.MDGContext(mtime) + context = OpenMaya.MDGContext(mtime) try: previous = context.makeCurrent() yield context @@ -3408,9 +3575,9 @@ def iter_visible_nodes_in_range(nodes, start, end): # We skip the first frame as we already used that frame to check for # overall visibilities. And end+1 to include the end frame. - scene_units = om.MTime.uiUnit() + scene_units = OpenMaya.MTime.uiUnit() for frame in range(start + 1, end + 1): - mtime = om.MTime(frame, unit=scene_units) + mtime = OpenMaya.MTime(frame, unit=scene_units) # Build little cache so we don't query the same MPlug's value # again if it was checked on this frame and also is a dependency @@ -3446,3 +3613,427 @@ def iter_visible_nodes_in_range(nodes, start, end): def get_attribute_input(attr): connections = cmds.listConnections(attr, plugs=True, destination=False) return connections[0] if connections else None + + +def convert_to_maya_fps(fps): + """Convert any fps to supported Maya framerates.""" + float_framerates = [ + 23.976023976023978, + # WTF is 29.97 df vs fps? + 29.97002997002997, + 47.952047952047955, + 59.94005994005994 + ] + # 44100 fps evaluates as 41000.0. Why? Omitting for now. + int_framerates = [ + 2, + 3, + 4, + 5, + 6, + 8, + 10, + 12, + 15, + 16, + 20, + 24, + 25, + 30, + 40, + 48, + 50, + 60, + 75, + 80, + 90, + 100, + 120, + 125, + 150, + 200, + 240, + 250, + 300, + 375, + 400, + 500, + 600, + 750, + 1200, + 1500, + 2000, + 3000, + 6000, + 48000 + ] + + # If input fps is a whole number we'll return. + if float(fps).is_integer(): + # Validate fps is part of Maya's fps selection. + if int(fps) not in int_framerates: + raise ValueError( + "Framerate \"{}\" is not supported in Maya".format(fps) + ) + return int(fps) + else: + # Differences to supported float frame rates. + differences = [] + for i in float_framerates: + differences.append(abs(i - fps)) + + # Validate difference does not stray too far from supported framerates. + min_difference = min(differences) + min_index = differences.index(min_difference) + supported_framerate = float_framerates[min_index] + if min_difference > 0.1: + raise ValueError( + "Framerate \"{}\" strays too far from any supported framerate" + " in Maya. 
Closest supported framerate is \"{}\"".format( + fps, supported_framerate + ) + ) + + return supported_framerate + + +def write_xgen_file(data, filepath): + """Overwrites data in .xgen files. + + Quite naive approach to mainly overwrite "xgDataPath" and "xgProjectPath". + + Args: + data (dict): Dictionary of key, value. Key matches with xgen file. + For example: + {"xgDataPath": "some/path"} + filepath (string): Absolute path of .xgen file. + """ + # Generate regex lookup for line to key basically + # match any of the keys in `\t{key}\t\t` + keys = "|".join(re.escape(key) for key in data.keys()) + re_keys = re.compile("^\t({})\t\t".format(keys)) + + lines = [] + with open(filepath, "r") as f: + for line in f: + match = re_keys.match(line) + if match: + key = match.group(1) + value = data[key] + line = "\t{}\t\t{}\n".format(key, value) + + lines.append(line) + + with open(filepath, "w") as f: + f.writelines(lines) + + +def get_color_management_preferences(): + """Get and resolve OCIO preferences.""" + data = { + # Is color management enabled. + "enabled": cmds.colorManagementPrefs( + query=True, cmEnabled=True + ), + "rendering_space": cmds.colorManagementPrefs( + query=True, renderingSpaceName=True + ), + "output_transform": cmds.colorManagementPrefs( + query=True, outputTransformName=True + ), + "output_transform_enabled": cmds.colorManagementPrefs( + query=True, outputTransformEnabled=True + ), + "view_transform": cmds.colorManagementPrefs( + query=True, viewTransformName=True + ) + } + + # Split view and display from view_transform. view_transform comes in + # format of "{view} ({display})". + regex = re.compile(r"^(?P<view>.+) \((?P<display>.+)\)$") + if int(cmds.about(version=True)) <= 2020: + # view_transform comes in format of "{view} {display}" in 2020. + regex = re.compile(r"^(?P<view>.+) (?P<display>.+)$") + + match = regex.match(data["view_transform"]) + if not match: + raise ValueError( + "Unable to parse view and display from Maya view transform: '{}' " + "using regex '{}'".format(data["view_transform"], regex.pattern) + ) + + data.update({ + "display": match.group("display"), + "view": match.group("view") + }) + + # Get config absolute path. + path = cmds.colorManagementPrefs( + query=True, configFilePath=True + ) + + # The OCIO config supports a custom <MAYA_RESOURCES> token. + maya_resources_token = "<MAYA_RESOURCES>" + maya_resources_path = OpenMaya.MGlobal.getAbsolutePathToResources() + path = path.replace(maya_resources_token, maya_resources_path) + + data["config"] = path + + return data + + +def get_color_management_output_transform(): + preferences = get_color_management_preferences() + colorspace = preferences["rendering_space"] + if preferences["output_transform_enabled"]: + colorspace = preferences["output_transform"] + return colorspace + + +def image_info(file_path): + # type: (str) -> dict + """Based on the texture path, get its bit depth and format information. + Take reference from makeTx.py in Arnold: + ImageInfo(filename): Get Image Information for colorspace + AiTextureGetFormat(filename): Get Texture Format + AiTextureGetBitDepth(filename): Get Texture bit depth + Args: + file_path (str): Path to the texture file. + Returns: + dict: Dictionary with the information about the texture file. 
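+ + Example (illustrative; the path is hypothetical and the returned values depend on the texture on disk; "format" is an AI_TYPE_* integer constant): + >>> image_info("/textures/diffuse.exr") # doctest: +SKIP + {'filename': '/textures/diffuse.exr', 'bit_depth': 16, 'format': 4} 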
+ """ + from arnold import ( + AiTextureGetBitDepth, + AiTextureGetFormat + ) + # Get Texture Information + img_info = {'filename': file_path} + if os.path.isfile(file_path): + img_info['bit_depth'] = AiTextureGetBitDepth(file_path) # noqa + img_info['format'] = AiTextureGetFormat(file_path) # noqa + else: + img_info['bit_depth'] = 8 + img_info['format'] = "unknown" + return img_info + + +def guess_colorspace(img_info): + # type: (dict) -> str + """Guess the colorspace of the input image filename. + Note: + Reference from makeTx.py + Args: + img_info (dict): Image info generated by :func:`image_info` + Returns: + str: color space name use in the `--colorconvert` + option of maketx. + """ + from arnold import ( + AiTextureInvalidate, + # types + AI_TYPE_BYTE, + AI_TYPE_INT, + AI_TYPE_UINT + ) + try: + if img_info['bit_depth'] <= 16: + if img_info['format'] in (AI_TYPE_BYTE, AI_TYPE_INT, AI_TYPE_UINT): # noqa + return 'sRGB' + else: + return 'linear' + # now discard the image file as AiTextureGetFormat has loaded it + AiTextureInvalidate(img_info['filename']) # noqa + except ValueError: + print(("[maketx] Error: Could not guess" + "colorspace for {}").format(img_info["filename"])) + return "linear" + + +def len_flattened(components): + """Return the length of the list as if it was flattened. + + Maya will return consecutive components as a single entry + when requesting with `maya.cmds.ls` without the `flatten` + flag. Though enabling `flatten` on a large list (e.g. millions) + will result in a slow result. This command will return the amount + of entries in a non-flattened list by parsing the result with + regex. + + Args: + components (list): The non-flattened components. + + Returns: + int: The amount of entries. + + """ + assert isinstance(components, (list, tuple)) + n = 0 + + pattern = re.compile(r"\[(\d+):(\d+)\]") + for c in components: + match = pattern.search(c) + if match: + start, end = match.groups() + n += int(end) - int(start) + 1 + else: + n += 1 + return n + + +def get_all_children(nodes): + """Return all children of `nodes` including each instanced child. + Using maya.cmds.listRelatives(allDescendents=True) includes only the first + instance. As such, this function acts as an optimal replacement with a + focus on a fast query. + + """ + + sel = OpenMaya.MSelectionList() + traversed = set() + iterator = OpenMaya.MItDag(OpenMaya.MItDag.kDepthFirst) + for node in nodes: + + if node in traversed: + # Ignore if already processed as a child + # before + continue + + sel.clear() + sel.add(node) + dag = sel.getDagPath(0) + + iterator.reset(dag) + # ignore self + iterator.next() # noqa: B305 + while not iterator.isDone(): + + path = iterator.fullPathName() + + if path in traversed: + iterator.prune() + iterator.next() # noqa: B305 + continue + + traversed.add(path) + iterator.next() # noqa: B305 + + return list(traversed) + + +def get_capture_preset(task_name, task_type, subset, project_settings, log): + """Get capture preset for playblasting. + + Logic for transitioning from old style capture preset to new capture preset + profiles. + + Args: + task_name (str): Task name. + take_type (str): Task type. + subset (str): Subset name. + project_settings (dict): Project settings. + log (object): Logging object. 
+ """ + capture_preset = None + filtering_criteria = { + "hosts": "maya", + "families": "review", + "task_names": task_name, + "task_types": task_type, + "subset": subset + } + + plugin_settings = project_settings["maya"]["publish"]["ExtractPlayblast"] + if plugin_settings["profiles"]: + profile = filter_profiles( + plugin_settings["profiles"], + filtering_criteria, + logger=log + ) + capture_preset = profile.get("capture_preset") + else: + log.warning("No profiles present for Extract Playblast") + + # Backward compatibility for deprecated Extract Playblast settings + # without profiles. + if capture_preset is None: + log.debug( + "Falling back to deprecated Extract Playblast capture preset " + "because no new style playblast profiles are defined." + ) + capture_preset = plugin_settings["capture_preset"] + + return capture_preset or {} + + +def create_rig_animation_instance( + nodes, context, namespace, options=None, log=None +): + """Create an animation publish instance for loaded rigs. + + See the RecreateRigAnimationInstance inventory action on how to use this + for loaded rig containers. + + Arguments: + nodes (list): Member nodes of the rig instance. + context (dict): Representation context of the rig container + namespace (str): Namespace of the rig container + options (dict, optional): Additional loader data + log (logging.Logger, optional): Logger to log to if provided + + Returns: + None + + """ + if options is None: + options = {} + + output = next((node for node in nodes if + node.endswith("out_SET")), None) + controls = next((node for node in nodes if + node.endswith("controls_SET")), None) + + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." + + # Find the roots amongst the loaded nodes + roots = ( + cmds.ls(nodes, assemblies=True, long=True) or + get_highest_in_hierarchy(nodes) + ) + assert roots, "No root nodes in rig, this is a bug." + + asset = legacy_io.Session["AVALON_ASSET"] + dependency = str(context["representation"]["_id"]) + + custom_subset = options.get("animationSubsetName") + if custom_subset: + formatting_data = { + "asset_name": context['asset']['name'], + "asset_type": context['asset']['type'], + "subset": context['subset']['name'], + "family": ( + context['subset']['data'].get('family') or + context['subset']['data']['families'][0] + ) + } + namespace = get_custom_namespace( + custom_subset.format( + **formatting_data + ) + ) + + if log: + log.info("Creating subset: {}".format(namespace)) + + # Create the animation instance + creator_plugin = get_legacy_creator_by_name("CreateAnimation") + with maintained_selection(): + cmds.select([output, controls] + roots, noExpand=True) + legacy_create( + creator_plugin, + name=namespace, + asset=asset, + options={"useSelection": True}, + data={"dependencies": dependency} + ) diff --git a/openpype/hosts/maya/api/lib_renderproducts.py b/openpype/hosts/maya/api/lib_renderproducts.py index c54e3ab3e0..a6bcd003a5 100644 --- a/openpype/hosts/maya/api/lib_renderproducts.py +++ b/openpype/hosts/maya/api/lib_renderproducts.py @@ -46,6 +46,7 @@ import attr from . import lib from . 
import lib_rendersetup +from openpype.pipeline.colorspace import get_ocio_config_views from maya import cmds, mel @@ -127,6 +128,7 @@ class RenderProduct(object): """ productName = attr.ib() ext = attr.ib() # extension + colorspace = attr.ib() # colorspace aov = attr.ib(default=None) # source aov driver = attr.ib(default=None) # source driver multipart = attr.ib(default=False) # multichannel file @@ -196,12 +198,18 @@ class ARenderProducts: """Constructor.""" self.layer = layer self.render_instance = render_instance - self.multipart = False + self.multipart = self.get_multipart() # Initialize self.layer_data = self._get_layer_data() self.layer_data.products = self.get_render_products() + def get_multipart(self): + raise NotImplementedError( + "The render product implementation does not have a " + "\"get_multipart\" method." + ) + def has_camera_token(self): # type: () -> bool """Check if camera token is in image prefix. @@ -331,7 +339,7 @@ class ARenderProducts: aov_tokens = ["", ""] def match_last(tokens, text): - """regex match the last occurence from a list of tokens""" + """regex match the last occurrence from a list of tokens""" pattern = "(?:.*)({})".format("|".join(tokens)) return re.search(pattern, text, re.IGNORECASE) @@ -344,7 +352,6 @@ class ARenderProducts: separator = file_prefix[matches[0].end(1):matches[1].start(1)] return separator - def _get_layer_data(self): # type: () -> LayerMetadata # ______________________________________________ @@ -531,16 +538,20 @@ class RenderProductsArnold(ARenderProducts): return prefix - def _get_aov_render_products(self, aov, cameras=None): - """Return all render products for the AOV""" - - products = [] - aov_name = self._get_attr(aov, "name") + def get_multipart(self): multipart = False multilayer = bool(self._get_attr("defaultArnoldDriver.multipart")) merge_AOVs = bool(self._get_attr("defaultArnoldDriver.mergeAOVs")) if multilayer or merge_AOVs: multipart = True + + return multipart + + def _get_aov_render_products(self, aov, cameras=None): + """Return all render products for the AOV""" + + products = [] + aov_name = self._get_attr(aov, "name") ai_drivers = cmds.listConnections("{}.outputs".format(aov), source=True, destination=False, @@ -553,6 +564,9 @@ class RenderProductsArnold(ARenderProducts): ] for ai_driver in ai_drivers: + colorspace = self._get_colorspace( + ai_driver + ".colorManagement" + ) # todo: check aiAOVDriver.prefix as it could have # a custom path prefix set for this driver @@ -590,12 +604,15 @@ class RenderProductsArnold(ARenderProducts): global_aov = self._get_attr(aov, "globalAov") if global_aov: for camera in cameras: - product = RenderProduct(productName=name, - ext=ext, - aov=aov_name, - driver=ai_driver, - multipart=multipart, - camera=camera) + product = RenderProduct( + productName=name, + ext=ext, + aov=aov_name, + driver=ai_driver, + multipart=self.multipart, + camera=camera, + colorspace=colorspace + ) products.append(product) all_light_groups = self._get_attr(aov, "lightGroups") @@ -603,13 +620,16 @@ class RenderProductsArnold(ARenderProducts): # All light groups is enabled. 
A single multipart # Render Product for camera in cameras: - product = RenderProduct(productName=name + "_lgroups", - ext=ext, - aov=aov_name, - driver=ai_driver, - # Always multichannel output - multipart=True, - camera=camera) + product = RenderProduct( + productName=name + "_lgroups", + ext=ext, + aov=aov_name, + driver=ai_driver, + # Always multichannel output + multipart=True, + camera=camera, + colorspace=colorspace + ) products.append(product) else: value = self._get_attr(aov, "lightGroupsList") @@ -625,12 +645,36 @@ class RenderProductsArnold(ARenderProducts): aov=aov_name, driver=ai_driver, ext=ext, - camera=camera + camera=camera, + colorspace=colorspace ) products.append(product) return products + def _get_colorspace(self, attribute): + """Resolve colorspace from Arnold settings.""" + + def _view_transform(): + preferences = lib.get_color_management_preferences() + views_data = get_ocio_config_views(preferences["config"]) + view_data = views_data[ + "{}/{}".format(preferences["display"], preferences["view"]) + ] + return view_data["colorspace"] + + def _raw(): + preferences = lib.get_color_management_preferences() + return preferences["rendering_space"] + + resolved_values = { + "Raw": _raw, + "Use View Transform": _view_transform, + # Default. Same as Maya Preferences. + "Use Output Transform": lib.get_color_management_output_transform + } + return resolved_values[self._get_attr(attribute)]() + def get_render_products(self): """Get all AOVs. @@ -659,11 +703,19 @@ class RenderProductsArnold(ARenderProducts): ] default_ext = self._get_attr("defaultRenderGlobals.imfPluginKey") - beauty_products = [RenderProduct( - productName="beauty", - ext=default_ext, - driver="defaultArnoldDriver", - camera=camera) for camera in cameras] + colorspace = self._get_colorspace( + "defaultArnoldDriver.colorManagement" + ) + beauty_products = [ + RenderProduct( + productName="beauty", + ext=default_ext, + driver="defaultArnoldDriver", + camera=camera, + colorspace=colorspace + ) for camera in cameras + ] + # AOVs > Legacy > Maya Render View > Mode aovs_enabled = bool( self._get_attr("defaultArnoldRenderOptions.aovMode") @@ -731,6 +783,14 @@ class RenderProductsVray(ARenderProducts): renderer = "vray" + def get_multipart(self): + multipart = False + image_format = self._get_attr("vraySettings.imageFormatStr") + if image_format == "exr (multichannel)": + multipart = True + + return multipart + def get_renderer_prefix(self): # type: () -> str """Get image prefix for V-Ray. 
@@ -797,6 +857,7 @@ class RenderProductsVray(ARenderProducts): if default_ext in {"exr (multichannel)", "exr (deep)"}: default_ext = "exr" + colorspace = lib.get_color_management_output_transform() products = [] # add beauty as default when not disabled @@ -804,23 +865,30 @@ class RenderProductsVray(ARenderProducts): if not dont_save_rgb: for camera in cameras: products.append( - RenderProduct(productName="", - ext=default_ext, - camera=camera)) + RenderProduct( + productName="", + ext=default_ext, + camera=camera, + colorspace=colorspace, + multipart=self.multipart + ) + ) # separate alpha file separate_alpha = self._get_attr("vraySettings.separateAlpha") if separate_alpha: for camera in cameras: products.append( - RenderProduct(productName="Alpha", - ext=default_ext, - camera=camera) + RenderProduct( + productName="Alpha", + ext=default_ext, + camera=camera, + colorspace=colorspace, + multipart=self.multipart + ) ) - - if image_format_str == "exr (multichannel)": + if self.multipart: # AOVs are merged in m-channel file, only main layer is rendered - self.multipart = True return products # handle aovs from references @@ -851,17 +919,21 @@ class RenderProductsVray(ARenderProducts): product = RenderProduct(productName=name, ext=default_ext, aov=aov, - camera=camera) + camera=camera, + colorspace=colorspace) products.append(product) # Continue as we've processed this special case AOV continue aov_name = self._get_vray_aov_name(aov) for camera in cameras: - product = RenderProduct(productName=aov_name, - ext=default_ext, - aov=aov, - camera=camera) + product = RenderProduct( + productName=aov_name, + ext=default_ext, + aov=aov, + camera=camera, + colorspace=colorspace + ) products.append(product) return products @@ -979,6 +1051,34 @@ class RenderProductsRedshift(ARenderProducts): renderer = "redshift" unmerged_aovs = {"Cryptomatte"} + def get_files(self, product): + # When outputting AOVs we need to replace Redshift specific AOV tokens + # with Maya render tokens for generating file sequences. We validate to + # a specific AOV fileprefix so we only need to account for one + # replacement. + if not product.multipart and product.driver: + file_prefix = self._get_attr(product.driver + ".filePrefix") + self.layer_data.filePrefix = file_prefix.replace( + "/", + "//" + ) + + return super(RenderProductsRedshift, self).get_files(product) + + def get_multipart(self): + # For Redshift we don't directly return upon forcing multilayer + # due to some AOVs still being written into separate files, + # like Cryptomatte. + # AOVs are merged in multi-channel file + multipart = False + force_layer = bool( + self._get_attr("redshiftOptions.exrForceMultilayer") + ) + if force_layer: + multipart = True + + return multipart + def get_renderer_prefix(self): """Get image prefix for Redshift. @@ -1018,16 +1118,6 @@ class RenderProductsRedshift(ARenderProducts): for c in self.get_renderable_cameras() ] - # For Redshift we don't directly return upon forcing multilayer - # due to some AOVs still being written into separate files, - # like Cryptomatte. 
- # AOVs are merged in multi-channel file - multipart = False - force_layer = bool(self._get_attr("redshiftOptions.exrForceMultilayer")) # noqa - exMultipart = bool(self._get_attr("redshiftOptions.exrMultipart")) - if exMultipart or force_layer: - multipart = True - # Get Redshift Extension from image format image_format = self._get_attr("redshiftOptions.imageFormat") # integer ext = mel.eval("redshiftGetImageExtension(%i)" % image_format) @@ -1043,13 +1133,14 @@ class RenderProductsRedshift(ARenderProducts): products = [] light_groups_enabled = False has_beauty_aov = False + colorspace = lib.get_color_management_output_transform() for aov in aovs: enabled = self._get_attr(aov, "enabled") if not enabled: continue aov_type = self._get_attr(aov, "aovType") - if multipart and aov_type not in self.unmerged_aovs: + if self.multipart and aov_type not in self.unmerged_aovs: continue # Any AOVs that still get processed, like Cryptomatte @@ -1084,8 +1175,10 @@ class RenderProductsRedshift(ARenderProducts): productName=aov_light_group_name, aov=aov_name, ext=ext, - multipart=multipart, - camera=camera) + multipart=False, + camera=camera, + driver=aov, + colorspace=colorspace) products.append(product) if light_groups: @@ -1098,8 +1191,10 @@ class RenderProductsRedshift(ARenderProducts): product = RenderProduct(productName=aov_name, aov=aov_name, ext=ext, - multipart=multipart, - camera=camera) + multipart=False, + camera=camera, + driver=aov, + colorspace=colorspace) products.append(product) # When a Beauty AOV is added manually, it will be rendered as @@ -1114,8 +1209,9 @@ class RenderProductsRedshift(ARenderProducts): products.insert(0, RenderProduct(productName=beauty_name, ext=ext, - multipart=multipart, - camera=camera)) + multipart=self.multipart, + camera=camera, + colorspace=colorspace)) return products @@ -1132,6 +1228,11 @@ class RenderProductsRenderman(ARenderProducts): """ renderer = "renderman" + unmerged_aovs = {"PxrCryptomatte"} + + def get_multipart(self): + # Implemented as display specific in "get_render_products". + return False def get_render_products(self): """Get all AOVs. @@ -1142,6 +1243,8 @@ class RenderProductsRenderman(ARenderProducts): """ from rfm2.api.displays import get_displays # noqa + colorspace = lib.get_color_management_output_transform() + cameras = [ self.sanitize_camera_name(c) for c in self.get_renderable_cameras() @@ -1181,6 +1284,17 @@ class RenderProductsRenderman(ARenderProducts): if not display_types.get(display["driverNode"]["type"]): continue + has_cryptomatte = cmds.ls(type=self.unmerged_aovs) + matte_enabled = False + if has_cryptomatte: + for cryptomatte in has_cryptomatte: + cryptomatte_aov = cryptomatte + matte_name = "cryptomatte" + rman_globals = cmds.listConnections(cryptomatte + + ".message") + if rman_globals: + matte_enabled = True + aov_name = name if aov_name == "rmanDefaultDisplay": aov_name = "beauty" @@ -1197,8 +1311,19 @@ class RenderProductsRenderman(ARenderProducts): productName=aov_name, ext=extensions, camera=camera, - multipart=True + multipart=True, + colorspace=colorspace ) + + if has_cryptomatte and matte_enabled: + cryptomatte = RenderProduct( + productName=matte_name, + aov=cryptomatte_aov, + ext=extensions, + camera=camera, + multipart=True, + colorspace=colorspace + ) else: # this code should handle the case where no multipart # capable format is selected. 
But since it involves @@ -1218,6 +1343,9 @@ class RenderProductsRenderman(ARenderProducts): products.append(product) + if has_cryptomatte and matte_enabled: + products.append(cryptomatte) + return products def get_files(self, product): @@ -1249,6 +1377,10 @@ class RenderProductsMayaHardware(ARenderProducts): {"label": "EXR(exr)", "index": 40, "extension": "exr"} ] + def get_multipart(self): + # MayaHardware does not support multipart EXRs. + return False + def _get_extension(self, value): result = None if isinstance(value, int): @@ -1293,7 +1425,12 @@ class RenderProductsMayaHardware(ARenderProducts): products = [] for cam in self.get_renderable_cameras(): - product = RenderProduct(productName="beauty", ext=ext, camera=cam) + product = RenderProduct( + productName="beauty", + ext=ext, + camera=cam, + colorspace=lib.get_color_management_output_transform() + ) products.append(product) return products diff --git a/openpype/hosts/maya/api/lib_rendersettings.py b/openpype/hosts/maya/api/lib_rendersettings.py index 5161141ef9..eaa728a2f6 100644 --- a/openpype/hosts/maya/api/lib_rendersettings.py +++ b/openpype/hosts/maya/api/lib_rendersettings.py @@ -14,7 +14,7 @@ from openpype.settings import ( from openpype.pipeline import legacy_io from openpype.pipeline import CreatorError from openpype.pipeline.context_tools import get_current_project_asset -from openpype.hosts.maya.api.commands import reset_frame_range +from openpype.hosts.maya.api.lib import reset_frame_range class RenderSettings(object): @@ -22,17 +22,26 @@ class RenderSettings(object): _image_prefix_nodes = { 'vray': 'vraySettings.fileNamePrefix', 'arnold': 'defaultRenderGlobals.imageFilePrefix', - 'renderman': 'defaultRenderGlobals.imageFilePrefix', - 'redshift': 'defaultRenderGlobals.imageFilePrefix' + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix', + 'mayahardware2': 'defaultRenderGlobals.imageFilePrefix' } _image_prefixes = { 'vray': get_current_project_settings()["maya"]["RenderSettings"]["vray_renderer"]["image_prefix"], # noqa 'arnold': get_current_project_settings()["maya"]["RenderSettings"]["arnold_renderer"]["image_prefix"], # noqa - 'renderman': '//{aov_separator}', + 'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_prefix"], # noqa 'redshift': get_current_project_settings()["maya"]["RenderSettings"]["redshift_renderer"]["image_prefix"] # noqa } + # Renderman only + _image_dir = { + 'renderman': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["image_dir"], # noqa + 'cryptomatte': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["cryptomatte_dir"], # noqa + 'imageDisplay': get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["imageDisplay_dir"], # noqa + "watermark": get_current_project_settings()["maya"]["RenderSettings"]["renderman_renderer"]["watermark_dir"] # noqa + } + _aov_chars = { "dot": ".", "dash": "-", @@ -81,7 +90,6 @@ class RenderSettings(object): prefix, type="string") # noqa else: print("{0} isn't a supported renderer to autoset settings.".format(renderer)) # noqa - # TODO: handle not having res values in the doc width = asset_doc["data"].get("resolutionWidth") height = asset_doc["data"].get("resolutionHeight") @@ -97,6 +105,13 @@ class RenderSettings(object): self._set_redshift_settings(width, height) mel.eval("redshiftUpdateActiveAovList") + if renderer == "renderman": + image_dir = self._image_dir["renderman"] + 
cmds.setAttr("rmanGlobals.imageOutputDir", + image_dir, type="string") + self._set_renderman_settings(width, height, + aov_separator) + def _set_arnold_settings(self, width, height): """Sets settings for Arnold.""" from mtoa.core import createOptions # noqa @@ -143,7 +158,7 @@ class RenderSettings(object): cmds.setAttr( "defaultArnoldDriver.mergeAOVs", multi_exr) self._additional_attribs_setter(additional_options) - reset_frame_range() + reset_frame_range(playback=False, fps=False, render=True) def _set_redshift_settings(self, width, height): """Sets settings for Redshift.""" @@ -202,6 +217,66 @@ class RenderSettings(object): cmds.setAttr("defaultResolution.height", height) self._additional_attribs_setter(additional_options) + def _set_renderman_settings(self, width, height, aov_separator): + """Sets settings for Renderman""" + rman_render_presets = ( + self._project_settings + ["maya"] + ["RenderSettings"] + ["renderman_renderer"] + ) + display_filters = rman_render_presets["display_filters"] + d_filters_number = len(display_filters) + for i in range(d_filters_number): + d_node = cmds.ls(typ=display_filters[i]) + if len(d_node) > 0: + filter_nodes = d_node[0] + else: + filter_nodes = cmds.createNode(display_filters[i]) + + cmds.connectAttr(filter_nodes + ".message", + "rmanGlobals.displayFilters[%i]" % i, + force=True) + if filter_nodes.startswith("PxrImageDisplayFilter"): + imageDisplay_dir = self._image_dir["imageDisplay"] + imageDisplay_dir = imageDisplay_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + imageDisplay_dir, type="string") + + sample_filters = rman_render_presets["sample_filters"] + s_filters_number = len(sample_filters) + for n in range(s_filters_number): + s_node = cmds.ls(typ=sample_filters[n]) + if len(s_node) > 0: + filter_nodes = s_node[0] + else: + filter_nodes = cmds.createNode(sample_filters[n]) + + cmds.connectAttr(filter_nodes + ".message", + "rmanGlobals.sampleFilters[%i]" % n, + force=True) + + if filter_nodes.startswith("PxrCryptomatte"): + matte_dir = self._image_dir["cryptomatte"] + matte_dir = matte_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + matte_dir, type="string") + elif filter_nodes.startswith("PxrWatermarkFilter"): + watermark_dir = self._image_dir["watermark"] + watermark_dir = watermark_dir.replace("{aov_separator}", + aov_separator) + cmds.setAttr(filter_nodes + ".filename", + watermark_dir, type="string") + + additional_options = rman_render_presets["additional_options"] + + self._set_global_output_settings() + cmds.setAttr("defaultResolution.width", width) + cmds.setAttr("defaultResolution.height", height) + self._additional_attribs_setter(additional_options) + def _set_vray_settings(self, aov_separator, width, height): # type: (str, int, int) -> None """Sets important settings for Vray.""" @@ -261,7 +336,8 @@ class RenderSettings(object): ) # Set render file format to exr - cmds.setAttr("{}.imageFormatStr".format(node), "exr", type="string") + ext = vray_render_presets["image_format"] + cmds.setAttr("{}.imageFormatStr".format(node), ext, type="string") # animType cmds.setAttr("{}.animType".format(node), 1) diff --git a/openpype/hosts/maya/api/lib_rendersetup.py b/openpype/hosts/maya/api/lib_rendersetup.py index e616f26e1b..440ee21a52 100644 --- a/openpype/hosts/maya/api/lib_rendersetup.py +++ b/openpype/hosts/maya/api/lib_rendersetup.py @@ -19,6 +19,8 @@ from maya.app.renderSetup.model.override import ( UniqueOverride ) +from openpype.hosts.maya.api.lib 
import get_attribute + EXACT_MATCH = 0 PARENT_MATCH = 1 CLIENT_MATCH = 2 @@ -96,9 +98,6 @@ def get_attr_in_layer(node_attr, layer): """ - # Delay pymel import to here because it's slow to load - import pymel.core as pm - def _layer_needs_update(layer): """Return whether layer needs updating.""" # Use `getattr` as e.g. DEFAULT_RENDER_LAYER does not have @@ -125,7 +124,7 @@ def get_attr_in_layer(node_attr, layer): node = history_overrides[-1] if history_overrides else override node_attr_ = node + ".original" - return pm.getAttr(node_attr_, asString=True) + return get_attribute(node_attr_, asString=True) layer = get_rendersetup_layer(layer) rs = renderSetup.instance() @@ -145,7 +144,7 @@ def get_attr_in_layer(node_attr, layer): # we will let it error out. rs.switchToLayer(current_layer) - return pm.getAttr(node_attr, asString=True) + return get_attribute(node_attr, asString=True) overrides = get_attr_overrides(node_attr, layer) default_layer_value = get_default_layer_value(node_attr) @@ -156,7 +155,7 @@ def get_attr_in_layer(node_attr, layer): for match, layer_override, index in overrides: if isinstance(layer_override, AbsOverride): # Absolute override - value = pm.getAttr(layer_override.name() + ".attrValue") + value = get_attribute(layer_override.name() + ".attrValue") if match == EXACT_MATCH: # value = value pass @@ -168,8 +167,8 @@ def get_attr_in_layer(node_attr, layer): elif isinstance(layer_override, RelOverride): # Relative override # Value = Original * Multiply + Offset - multiply = pm.getAttr(layer_override.name() + ".multiply") - offset = pm.getAttr(layer_override.name() + ".offset") + multiply = get_attribute(layer_override.name() + ".multiply") + offset = get_attribute(layer_override.name() + ".offset") if match == EXACT_MATCH: value = value * multiply + offset diff --git a/openpype/hosts/maya/api/menu.py b/openpype/hosts/maya/api/menu.py index 67109e9958..5284c0249d 100644 --- a/openpype/hosts/maya/api/menu.py +++ b/openpype/hosts/maya/api/menu.py @@ -12,7 +12,7 @@ from openpype.pipeline.workfile import BuildWorkfile from openpype.tools.utils import host_tools from openpype.hosts.maya.api import lib, lib_rendersettings from .lib import get_main_window, IS_HEADLESS -from .commands import reset_frame_range +from ..tools import show_look_assigner from .workfile_template_builder import ( create_placeholder, @@ -50,7 +50,6 @@ def install(): parent="MayaWindow" ) - renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() # Create context menu context_label = "{}, {}".format( legacy_io.Session["AVALON_ASSET"], @@ -113,12 +112,12 @@ def install(): ) cmds.menuItem( - "Reset Frame Range", - command=lambda *args: reset_frame_range() + "Set Frame Range", + command=lambda *args: lib.reset_frame_range() ) cmds.menuItem( - "Reset Resolution", + "Set Resolution", command=lambda *args: lib.reset_scene_resolution() ) @@ -141,7 +140,7 @@ def install(): cmds.menuItem( "Look assigner...", - command=lambda *args: host_tools.show_look_assigner( + command=lambda *args: show_look_assigner( parent_widget ) ) diff --git a/openpype/hosts/maya/api/pipeline.py b/openpype/hosts/maya/api/pipeline.py index 3798170671..5323717fa7 100644 --- a/openpype/hosts/maya/api/pipeline.py +++ b/openpype/hosts/maya/api/pipeline.py @@ -514,6 +514,9 @@ def check_lock_on_current_file(): # add the lock file when opening the file filepath = current_file() + # Skip if current file is 'untitled' + if not filepath: + return if is_workfile_locked(filepath): # add lockfile dialog @@ -680,10 +683,12 @@ def 
before_workfile_save(event): def after_workfile_save(event): workfile_name = event["filename"] - if handle_workfile_locks(): - if workfile_name: - if not is_workfile_locked(workfile_name): - create_workfile_lock(workfile_name) + if ( + handle_workfile_locks() + and workfile_name + and not is_workfile_locked(workfile_name) + ): + create_workfile_lock(workfile_name) class MayaDirmap(HostDirmap): diff --git a/openpype/hosts/maya/api/plugin.py b/openpype/hosts/maya/api/plugin.py index 82df85a8be..604ff101db 100644 --- a/openpype/hosts/maya/api/plugin.py +++ b/openpype/hosts/maya/api/plugin.py @@ -1,4 +1,5 @@ import os +import re from maya import cmds @@ -12,6 +13,7 @@ from openpype.pipeline import ( AVALON_CONTAINER_ID, Anatomy, ) +from openpype.pipeline.load import LoadError from openpype.settings import get_project_settings from .pipeline import containerise from . import lib @@ -143,15 +145,46 @@ class ReferenceLoader(Loader): assert os.path.exists(self.fname), "%s does not exist." % self.fname asset = context['asset'] + subset = context['subset'] + settings = get_project_settings(context['project']['name']) + custom_naming = settings['maya']['load']['reference_loader'] loaded_containers = [] - count = options.get("count") or 1 - for c in range(0, count): - namespace = namespace or lib.unique_namespace( - "{}_{}_".format(asset["name"], context["subset"]["name"]), - prefix="_" if asset["name"][0].isdigit() else "", - suffix="_", + if not custom_naming['namespace']: + raise LoadError("No namespace specified in " + "Maya ReferenceLoader settings") + elif not custom_naming['group_name']: + raise LoadError("No group name specified in " + "Maya ReferenceLoader settings") + + formatting_data = { + "asset_name": asset['name'], + "asset_type": asset['type'], + "subset": subset['name'], + "family": ( + subset['data'].get('family') or + subset['data']['families'][0] ) + } + + custom_namespace = custom_naming['namespace'].format( + **formatting_data + ) + + custom_group_name = custom_naming['group_name'].format( + **formatting_data + ) + + count = options.get("count") or 1 + + for c in range(0, count): + namespace = lib.get_custom_namespace(custom_namespace) + group_name = "{}:{}".format( + namespace, + custom_group_name + ) + + options['group_name'] = group_name # Offset loaded subset if "offset" in options: @@ -187,7 +220,7 @@ class ReferenceLoader(Loader): return loaded_containers - def process_reference(self, context, name, namespace, data): + def process_reference(self, context, name, namespace, options): """To be implemented by subclass""" raise NotImplementedError("Must be implemented by subclass") @@ -300,6 +333,39 @@ class ReferenceLoader(Loader): str(representation["_id"]), type="string") + # When an animation or pointcache gets connected to an Xgen container, + # the compound attribute "xgenContainers" gets created. When animation + # containers get updated we also need to update the cacheFileName on + # the Xgen collection. 
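+ # For illustration only (node names are hypothetical), the lookup below + # resolves roughly as: node.xgenContainers[0].container -> connected + # objectSet -> its reference node -> xgmPalette nodes -> descriptions, + # where each description receives the new "cacheFileName" value. 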
+ compound_name = "xgenContainers" + if cmds.objExists("{}.{}".format(node, compound_name)): + import xgenm + container_amount = cmds.getAttr( + "{}.{}".format(node, compound_name), size=True + ) + # loop through all compound children + for i in range(container_amount): + attr = "{}.{}[{}].container".format(node, compound_name, i) + objectset = cmds.listConnections(attr)[0] + reference_node = cmds.sets(objectset, query=True)[0] + palettes = cmds.ls( + cmds.referenceQuery(reference_node, nodes=True), + type="xgmPalette" + ) + for palette in palettes: + for description in xgenm.descriptions(palette): + xgenm.setAttr( + "cacheFileName", + path.replace("\\", "/"), + palette, + description, + "SplinePrimitive" + ) + + # Refresh UI and viewport. + de = xgenm.xgGlobal.DescriptionEditor + de.refresh("Full") + def remove(self, container): """Remove an existing `container` from Maya scene diff --git a/openpype/hosts/maya/api/setdress.py b/openpype/hosts/maya/api/setdress.py index 159bfe9eb3..0bb1f186eb 100644 --- a/openpype/hosts/maya/api/setdress.py +++ b/openpype/hosts/maya/api/setdress.py @@ -28,7 +28,9 @@ from openpype.pipeline import ( ) from openpype.hosts.maya.api.lib import ( matrix_equals, - unique_namespace + unique_namespace, + get_container_transforms, + DEFAULT_MATRIX ) log = logging.getLogger("PackageLoader") @@ -183,8 +185,6 @@ def _add(instance, representation_id, loaders, namespace, root="|"): """ - from openpype.hosts.maya.lib import get_container_transforms - # Process within the namespace with namespaced(namespace, new=False) as namespace: @@ -379,8 +379,6 @@ def update_scene(set_container, containers, current_data, new_data, new_file): """ - from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms - set_namespace = set_container['namespace'] project_name = legacy_io.active_project() diff --git a/openpype/hosts/maya/api/workfile_template_builder.py b/openpype/hosts/maya/api/workfile_template_builder.py index 3416c98793..6e6166c2ef 100644 --- a/openpype/hosts/maya/api/workfile_template_builder.py +++ b/openpype/hosts/maya/api/workfile_template_builder.py @@ -2,7 +2,7 @@ import json from maya import cmds -from openpype.pipeline import registered_host +from openpype.pipeline import registered_host, get_current_asset_name from openpype.pipeline.workfile.workfile_template_builder import ( TemplateAlreadyImported, AbstractTemplateBuilder, @@ -22,6 +22,8 @@ PLACEHOLDER_SET = "PLACEHOLDERS_SET" class MayaTemplateBuilder(AbstractTemplateBuilder): """Concrete implementation of AbstractTemplateBuilder for maya""" + use_legacy_creators = True + def import_template(self, path): """Import template into current scene. Block if a template is already loaded. 
@@ -31,7 +33,7 @@ class MayaTemplateBuilder(AbstractTemplateBuilder): get_template_preset implementation) Returns: - bool: Wether the template was succesfully imported or not + bool: Whether the template was successfully imported or not """ if cmds.objExists(PLACEHOLDER_SET): raise TemplateAlreadyImported(( "Build template already loaded\n" "Clean scene if needed (File > New Scene)" )) cmds.sets(name=PLACEHOLDER_SET, empty=True) - cmds.file(path, i=True, returnNewNodes=True) + new_nodes = cmds.file( + path, + i=True, + returnNewNodes=True, + preserveReferences=True, + loadReferenceDepth="all", + ) + + # make default cameras non-renderable + default_cameras = [cam for cam in cmds.ls(cameras=True) + if cmds.camera(cam, query=True, startupCamera=True)] + for cam in default_cameras: + if not cmds.attributeQuery("renderable", node=cam, exists=True): + self.log.debug( + "Camera {} has no attribute 'renderable'".format(cam) + ) + continue + cmds.setAttr("{}.renderable".format(cam), 0) cmds.setAttr(PLACEHOLDER_SET + ".hiddenInOutliner", True) + imported_sets = cmds.ls(new_nodes, set=True) + if not imported_sets: + return True + + # update imported sets information + asset_name = get_current_asset_name() + for node in imported_sets: + if not cmds.attributeQuery("id", node=node, exists=True): + continue + if cmds.getAttr("{}.id".format(node)) != "pyblish.avalon.instance": + continue + if not cmds.attributeQuery("asset", node=node, exists=True): + continue + + cmds.setAttr( + "{}.asset".format(node), asset_name, type="string") + return True @@ -97,7 +133,7 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): placeholder_name_parts = placeholder_data["builder_type"].split("_") pos = 1 - # add famlily in any + # add family if any placeholder_family = placeholder_data["family"] if placeholder_family: placeholder_name_parts.insert(pos, placeholder_family) @@ -215,26 +251,10 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): return self.get_load_plugin_options(options) def cleanup_placeholder(self, placeholder, failed): - """Hide placeholder, parent them to root - add them to placeholder set and register placeholder's parent - to keep placeholder info available for future use + """Hide the placeholder and add it to the placeholder set """ - node = placeholder._scene_identifier - node_parent = placeholder.data["parent"] - if node_parent: - cmds.setAttr(node + ".parent", node_parent, type="string") - if cmds.getAttr(node + ".index") < 0: - cmds.setAttr(node + ".index", placeholder.data["index"]) - - holding_sets = cmds.listSets(object=node) - if holding_sets: - for set in holding_sets: - cmds.sets(node, remove=set) - - if cmds.listRelatives(node, p=True): - node = cmds.parent(node, world=True)[0] cmds.sets(node, addElement=PLACEHOLDER_SET) cmds.hide(node) cmds.setAttr(node + ".hiddenInOutliner", True) @@ -267,8 +287,6 @@ class MayaPlaceholderLoadPlugin(PlaceholderPlugin, PlaceholderLoadMixin): elif not cmds.sets(root, q=True): return - if placeholder.data["parent"]: - cmds.parent(nodes_to_parent, placeholder.data["parent"]) # Move loaded nodes to correct index in outliner hierarchy placeholder_form = cmds.xform( placeholder.scene_identifier, diff --git a/openpype/hosts/maya/hooks/pre_auto_load_plugins.py b/openpype/hosts/maya/hooks/pre_auto_load_plugins.py new file mode 100644 index 0000000000..689d7adb4f --- /dev/null +++ b/openpype/hosts/maya/hooks/pre_auto_load_plugins.py @@ -0,0 +1,29 @@ +from openpype.lib import PreLaunchHook + + +class MayaPreAutoLoadPlugins(PreLaunchHook): + """Define 
-noAutoloadPlugins command flag.""" + + # Before AddLastWorkfileToLaunchArgs + order = 9 + app_groups = ["maya"] + + def execute(self): + + # Ignore if there's no last workfile to start. + if not self.data.get("start_last_workfile"): + return + + maya_settings = self.data["project_settings"]["maya"] + enabled = maya_settings["explicit_plugins_loading"]["enabled"] + if enabled: + # Force disable the `AddLastWorkfileToLaunchArgs`. + self.data.pop("start_last_workfile") + + # Force post initialization so our dedicated plug-in load can run + # prior to Maya opening a scene file. + key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" + self.launch_context.env[key] = "1" + + self.log.debug("Explicit plugins loading.") + self.launch_context.launch_args.append("-noAutoloadPlugins") diff --git a/openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py b/openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py new file mode 100644 index 0000000000..7582ce0591 --- /dev/null +++ b/openpype/hosts/maya/hooks/pre_open_workfile_post_initialization.py @@ -0,0 +1,25 @@ +from openpype.lib import PreLaunchHook + + +class MayaPreOpenWorkfilePostInitialization(PreLaunchHook): + """Define whether open last workfile should run post initialize.""" + + # Before AddLastWorkfileToLaunchArgs. + order = 9 + app_groups = ["maya"] + + def execute(self): + + # Ignore if there's no last workfile to start. + if not self.data.get("start_last_workfile"): + return + + maya_settings = self.data["project_settings"]["maya"] + enabled = maya_settings["open_workfile_post_initialization"] + if enabled: + # Force disable the `AddLastWorkfileToLaunchArgs`. + self.data.pop("start_last_workfile") + + self.log.debug("Opening workfile post initialization.") + key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" + self.launch_context.env[key] = "1" diff --git a/openpype/hosts/maya/plugins/create/create_animation.py b/openpype/hosts/maya/plugins/create/create_animation.py index e54c12315c..095cbcdd64 100644 --- a/openpype/hosts/maya/plugins/create/create_animation.py +++ b/openpype/hosts/maya/plugins/create/create_animation.py @@ -7,12 +7,20 @@ from openpype.hosts.maya.api import ( class CreateAnimation(plugin.Creator): """Animation output for character rigs""" + # We hide the animation creator from the UI since the creation of it + # is automated upon loading a rig. There's an inventory action to recreate + # it for loaded rigs if by chance someone deleted the animation instance. + # Note: This setting is actually applied from project settings + enabled = False + name = "animationDefault" label = "Animation" family = "animation" icon = "male" write_color_sets = False write_face_sets = False + include_parent_hierarchy = False + include_user_defined_attributes = False def __init__(self, *args, **kwargs): super(CreateAnimation, self).__init__(*args, **kwargs) @@ -36,7 +44,7 @@ class CreateAnimation(plugin.Creator): self.data["visibleOnly"] = False # Include the groups above the out_SET content - self.data["includeParentHierarchy"] = False # Include parent groups + self.data["includeParentHierarchy"] = self.include_parent_hierarchy # Default to exporting world-space self.data["worldSpace"] = True @@ -47,3 +55,6 @@ class CreateAnimation(plugin.Creator): # Default to write normals. 
self.data["writeNormals"] = True + + value = self.include_user_defined_attributes + self.data["includeUserDefinedAttributes"] = value diff --git a/openpype/hosts/maya/plugins/create/create_ass.py b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py similarity index 84% rename from openpype/hosts/maya/plugins/create/create_ass.py rename to openpype/hosts/maya/plugins/create/create_arnold_scene_source.py index 935a068ca5..2afb897e94 100644 --- a/openpype/hosts/maya/plugins/create/create_ass.py +++ b/openpype/hosts/maya/plugins/create/create_arnold_scene_source.py @@ -6,7 +6,7 @@ from openpype.hosts.maya.api import ( from maya import cmds -class CreateAss(plugin.Creator): +class CreateArnoldSceneSource(plugin.Creator): """Arnold Scene Source""" name = "ass" @@ -29,7 +29,7 @@ class CreateAss(plugin.Creator): maskOperator = False def __init__(self, *args, **kwargs): - super(CreateAss, self).__init__(*args, **kwargs) + super(CreateArnoldSceneSource, self).__init__(*args, **kwargs) # Add animation data self.data.update(lib.collect_animation_data()) @@ -52,7 +52,7 @@ class CreateAss(plugin.Creator): self.data["maskOperator"] = self.maskOperator def process(self): - instance = super(CreateAss, self).process() + instance = super(CreateArnoldSceneSource, self).process() nodes = [] @@ -61,6 +61,6 @@ class CreateAss(plugin.Creator): cmds.sets(nodes, rm=instance) - assContent = cmds.sets(name="content_SET") - assProxy = cmds.sets(name="proxy_SET", empty=True) + assContent = cmds.sets(name=instance + "_content_SET") + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) cmds.sets([assContent, assProxy], forceElement=instance) diff --git a/openpype/hosts/maya/plugins/create/create_look.py b/openpype/hosts/maya/plugins/create/create_look.py index 44e439fe1f..51b0b8819a 100644 --- a/openpype/hosts/maya/plugins/create/create_look.py +++ b/openpype/hosts/maya/plugins/create/create_look.py @@ -12,6 +12,7 @@ class CreateLook(plugin.Creator): family = "look" icon = "paint-brush" make_tx = True + rs_tex = False def __init__(self, *args, **kwargs): super(CreateLook, self).__init__(*args, **kwargs) @@ -20,7 +21,8 @@ class CreateLook(plugin.Creator): # Whether to automatically convert the textures to .tx upon publish. self.data["maketx"] = self.make_tx - + # Whether to automatically convert the textures to .rstex upon publish. + self.data["rstex"] = self.rs_tex # Enable users to force a copy. # - on Windows is "forceCopy" always changed to `True` because of # windows implementation of hardlinks diff --git a/openpype/hosts/maya/plugins/create/create_pointcache.py b/openpype/hosts/maya/plugins/create/create_pointcache.py index cdec140ea8..1b8d5e6850 100644 --- a/openpype/hosts/maya/plugins/create/create_pointcache.py +++ b/openpype/hosts/maya/plugins/create/create_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + from openpype.hosts.maya.api import ( lib, plugin @@ -13,6 +15,7 @@ class CreatePointCache(plugin.Creator): icon = "gears" write_color_sets = False write_face_sets = False + include_user_defined_attributes = False def __init__(self, *args, **kwargs): super(CreatePointCache, self).__init__(*args, **kwargs) @@ -31,9 +34,17 @@ class CreatePointCache(plugin.Creator): self.data["refresh"] = False # Default to suspend refresh. # Add options for custom attributes + value = self.include_user_defined_attributes + self.data["includeUserDefinedAttributes"] = value self.data["attr"] = "" self.data["attrPrefix"] = "" # Default to not send to farm. 
self.data["farm"] = False self.data["priority"] = 50 + + def process(self): + instance = super(CreatePointCache, self).process() + + assProxy = cmds.sets(name=instance + "_proxy_SET", empty=True) + cmds.sets(assProxy, forceElement=instance) diff --git a/openpype/hosts/maya/plugins/create/create_render.py b/openpype/hosts/maya/plugins/create/create_render.py index 8375149442..4681175808 100644 --- a/openpype/hosts/maya/plugins/create/create_render.py +++ b/openpype/hosts/maya/plugins/create/create_render.py @@ -54,6 +54,7 @@ class CreateRender(plugin.Creator): tileRendering (bool): Instance is set to tile rendering mode. We won't submit actual render, but we'll make publish job to wait for Tile Assembly job done and then publish. + strict_error_checking (bool): Enable/disable error checking on DL See Also: https://pype.club/docs/artist_hosts_maya#creating-basic-render-setup @@ -180,16 +181,34 @@ class CreateRender(plugin.Creator): primary_pool = pool_setting["primary_pool"] sorted_pools = self._set_default_pool(list(pools), primary_pool) - cmds.addAttr(self.instance, longName="primaryPool", - attributeType="enum", - enumName=":".join(sorted_pools)) + cmds.addAttr( + self.instance, + longName="primaryPool", + attributeType="enum", + enumName=":".join(sorted_pools) + ) + cmds.setAttr( + "{}.primaryPool".format(self.instance), + 0, + keyable=False, + channelBox=True + ) pools = ["-"] + pools secondary_pool = pool_setting["secondary_pool"] sorted_pools = self._set_default_pool(list(pools), secondary_pool) - cmds.addAttr("{}.secondaryPool".format(self.instance), - attributeType="enum", - enumName=":".join(sorted_pools)) + cmds.addAttr( + self.instance, + longName="secondaryPool", + attributeType="enum", + enumName=":".join(sorted_pools) + ) + cmds.setAttr( + "{}.secondaryPool".format(self.instance), + 0, + keyable=False, + channelBox=True + ) def _create_render_settings(self): """Create instance settings.""" @@ -259,6 +278,12 @@ class CreateRender(plugin.Creator): default_priority) self.data["tile_priority"] = tile_priority + strict_error_checking = maya_submit_dl.get("strict_error_checking", + True) + self.data["strict_error_checking"] = strict_error_checking + + # Pool attributes should be last since they will be recreated when + # the deadline server changes. 
pool_setting = (self._project_settings["deadline"] ["publish"] ["CollectDeadlinePools"]) diff --git a/openpype/hosts/maya/plugins/create/create_review.py b/openpype/hosts/maya/plugins/create/create_review.py index ba51ffa009..40ae99b57c 100644 --- a/openpype/hosts/maya/plugins/create/create_review.py +++ b/openpype/hosts/maya/plugins/create/create_review.py @@ -1,8 +1,14 @@ +import os from collections import OrderedDict +import json + from openpype.hosts.maya.api import ( lib, plugin ) +from openpype.settings import get_project_settings +from openpype.pipeline import get_current_project_name, get_current_task_name +from openpype.client import get_asset_by_name class CreateReview(plugin.Creator): @@ -25,21 +31,46 @@ class CreateReview(plugin.Creator): "depth peeling", "alpha cut" ] + useMayaTimeline = True + panZoom = False def __init__(self, *args, **kwargs): super(CreateReview, self).__init__(*args, **kwargs) - - # get basic animation data : start / end / handles / steps data = OrderedDict(**self.data) - animation_data = lib.collect_animation_data(fps=True) - for key, value in animation_data.items(): + + project_name = get_current_project_name() + asset_doc = get_asset_by_name(project_name, data["asset"]) + task_name = get_current_task_name() + preset = lib.get_capture_preset( + task_name, + asset_doc["data"]["tasks"][task_name]["type"], + data["subset"], + get_project_settings(project_name), + self.log + ) + if os.environ.get("OPENPYPE_DEBUG") == "1": + self.log.debug( + "Using preset: {}".format( + json.dumps(preset, indent=4, sort_keys=True) + ) + ) + + # Option for using Maya or asset frame range in settings. + frame_range = lib.get_frame_range() + if self.useMayaTimeline: + frame_range = lib.collect_animation_data(fps=True) + for key, value in frame_range.items(): data[key] = value - data["review_width"] = self.Width - data["review_height"] = self.Height - data["isolate"] = self.isolate + data["fps"] = lib.collect_animation_data(fps=True)["fps"] + data["keepImages"] = self.keepImages - data["imagePlane"] = self.imagePlane data["transparency"] = self.transparency + data["review_width"] = preset["Resolution"]["width"] + data["review_height"] = preset["Resolution"]["height"] + data["isolate"] = preset["Generic"]["isolate_view"] + data["imagePlane"] = preset["Viewport Options"]["imagePlane"] + data["panZoom"] = preset["Generic"]["pan_zoom"] + data["displayLights"] = lib.DISPLAY_LIGHTS_LABELS self.data = data diff --git a/openpype/hosts/maya/plugins/create/create_vrayproxy.py b/openpype/hosts/maya/plugins/create/create_vrayproxy.py index 5c0365b495..d135073e82 100644 --- a/openpype/hosts/maya/plugins/create/create_vrayproxy.py +++ b/openpype/hosts/maya/plugins/create/create_vrayproxy.py @@ -9,6 +9,9 @@ class CreateVrayProxy(plugin.Creator): family = "vrayproxy" icon = "gears" + vrmesh = True + alembic = True + def __init__(self, *args, **kwargs): super(CreateVrayProxy, self).__init__(*args, **kwargs) @@ -18,3 +21,6 @@ class CreateVrayProxy(plugin.Creator): # Write vertex colors self.data["vertexColors"] = False + + self.data["vrmesh"] = self.vrmesh + self.data["alembic"] = self.alembic diff --git a/openpype/hosts/maya/plugins/create/create_xgen.py b/openpype/hosts/maya/plugins/create/create_xgen.py index 8672c06a1e..70e23cf47b 100644 --- a/openpype/hosts/maya/plugins/create/create_xgen.py +++ b/openpype/hosts/maya/plugins/create/create_xgen.py @@ -2,9 +2,9 @@ from openpype.hosts.maya.api import plugin class CreateXgen(plugin.Creator): - """Xgen interactive export""" + """Xgen""" name = 
"xgen" - label = "Xgen Interactive" + label = "Xgen" family = "xgen" icon = "pagelines" diff --git a/openpype/hosts/maya/plugins/inventory/connect_geometry.py b/openpype/hosts/maya/plugins/inventory/connect_geometry.py new file mode 100644 index 0000000000..03154b7afe --- /dev/null +++ b/openpype/hosts/maya/plugins/inventory/connect_geometry.py @@ -0,0 +1,153 @@ +from maya import cmds + +from openpype.pipeline import InventoryAction, get_representation_context +from openpype.hosts.maya.api.lib import get_id + + +class ConnectGeometry(InventoryAction): + """Connect geometries within containers. + + Source container will connect to the target containers, by searching for + matching geometry IDs (cbid). + Source containers are of family; "animation" and "pointcache". + The connection with be done with a live world space blendshape. + """ + + label = "Connect Geometry" + icon = "link" + color = "white" + + def process(self, containers): + # Validate selection is more than 1. + message = ( + "Only 1 container selected. 2+ containers needed for this action." + ) + if len(containers) == 1: + self.display_warning(message) + return + + # Categorize containers by family. + containers_by_family = {} + for container in containers: + family = get_representation_context( + container["representation"] + )["subset"]["data"]["family"] + try: + containers_by_family[family].append(container) + except KeyError: + containers_by_family[family] = [container] + + # Validate to only 1 source container. + source_containers = containers_by_family.get("animation", []) + source_containers += containers_by_family.get("pointcache", []) + source_container_namespaces = [ + x["namespace"] for x in source_containers + ] + message = ( + "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " + "\"animation\" or \"pointcache\".".format( + len(source_containers), source_container_namespaces + ) + ) + if len(source_containers) != 1: + self.display_warning(message) + return + + source_object = source_containers[0]["objectName"] + + # Collect matching geometry transforms based cbId attribute. + target_containers = [] + for family, containers in containers_by_family.items(): + if family in ["animation", "pointcache"]: + continue + + target_containers.extend(containers) + + source_data = self.get_container_data(source_object) + matches = [] + node_types = set() + for target_container in target_containers: + target_data = self.get_container_data( + target_container["objectName"] + ) + node_types.update(target_data["node_types"]) + for id, transform in target_data["ids"].items(): + source_match = source_data["ids"].get(id) + if source_match: + matches.append([source_match, transform]) + + # Message user about what is about to happen. + if not matches: + self.display_warning("No matching geometries found.") + return + + message = "Connecting geometries:\n\n" + for match in matches: + message += "{} > {}\n".format(match[0], match[1]) + + choice = self.display_warning(message, show_cancel=True) + if choice is False: + return + + # Setup live worldspace blendshape connection. + for source, target in matches: + blendshape = cmds.blendShape(source, target)[0] + cmds.setAttr(blendshape + ".origin", 0) + cmds.setAttr(blendshape + "." + target.split(":")[-1], 1) + + # Update Xgen if in any of the containers. + if "xgmPalette" in node_types: + cmds.xgmPreview() + + def get_container_data(self, container): + """Collects data about the container nodes. + + Args: + container (dict): Container instance. 
+ + Returns: + data (dict): + "node_types": All node types in container nodes. + "ids": If the node is a mesh, we collect its parent transform + id. + """ + data = {"node_types": set(), "ids": {}} + ref_node = cmds.sets(container, query=True, nodesOnly=True)[0] + for node in cmds.referenceQuery(ref_node, nodes=True): + node_type = cmds.nodeType(node) + data["node_types"].add(node_type) + + # Only interested in mesh transforms for connecting geometry with + # blendshape. + if node_type != "mesh": + continue + + transform = cmds.listRelatives(node, parent=True)[0] + data["ids"][get_id(transform)] = transform + + return data + + def display_warning(self, message, show_cancel=False): + """Show feedback to user. + + Returns: + bool + """ + + from qtpy import QtWidgets + + accept = QtWidgets.QMessageBox.Ok + if show_cancel: + buttons = accept | QtWidgets.QMessageBox.Cancel + else: + buttons = accept + + state = QtWidgets.QMessageBox.warning( + None, + "", + message, + buttons=buttons, + defaultButton=accept + ) + + return state == accept diff --git a/openpype/hosts/maya/plugins/inventory/connect_xgen.py b/openpype/hosts/maya/plugins/inventory/connect_xgen.py new file mode 100644 index 0000000000..177971f176 --- /dev/null +++ b/openpype/hosts/maya/plugins/inventory/connect_xgen.py @@ -0,0 +1,168 @@ +from maya import cmds +import xgenm + +from openpype.pipeline import ( + InventoryAction, get_representation_context, get_representation_path +) + + +class ConnectXgen(InventoryAction): + """Connect Xgen with an animation or pointcache. + """ + + label = "Connect Xgen" + icon = "link" + color = "white" + + def process(self, containers): + # Validate selection is more than 1. + message = ( + "Only 1 container selected. 2+ containers needed for this action." + ) + if len(containers) == 1: + self.display_warning(message) + return + + # Categorize containers by family. + containers_by_family = {} + for container in containers: + family = get_representation_context( + container["representation"] + )["subset"]["data"]["family"] + try: + containers_by_family[family].append(container) + except KeyError: + containers_by_family[family] = [container] + + # Validate to only 1 source container. + source_containers = containers_by_family.get("animation", []) + source_containers += containers_by_family.get("pointcache", []) + source_container_namespaces = [ + x["namespace"] for x in source_containers + ] + message = ( + "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " + "\"animation\" or \"pointcache\".".format( + len(source_containers), source_container_namespaces + ) + ) + if len(source_containers) != 1: + self.display_warning(message) + return + + source_container = source_containers[0] + source_object = source_container["objectName"] + + # Validate source representation is an alembic. + source_path = get_representation_path( + get_representation_context( + source_container["representation"] + )["representation"] + ).replace("\\", "/") + message = "Animation container \"{}\" is not an alembic:\n{}".format( + source_container["namespace"], source_path + ) + if not source_path.endswith(".abc"): + self.display_warning(message) + return + + # Target containers. + target_containers = [] + for family, containers in containers_by_family.items(): + if family in ["animation", "pointcache"]: + continue + + target_containers.extend(containers) + + # Inform user of connections from source representation to target + # descriptions. 
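+        # Gather (palette, description) pairs from each target's referenced
+        # xgmPalette nodes to list them for the user.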
+ descriptions_data = [] + connections_msg = "" + for target_container in target_containers: + reference_node = cmds.sets( + target_container["objectName"], query=True + )[0] + palettes = cmds.ls( + cmds.referenceQuery(reference_node, nodes=True), + type="xgmPalette" + ) + for palette in palettes: + for description in xgenm.descriptions(palette): + descriptions_data.append([palette, description]) + connections_msg += "\n{}/{}".format(palette, description) + + message = "Connecting \"{}\" to:\n".format( + source_container["namespace"] + ) + message += connections_msg + choice = self.display_warning(message, show_cancel=True) + if choice is False: + return + + # Recreate "xgenContainers" attribute to reset. + compound_name = "xgenContainers" + attr = "{}.{}".format(source_object, compound_name) + if cmds.objExists(attr): + cmds.deleteAttr(attr) + + cmds.addAttr( + source_object, + longName=compound_name, + attributeType="compound", + numberOfChildren=1, + multi=True + ) + + # Connect target containers. + for target_container in target_containers: + cmds.addAttr( + source_object, + longName="container", + attributeType="message", + parent=compound_name + ) + index = target_containers.index(target_container) + cmds.connectAttr( + target_container["objectName"] + ".message", + source_object + ".{}[{}].container".format( + compound_name, index + ) + ) + + # Setup cache on Xgen + object = "SplinePrimitive" + for palette, description in descriptions_data: + xgenm.setAttr("useCache", "true", palette, description, object) + xgenm.setAttr("liveMode", "false", palette, description, object) + xgenm.setAttr( + "cacheFileName", source_path, palette, description, object + ) + + # Refresh UI and viewport. + de = xgenm.xgGlobal.DescriptionEditor + de.refresh("Full") + + def display_warning(self, message, show_cancel=False): + """Show feedback to user. + + Returns: + bool + """ + + from qtpy import QtWidgets + + accept = QtWidgets.QMessageBox.Ok + if show_cancel: + buttons = accept | QtWidgets.QMessageBox.Cancel + else: + buttons = accept + + state = QtWidgets.QMessageBox.warning( + None, + "", + message, + buttons=buttons, + defaultButton=accept + ) + + return state == accept diff --git a/openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py b/openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py new file mode 100644 index 0000000000..924a1a4627 --- /dev/null +++ b/openpype/hosts/maya/plugins/inventory/connect_yeti_rig.py @@ -0,0 +1,178 @@ +import os +import json +from collections import defaultdict + +from maya import cmds + +from openpype.pipeline import ( + InventoryAction, get_representation_context, get_representation_path +) +from openpype.hosts.maya.api.lib import get_container_members, get_id + + +class ConnectYetiRig(InventoryAction): + """Connect Yeti Rig with an animation or pointcache.""" + + label = "Connect Yeti Rig" + icon = "link" + color = "white" + + def process(self, containers): + # Validate selection is more than 1. + message = ( + "Only 1 container selected. 2+ containers needed for this action." + ) + if len(containers) == 1: + self.display_warning(message) + return + + # Categorize containers by family. + containers_by_family = defaultdict(list) + for container in containers: + family = get_representation_context( + container["representation"] + )["subset"]["data"]["family"] + containers_by_family[family].append(container) + + # Validate to only 1 source container. 
+ source_containers = containers_by_family.get("animation", []) + source_containers += containers_by_family.get("pointcache", []) + source_container_namespaces = [ + x["namespace"] for x in source_containers + ] + message = ( + "{} animation containers selected:\n\n{}\n\nOnly select 1 of type " + "\"animation\" or \"pointcache\".".format( + len(source_containers), source_container_namespaces + ) + ) + if len(source_containers) != 1: + self.display_warning(message) + return + + source_container = source_containers[0] + source_ids = self.nodes_by_id(source_container) + + # Target containers. + target_ids = {} + inputs = [] + + yeti_rig_containers = containers_by_family.get("yetiRig") + if not yeti_rig_containers: + self.display_warning( + "Select at least one yetiRig container" + ) + return + + for container in yeti_rig_containers: + target_ids.update(self.nodes_by_id(container)) + + maya_file = get_representation_path( + get_representation_context( + container["representation"] + )["representation"] + ) + _, ext = os.path.splitext(maya_file) + settings_file = maya_file.replace(ext, ".rigsettings") + if not os.path.exists(settings_file): + continue + + with open(settings_file) as f: + inputs.extend(json.load(f)["inputs"]) + + # Compare loaded connections to scene. + for input in inputs: + source_node = source_ids.get(input["sourceID"]) + target_node = target_ids.get(input["destinationID"]) + + if not source_node or not target_node: + self.log.debug( + "Could not find nodes for input:\n" + + json.dumps(input, indent=4, sort_keys=True) + ) + continue + source_attr, target_attr = input["connections"] + + if not cmds.attributeQuery( + source_attr, node=source_node, exists=True + ): + self.log.debug( + "Could not find attribute {} on node {} for " + "input:\n{}".format( + source_attr, + source_node, + json.dumps(input, indent=4, sort_keys=True) + ) + ) + continue + + if not cmds.attributeQuery( + target_attr, node=target_node, exists=True + ): + self.log.debug( + "Could not find attribute {} on node {} for " + "input:\n{}".format( + target_attr, + target_node, + json.dumps(input, indent=4, sort_keys=True) + ) + ) + continue + + source_plug = "{}.{}".format( + source_node, source_attr + ) + target_plug = "{}.{}".format( + target_node, target_attr + ) + if cmds.isConnected( + source_plug, target_plug, ignoreUnitConversion=True + ): + self.log.debug( + "Connection already exists: {} -> {}".format( + source_plug, target_plug + ) + ) + continue + + cmds.connectAttr(source_plug, target_plug, force=True) + self.log.debug( + "Connected attributes: {} -> {}".format( + source_plug, target_plug + ) + ) + + def nodes_by_id(self, container): + ids = {} + for member in get_container_members(container): + id = get_id(member) + if not id: + continue + ids[id] = member + + return ids + + def display_warning(self, message, show_cancel=False): + """Show feedback to user. 
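+
+        Displays a modal warning dialog; when show_cancel is True a Cancel
+        button is added and choosing it makes this method return False.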
+
+        Returns:
+            bool
+        """
+
+        from qtpy import QtWidgets
+
+        accept = QtWidgets.QMessageBox.Ok
+        if show_cancel:
+            buttons = accept | QtWidgets.QMessageBox.Cancel
+        else:
+            buttons = accept
+
+        state = QtWidgets.QMessageBox.warning(
+            None,
+            "",
+            message,
+            buttons=buttons,
+            defaultButton=accept
+        )
+
+        return state == accept
diff --git a/openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py b/openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py
new file mode 100644
index 0000000000..39bc59fbbf
--- /dev/null
+++ b/openpype/hosts/maya/plugins/inventory/rig_recreate_animation_instance.py
@@ -0,0 +1,35 @@
+from openpype.pipeline import (
+    InventoryAction,
+    get_representation_context
+)
+from openpype.hosts.maya.api.lib import (
+    create_rig_animation_instance,
+    get_container_members,
+)
+
+
+class RecreateRigAnimationInstance(InventoryAction):
+    """Recreate animation publish instance for loaded rigs"""
+
+    label = "Recreate rig animation instance"
+    icon = "wrench"
+    color = "#888888"
+
+    @staticmethod
+    def is_compatible(container):
+        return (
+            container.get("loader") == "ReferenceLoader"
+            and container.get("name", "").startswith("rig")
+        )
+
+    def process(self, containers):
+
+        for container in containers:
+            # todo: delete an existing entry if it exists or skip creation
+
+            namespace = container["namespace"]
+            representation_id = container["representation"]
+            context = get_representation_context(representation_id)
+            nodes = get_container_members(container)
+
+            create_rig_animation_instance(nodes, context, namespace)
diff --git a/openpype/hosts/maya/plugins/load/_load_animation.py b/openpype/hosts/maya/plugins/load/_load_animation.py
index b419a730b5..2ba5fe6b64 100644
--- a/openpype/hosts/maya/plugins/load/_load_animation.py
+++ b/openpype/hosts/maya/plugins/load/_load_animation.py
@@ -14,7 +14,7 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
     icon = "code-fork"
     color = "orange"
 
-    def process_reference(self, context, name, namespace, data):
+    def process_reference(self, context, name, namespace, options):
         import maya.cmds as cmds
         from openpype.hosts.maya.api.lib import unique_namespace
 
@@ -41,7 +41,7 @@ class AbcLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
                           namespace=namespace,
                           sharedReferenceFile=False,
                           groupReference=True,
-                          groupName="{}:{}".format(namespace, name),
+                          groupName=options['group_name'],
                           reference=True,
                           returnNewNodes=True)
diff --git a/openpype/hosts/maya/plugins/load/actions.py b/openpype/hosts/maya/plugins/load/actions.py
index 98c8192294..ba69debc40 100644
--- a/openpype/hosts/maya/plugins/load/actions.py
+++ b/openpype/hosts/maya/plugins/load/actions.py
@@ -93,7 +93,20 @@ class ImportMayaLoader(load.LoaderPlugin):
     """
 
     representations = ["ma", "mb", "obj"]
-    families = ["*"]
+    families = [
+        "model",
+        "pointcache",
+        "proxyAbc",
+        "animation",
+        "mayaAscii",
+        "mayaScene",
+        "setdress",
+        "layout",
+        "camera",
+        "rig",
+        "camerarig",
+        "staticMesh"
+    ]
 
     label = "Import"
     order = 10
@@ -105,7 +118,7 @@ class ImportMayaLoader(load.LoaderPlugin):
                 "clean_import",
                 label="Clean import",
                 default=False,
-                help="Should all occurences of cbId be purged?"
+                help="Should all occurrences of cbId be purged?"
) ] diff --git a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py b/openpype/hosts/maya/plugins/load/load_abc_to_standin.py deleted file mode 100644 index 70866a3ba6..0000000000 --- a/openpype/hosts/maya/plugins/load/load_abc_to_standin.py +++ /dev/null @@ -1,132 +0,0 @@ -import os - -from openpype.pipeline import ( - legacy_io, - load, - get_representation_path -) -from openpype.settings import get_project_settings - - -class AlembicStandinLoader(load.LoaderPlugin): - """Load Alembic as Arnold Standin""" - - families = ["animation", "model", "proxyAbc", "pointcache"] - representations = ["abc"] - - label = "Import Alembic as Arnold Standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - - version = context["version"] - version_data = version.get("data", {}) - family = version["data"]["families"] - self.log.info("version_data: {}\n".format(version_data)) - self.log.info("family: {}\n".format(family)) - frameStart = version_data.get("frameStart", None) - - asset = context["asset"]["name"] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # Root group - label = "{}:{}".format(namespace, name) - root = cmds.group(name=label, empty=True) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings["maya"]["load"]["colors"] - fps = legacy_io.Session["AVALON_FPS"] - c = colors.get(family[0]) - if c is not None: - r = (float(c[0]) / 255) - g = (float(c[1]) / 255) - b = (float(c[2]) / 255) - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - r, g, b) - - transform_name = label + "_ABC" - - standinShape = cmds.ls(mtoa.ui.arnoldmenu.createStandIn())[0] - standin = cmds.listRelatives(standinShape, parent=True, - typ="transform") - standin = cmds.rename(standin, transform_name) - standinShape = cmds.listRelatives(standin, children=True)[0] - - cmds.parent(standin, root) - - # Set the standin filepath - cmds.setAttr(standinShape + ".dso", self.fname, type="string") - cmds.setAttr(standinShape + ".abcFPS", float(fps)) - - if frameStart is None: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - elif "model" in family: - cmds.setAttr(standinShape + ".useFrameExtension", 0) - - else: - cmds.setAttr(standinShape + ".useFrameExtension", 1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - fps = legacy_io.Session["AVALON_FPS"] - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - self.log.info("container:{}".format(container)) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.abcFPS.set(float(fps)) - if "modelMain" in container['objectName']: - standin.useFrameExtension.set(0) - else: - standin.useFrameExtension.set(1) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - 
self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_arnold_standin.py b/openpype/hosts/maya/plugins/load/load_arnold_standin.py new file mode 100644 index 0000000000..7c3a732389 --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_arnold_standin.py @@ -0,0 +1,222 @@ +import os +import clique + +import maya.cmds as cmds + +from openpype.settings import get_project_settings +from openpype.pipeline import ( + load, + get_representation_path +) +from openpype.hosts.maya.api.lib import ( + unique_namespace, get_attribute_input, maintained_selection +) +from openpype.hosts.maya.api.pipeline import containerise + + +def is_sequence(files): + sequence = False + collections, remainder = clique.assemble(files) + if collections: + sequence = True + + return sequence + + +class ArnoldStandinLoader(load.LoaderPlugin): + """Load as Arnold standin""" + + families = ["ass", "animation", "model", "proxyAbc", "pointcache"] + representations = ["ass", "abc"] + + label = "Load as Arnold standin" + order = -5 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, options): + + # Make sure to load arnold before importing `mtoa.ui.arnoldmenu` + cmds.loadPlugin("mtoa", quiet=True) + import mtoa.ui.arnoldmenu + + version = context['version'] + version_data = version.get("data", {}) + + self.log.info("version_data: {}\n".format(version_data)) + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + # Root group + label = "{}:{}".format(namespace, name) + root = cmds.group(name=label, empty=True) + + # Set color. 
+ settings = get_project_settings(context["project"]["name"]) + color = settings['maya']['load']['colors'].get('ass') + if color is not None: + cmds.setAttr(root + ".useOutlinerColor", True) + cmds.setAttr( + root + ".outlinerColor", color[0], color[1], color[2] + ) + + with maintained_selection(): + # Create transform with shape + transform_name = label + "_standin" + + standin_shape = mtoa.ui.arnoldmenu.createStandIn() + standin = cmds.listRelatives(standin_shape, parent=True)[0] + standin = cmds.rename(standin, transform_name) + standin_shape = cmds.listRelatives(standin, shapes=True)[0] + + cmds.parent(standin, root) + + # Set the standin filepath + path, operator = self._setup_proxy( + standin_shape, self.fname, namespace + ) + cmds.setAttr(standin_shape + ".dso", path, type="string") + sequence = is_sequence(os.listdir(os.path.dirname(self.fname))) + cmds.setAttr(standin_shape + ".useFrameExtension", sequence) + + nodes = [root, standin, standin_shape] + if operator is not None: + nodes.append(operator) + self[:] = nodes + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__) + + def get_next_free_multi_index(self, attr_name): + """Find the next unconnected multi index at the input attribute.""" + for index in range(10000000): + connection_info = cmds.connectionInfo( + "{}[{}]".format(attr_name, index), + sourceFromDestination=True + ) + if len(connection_info or []) == 0: + return index + + def _get_proxy_path(self, path): + basename_split = os.path.basename(path).split(".") + proxy_basename = ( + basename_split[0] + "_proxy." + ".".join(basename_split[1:]) + ) + proxy_path = "/".join([os.path.dirname(path), proxy_basename]) + return proxy_basename, proxy_path + + def _setup_proxy(self, shape, path, namespace): + proxy_basename, proxy_path = self._get_proxy_path(path) + + options_node = "defaultArnoldRenderOptions" + merge_operator = get_attribute_input(options_node + ".operator") + if merge_operator is None: + merge_operator = cmds.createNode("aiMerge") + cmds.connectAttr( + merge_operator + ".message", options_node + ".operator" + ) + + merge_operator = merge_operator.split(".")[0] + + string_replace_operator = cmds.createNode( + "aiStringReplace", name=namespace + ":string_replace_operator" + ) + node_type = "alembic" if path.endswith(".abc") else "procedural" + cmds.setAttr( + string_replace_operator + ".selection", + "*.(@node=='{}')".format(node_type), + type="string" + ) + cmds.setAttr( + string_replace_operator + ".match", + proxy_basename, + type="string" + ) + cmds.setAttr( + string_replace_operator + ".replace", + os.path.basename(path), + type="string" + ) + + cmds.connectAttr( + string_replace_operator + ".out", + "{}.inputs[{}]".format( + merge_operator, + self.get_next_free_multi_index(merge_operator + ".inputs") + ) + ) + + # We setup the string operator no matter whether there is a proxy or + # not. This makes it easier to update since the string operator will + # always be created. Return original path to use for standin. 
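+        # (E.g. a published "model.abc" is matched against "model_proxy.abc"
+        # from _get_proxy_path above; if the proxy file does not exist the
+        # original path is returned unchanged.)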
+        if not os.path.exists(proxy_path):
+            return path, string_replace_operator
+
+        return proxy_path, string_replace_operator
+
+    def update(self, container, representation):
+        # Update the standin
+        members = cmds.sets(container['objectName'], query=True)
+        for member in members:
+            if cmds.nodeType(member) == "aiStringReplace":
+                string_replace_operator = member
+
+            shapes = cmds.listRelatives(member, shapes=True)
+            if not shapes:
+                continue
+            if cmds.nodeType(shapes[0]) == "aiStandIn":
+                standin = shapes[0]
+
+        path = get_representation_path(representation)
+        proxy_basename, proxy_path = self._get_proxy_path(path)
+
+        # Whether there is a proxy or not, we still update the string operator.
+        # If no proxy exists, the string operator won't replace anything.
+        cmds.setAttr(
+            string_replace_operator + ".match",
+            proxy_basename,
+            type="string"
+        )
+        cmds.setAttr(
+            string_replace_operator + ".replace",
+            os.path.basename(path),
+            type="string"
+        )
+
+        dso_path = path
+        if os.path.exists(proxy_path):
+            dso_path = proxy_path
+        cmds.setAttr(standin + ".dso", dso_path, type="string")
+
+        sequence = is_sequence(os.listdir(os.path.dirname(path)))
+        cmds.setAttr(standin + ".useFrameExtension", sequence)
+
+        cmds.setAttr(
+            container["objectName"] + ".representation",
+            str(representation["_id"]),
+            type="string"
+        )
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
diff --git a/openpype/hosts/maya/plugins/load/load_ass.py b/openpype/hosts/maya/plugins/load/load_ass.py
deleted file mode 100644
index 5db6fc3dfa..0000000000
--- a/openpype/hosts/maya/plugins/load/load_ass.py
+++ /dev/null
@@ -1,290 +0,0 @@
-import os
-import clique
-
-from openpype.settings import get_project_settings
-from openpype.pipeline import (
-    load,
-    get_representation_path
-)
-import openpype.hosts.maya.api.plugin
-from openpype.hosts.maya.api.plugin import get_reference_node
-from openpype.hosts.maya.api.lib import (
-    maintained_selection,
-    unique_namespace
-)
-from openpype.hosts.maya.api.pipeline import containerise
-
-
-class AssProxyLoader(openpype.hosts.maya.api.plugin.ReferenceLoader):
-    """Load Arnold Proxy as reference"""
-
-    families = ["ass"]
-    representations = ["ass"]
-
-    label = "Reference .ASS standin with Proxy"
-    order = -10
-    icon = "code-fork"
-    color = "orange"
-
-    def process_reference(self, context, name, namespace, options):
-
-        import maya.cmds as cmds
-        import pymel.core as pm
-
-        version = context['version']
-        version_data = version.get("data", {})
-
-        self.log.info("version_data: {}\n".format(version_data))
-
-        frameStart = version_data.get("frameStart", None)
-
-        try:
-            family = context["representation"]["context"]["family"]
-        except ValueError:
-            family = "ass"
-
-        with maintained_selection():
-
-            groupName = "{}:{}".format(namespace, name)
-            path = self.fname
-            proxyPath_base = os.path.splitext(path)[0]
-
-            if frameStart is not None:
-                proxyPath_base = os.path.splitext(proxyPath_base)[0]
-
-                publish_folder = os.path.split(path)[0]
-                files_in_folder = os.listdir(publish_folder)
-                collections, remainder = clique.assemble(files_in_folder)
-
-                if collections:
-                    hashes = collections[0].padding * '#'
-                    coll =
collections[0].format('{head}[index]{tail}') - filename = coll.replace('[index]', hashes) - - path = os.path.join(publish_folder, filename) - - proxyPath = proxyPath_base + ".ma" - - project_name = context["project"]["name"] - file_url = self.prepare_root_value(proxyPath, - project_name) - - nodes = cmds.file(file_url, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.makeIdentity(groupName, apply=False, rotate=True, - translate=True, scale=True) - - # Set attributes - proxyShape = pm.ls(nodes, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - settings = get_project_settings(project_name) - colors = settings['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) - ) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - from maya import cmds - import pymel.core as pm - - node = container["objectName"] - - representation["context"].pop("frame", None) - path = get_representation_path(representation) - print(path) - # path = self.fname - print(self.fname) - proxyPath = os.path.splitext(path)[0] + ".ma" - print(proxyPath) - - # Get reference node from container members - members = cmds.sets(node, query=True, nodesOnly=True) - reference_node = get_reference_node(members) - - assert os.path.exists(proxyPath), "%s does not exist." % proxyPath - - try: - file_url = self.prepare_root_value(proxyPath, - representation["context"] - ["project"] - ["name"]) - content = cmds.file(file_url, - loadReference=reference_node, - type="mayaAscii", - returnNewNodes=True) - - # Set attributes - proxyShape = pm.ls(content, type="mesh")[0] - - proxyShape.aiTranslator.set('procedural') - proxyShape.dso.set(path) - proxyShape.aiOverrideShaders.set(0) - - except RuntimeError as exc: - # When changing a reference to a file that has load errors the - # command will raise an error even if the file is still loaded - # correctly (e.g. when raising errors on Arnold attributes) - # When the file is loaded and has content, we consider it's fine. - if not cmds.referenceQuery(reference_node, isLoaded=True): - raise - - content = cmds.referenceQuery(reference_node, - nodes=True, - dagPath=True) - if not content: - raise - - self.log.warning("Ignoring file read error:\n%s", exc) - - # Add new nodes of the reference to the container - cmds.sets(content, forceElement=node) - - # Remove any placeHolderList attribute entries from the set that - # are remaining from nodes being removed from the referenced file. 
- members = cmds.sets(node, query=True) - invalid = [x for x in members if ".placeHolderList" in x] - if invalid: - cmds.sets(invalid, remove=node) - - # Update metadata - cmds.setAttr("{}.representation".format(node), - str(representation["_id"]), - type="string") - - -class AssStandinLoader(load.LoaderPlugin): - """Load .ASS file as standin""" - - families = ["ass"] - representations = ["ass"] - - label = "Load .ASS file as standin" - order = -5 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, options): - - import maya.cmds as cmds - import mtoa.ui.arnoldmenu - import pymel.core as pm - - version = context['version'] - version_data = version.get("data", {}) - - self.log.info("version_data: {}\n".format(version_data)) - - frameStart = version_data.get("frameStart", None) - - asset = context['asset']['name'] - namespace = namespace or unique_namespace( - asset + "_", - prefix="_" if asset[0].isdigit() else "", - suffix="_", - ) - - # cmds.loadPlugin("gpuCache", quiet=True) - - # Root group - label = "{}:{}".format(namespace, name) - root = pm.group(name=label, empty=True) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings['maya']['load']['colors'] - - c = colors.get('ass') - if c is not None: - cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - c[0], c[1], c[2]) - - # Create transform with shape - transform_name = label + "_ASS" - # transform = pm.createNode("transform", name=transform_name, - # parent=root) - - standinShape = pm.PyNode(mtoa.ui.arnoldmenu.createStandIn()) - standin = standinShape.getParent() - standin.rename(transform_name) - - pm.parent(standin, root) - - # Set the standin filepath - standinShape.dso.set(self.fname) - if frameStart is not None: - standinShape.useFrameExtension.set(1) - - nodes = [root, standin] - self[:] = nodes - - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) - - def update(self, container, representation): - - import pymel.core as pm - - path = get_representation_path(representation) - - files_in_path = os.listdir(os.path.split(path)[0]) - sequence = 0 - collections, remainder = clique.assemble(files_in_path) - if collections: - sequence = 1 - - # Update the standin - standins = list() - members = pm.sets(container['objectName'], query=True) - for member in members: - shape = member.getShape() - if (shape and shape.type() == "aiStandIn"): - standins.append(shape) - - for standin in standins: - standin.dso.set(path) - standin.useFrameExtension.set(sequence) - - container = pm.PyNode(container["objectName"]) - container.representation.set(str(representation["_id"])) - - def switch(self, container, representation): - self.update(container, representation) - - def remove(self, container): - import maya.cmds as cmds - members = cmds.sets(container['objectName'], query=True) - cmds.lockNode(members, lock=False) - cmds.delete([container['objectName']] + members) - - # Clean up the namespace - try: - cmds.namespace(removeNamespace=container['namespace'], - deleteNamespaceContent=True) - except RuntimeError: - pass diff --git a/openpype/hosts/maya/plugins/load/load_assembly.py b/openpype/hosts/maya/plugins/load/load_assembly.py index 902f38695c..275f21be5d 100644 --- a/openpype/hosts/maya/plugins/load/load_assembly.py +++ b/openpype/hosts/maya/plugins/load/load_assembly.py @@ -1,8 +1,14 @@ +import maya.cmds as cmds + from openpype.pipeline import ( load, remove_container ) +from 
openpype.hosts.maya.api.pipeline import containerise +from openpype.hosts.maya.api.lib import unique_namespace +from openpype.hosts.maya.api import setdress + class AssemblyLoader(load.LoaderPlugin): @@ -16,9 +22,6 @@ class AssemblyLoader(load.LoaderPlugin): def load(self, context, name, namespace, data): - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - asset = context['asset']['name'] namespace = namespace or unique_namespace( asset + "_", @@ -26,8 +29,6 @@ class AssemblyLoader(load.LoaderPlugin): suffix="_", ) - from openpype.hosts.maya.api import setdress - containers = setdress.load_package( filepath=self.fname, name=name, @@ -50,15 +51,11 @@ class AssemblyLoader(load.LoaderPlugin): def update(self, container, representation): - from openpype import setdress return setdress.update_package(container, representation) def remove(self, container): """Remove all sub containers""" - from openpype import setdress - import maya.cmds as cmds - # Remove all members member_containers = setdress.get_contained_containers(container) for member_container in member_containers: diff --git a/openpype/hosts/maya/plugins/load/load_audio.py b/openpype/hosts/maya/plugins/load/load_audio.py index 6f60cb5726..9e7fd96bdb 100644 --- a/openpype/hosts/maya/plugins/load/load_audio.py +++ b/openpype/hosts/maya/plugins/load/load_audio.py @@ -11,7 +11,7 @@ from openpype.pipeline import ( get_representation_path, ) from openpype.hosts.maya.api.pipeline import containerise -from openpype.hosts.maya.api.lib import unique_namespace +from openpype.hosts.maya.api.lib import unique_namespace, get_container_members class AudioLoader(load.LoaderPlugin): @@ -52,17 +52,15 @@ class AudioLoader(load.LoaderPlugin): ) def update(self, container, representation): - import pymel.core as pm - audio_node = None - for node in pm.PyNode(container["objectName"]).members(): - if node.nodeType() == "audio": - audio_node = node + members = get_container_members(container) + audio_nodes = cmds.ls(members, type="audio") - assert audio_node is not None, "Audio node not found." + assert audio_nodes is not None, "Audio node not found." 
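+        # cmds.ls() returns a list; the first audio node found among the
+        # container members is used.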
+ audio_node = audio_nodes[0] path = get_representation_path(representation) - audio_node.filename.set(path) + cmds.setAttr("{}.filename".format(audio_node), path, type="string") cmds.setAttr( container["objectName"] + ".representation", str(representation["_id"]), @@ -80,8 +78,12 @@ class AudioLoader(load.LoaderPlugin): asset = get_asset_by_id( project_name, subset["parent"], fields=["parent"] ) - audio_node.sourceStart.set(1 - asset["data"]["frameStart"]) - audio_node.sourceEnd.set(asset["data"]["frameEnd"]) + + source_start = 1 - asset["data"]["frameStart"] + source_end = asset["data"]["frameEnd"] + + cmds.setAttr("{}.sourceStart".format(audio_node), source_start) + cmds.setAttr("{}.sourceEnd".format(audio_node), source_end) def switch(self, container, representation): self.update(container, representation) diff --git a/openpype/hosts/maya/plugins/load/load_gpucache.py b/openpype/hosts/maya/plugins/load/load_gpucache.py index 07e5734f43..794b21eb5d 100644 --- a/openpype/hosts/maya/plugins/load/load_gpucache.py +++ b/openpype/hosts/maya/plugins/load/load_gpucache.py @@ -1,5 +1,9 @@ import os +import maya.cmds as cmds + +from openpype.hosts.maya.api.pipeline import containerise +from openpype.hosts.maya.api.lib import unique_namespace from openpype.pipeline import ( load, get_representation_path @@ -11,19 +15,15 @@ class GpuCacheLoader(load.LoaderPlugin): """Load Alembic as gpuCache""" families = ["model", "animation", "proxyAbc", "pointcache"] - representations = ["abc"] + representations = ["abc", "gpu_cache"] - label = "Import Gpu Cache" + label = "Load Gpu Cache" order = -5 icon = "code-fork" color = "orange" def load(self, context, name, namespace, data): - import maya.cmds as cmds - from openpype.hosts.maya.api.pipeline import containerise - from openpype.hosts.maya.api.lib import unique_namespace - asset = context['asset']['name'] namespace = namespace or unique_namespace( asset + "_", @@ -42,10 +42,9 @@ class GpuCacheLoader(load.LoaderPlugin): c = colors.get('model') if c is not None: cmds.setAttr(root + ".useOutlinerColor", 1) - cmds.setAttr(root + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) + cmds.setAttr( + root + ".outlinerColor", + (float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255) ) # Create transform with shape @@ -74,9 +73,6 @@ class GpuCacheLoader(load.LoaderPlugin): loader=self.__class__.__name__) def update(self, container, representation): - - import maya.cmds as cmds - path = get_representation_path(representation) # Update the cache @@ -96,7 +92,6 @@ class GpuCacheLoader(load.LoaderPlugin): self.update(container, representation) def remove(self, container): - import maya.cmds as cmds members = cmds.sets(container['objectName'], query=True) cmds.lockNode(members, lock=False) cmds.delete([container['objectName']] + members) diff --git a/openpype/hosts/maya/plugins/load/load_image.py b/openpype/hosts/maya/plugins/load/load_image.py new file mode 100644 index 0000000000..552bcc33af --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_image.py @@ -0,0 +1,337 @@ +import os +import copy + +from openpype.lib import EnumDef +from openpype.pipeline import ( + load, + get_representation_context +) +from openpype.pipeline.load.utils import get_representation_path_from_context +from openpype.pipeline.colorspace import ( + get_imageio_colorspace_from_filepath, + get_imageio_config, + get_imageio_file_rules +) +from openpype.settings import get_project_settings + +from openpype.hosts.maya.api.pipeline import containerise +from 
openpype.hosts.maya.api.lib import ( + unique_namespace, + namespaced +) + +from maya import cmds + + +def create_texture(): + """Create place2dTexture with file node with uv connections + + Mimics Maya "file [Texture]" creation. + """ + + place = cmds.shadingNode("place2dTexture", asUtility=True, name="place2d") + file = cmds.shadingNode("file", asTexture=True, name="file") + + connections = ["coverage", "translateFrame", "rotateFrame", "rotateUV", + "mirrorU", "mirrorV", "stagger", "wrapV", "wrapU", + "repeatUV", "offset", "noiseUV", "vertexUvThree", + "vertexUvTwo", "vertexUvOne", "vertexCameraOne"] + for attr in connections: + src = "{}.{}".format(place, attr) + dest = "{}.{}".format(file, attr) + cmds.connectAttr(src, dest) + + cmds.connectAttr(place + '.outUV', file + '.uvCoord') + cmds.connectAttr(place + '.outUvFilterSize', file + '.uvFilterSize') + + return file, place + + +def create_projection(): + """Create texture with place3dTexture and projection + + Mimics Maya "file [Projection]" creation. + """ + + file, place = create_texture() + projection = cmds.shadingNode("projection", asTexture=True, + name="projection") + place3d = cmds.shadingNode("place3dTexture", asUtility=True, + name="place3d") + + cmds.connectAttr(place3d + '.worldInverseMatrix[0]', + projection + ".placementMatrix") + cmds.connectAttr(file + '.outColor', projection + ".image") + + return file, place, projection, place3d + + +def create_stencil(): + """Create texture with extra place2dTexture offset and stencil + + Mimics Maya "file [Stencil]" creation. + """ + + file, place = create_texture() + + place_stencil = cmds.shadingNode("place2dTexture", asUtility=True, + name="place2d_stencil") + stencil = cmds.shadingNode("stencil", asTexture=True, name="stencil") + + for src_attr, dest_attr in [ + ("outUV", "uvCoord"), + ("outUvFilterSize", "uvFilterSize") + ]: + src_plug = "{}.{}".format(place_stencil, src_attr) + cmds.connectAttr(src_plug, "{}.{}".format(place, dest_attr)) + cmds.connectAttr(src_plug, "{}.{}".format(stencil, dest_attr)) + + return file, place, stencil, place_stencil + + +class FileNodeLoader(load.LoaderPlugin): + """File node loader.""" + + families = ["image", "plate", "render"] + label = "Load file node" + representations = ["exr", "tif", "png", "jpg"] + icon = "image" + color = "orange" + order = 2 + + options = [ + EnumDef( + "mode", + items={ + "texture": "Texture", + "projection": "Projection", + "stencil": "Stencil" + }, + default="texture", + label="Texture Mode" + ) + ] + + def load(self, context, name, namespace, data): + + asset = context['asset']['name'] + namespace = namespace or unique_namespace( + asset + "_", + prefix="_" if asset[0].isdigit() else "", + suffix="_", + ) + + with namespaced(namespace, new=True) as namespace: + # Create the nodes within the namespace + nodes = { + "texture": create_texture, + "projection": create_projection, + "stencil": create_stencil + }[data.get("mode", "texture")]() + + file_node = cmds.ls(nodes, type="file")[0] + + self._apply_representation_context(context, file_node) + + # For ease of access for the user select all the nodes and select + # the file node last so that UI shows its attributes by default + cmds.select(list(nodes) + [file_node], replace=True) + + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__ + ) + + def update(self, container, representation): + + members = cmds.sets(container['objectName'], query=True) + file_node = cmds.ls(members, type="file")[0] + 
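+        # Re-apply the file path, UDIM/sequence flags and colorspace for the
+        # newly selected representation.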
+        context = get_representation_context(representation)
+        self._apply_representation_context(context, file_node)
+
+        # Update representation
+        cmds.setAttr(
+            container["objectName"] + ".representation",
+            str(representation["_id"]),
+            type="string"
+        )
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        members = cmds.sets(container['objectName'], query=True)
+        cmds.lockNode(members, lock=False)
+        cmds.delete([container['objectName']] + members)
+
+        # Clean up the namespace
+        try:
+            cmds.namespace(removeNamespace=container['namespace'],
+                           deleteNamespaceContent=True)
+        except RuntimeError:
+            pass
+
+    def _apply_representation_context(self, context, file_node):
+        """Update the file node to match the context.
+
+        This sets the file node's attributes for:
+            - file path
+            - UDIM tiling mode (if it is a UDIM tile)
+            - use frame extension (if it is a sequence)
+            - colorspace
+
+        """
+
+        repre_context = context["representation"]["context"]
+        has_frames = repre_context.get("frame") is not None
+        has_udim = repre_context.get("udim") is not None
+
+        # Set UV tiling mode if UDIM tiles
+        if has_udim:
+            cmds.setAttr(file_node + ".uvTilingMode", 3)    # UDIM-tiles
+        else:
+            cmds.setAttr(file_node + ".uvTilingMode", 0)    # off
+
+        # Enable sequence if publish has `startFrame` and `endFrame` and
+        # `startFrame != endFrame`
+        if has_frames and self._is_sequence(context):
+            # When enabling useFrameExtension Maya automatically
+            # connects an expression to .frameExtension to set
+            # the current frame. However, this expression is generated
+            # with some delay and thus it'll show a warning if frame 0
+            # doesn't exist because we're explicitly setting the
+            # token.
+            cmds.setAttr(file_node + ".useFrameExtension", True)
+        else:
+            cmds.setAttr(file_node + ".useFrameExtension", False)
+
+        # Set the file node path attribute
+        path = self._format_path(context)
+        cmds.setAttr(file_node + ".fileTextureName", path, type="string")
+
+        # Set colorspace
+        colorspace = self._get_colorspace(context)
+        if colorspace:
+            cmds.setAttr(file_node + ".colorSpace", colorspace, type="string")
+        else:
+            self.log.debug("Unknown colorspace - setting colorspace skipped.")
+
+    def _is_sequence(self, context):
+        """Check whether frameStart and frameEnd are not the same."""
+        version = context.get("version", {})
+        representation = context.get("representation", {})
+
+        for doc in [representation, version]:
+            # Frame range can be set on version or representation.
+            # When set on representation it overrides version data.
+            data = doc.get("data", {})
+            start = data.get("frameStartHandle", data.get("frameStart", None))
+            end = data.get("frameEndHandle", data.get("frameEnd", None))
+
+            if start is None or end is None:
+                continue
+
+            if start != end:
+                return True
+            else:
+                return False
+
+        return False
+
+    def _get_colorspace(self, context):
+        """Return colorspace of the file to load.
+
+        Retrieves the explicit colorspace from the publish. If no colorspace
+        data is stored with published content then project imageio settings
+        are used to make an assumption of the colorspace based on the file
+        rules. If no file rules match then None is returned.
+
+        Returns:
+            str or None: The colorspace of the file or None if not detected.
+ + """ + + # We can't apply color spaces if management is not enabled + if not cmds.colorManagementPrefs(query=True, cmEnabled=True): + return + + representation = context["representation"] + colorspace_data = representation.get("data", {}).get("colorspaceData") + if colorspace_data: + return colorspace_data["colorspace"] + + # Assume colorspace from filepath based on project settings + project_name = context["project"]["name"] + host_name = os.environ.get("AVALON_APP") + project_settings = get_project_settings(project_name) + + config_data = get_imageio_config( + project_name, host_name, + project_settings=project_settings + ) + + # ignore if host imageio is not enabled + if not config_data: + return + + file_rules = get_imageio_file_rules( + project_name, host_name, + project_settings=project_settings + ) + + path = get_representation_path_from_context(context) + colorspace = get_imageio_colorspace_from_filepath( + path=path, + host_name=host_name, + project_name=project_name, + config_data=config_data, + file_rules=file_rules, + project_settings=project_settings + ) + + return colorspace + + def _format_path(self, context): + """Format the path with correct tokens for frames and udim tiles.""" + + context = copy.deepcopy(context) + representation = context["representation"] + template = representation.get("data", {}).get("template") + if not template: + # No template to find token locations for + return get_representation_path_from_context(context) + + def _placeholder(key): + # Substitute with a long placeholder value so that potential + # custom formatting with padding doesn't find its way into + # our formatting, so that wouldn't be padded as 0 + return "___{}___".format(key) + + # We format UDIM and Frame numbers with their specific tokens. 
To do
+        # so we in-place change the representation context data to format the
+        # path with our own data
+        tokens = {
+            "frame": "<f>",
+            "udim": "<UDIM>"
+        }
+        has_tokens = False
+        repre_context = representation["context"]
+        for key, _token in tokens.items():
+            if key in repre_context:
+                repre_context[key] = _placeholder(key)
+                has_tokens = True
+
+        # Replace with our custom template that has the tokens set
+        representation["data"]["template"] = template
+        path = get_representation_path_from_context(context)
+
+        if has_tokens:
+            for key, token in tokens.items():
+                if key in repre_context:
+                    path = path.replace(_placeholder(key), token)
+
+        return path
diff --git a/openpype/hosts/maya/plugins/load/load_image_plane.py b/openpype/hosts/maya/plugins/load/load_image_plane.py
index 6421f3ffe2..bf13708e9b 100644
--- a/openpype/hosts/maya/plugins/load/load_image_plane.py
+++ b/openpype/hosts/maya/plugins/load/load_image_plane.py
@@ -11,11 +11,26 @@ from openpype.pipeline import (
     get_representation_path
 )
 from openpype.hosts.maya.api.pipeline import containerise
-from openpype.hosts.maya.api.lib import unique_namespace
+from openpype.hosts.maya.api.lib import (
+    unique_namespace,
+    namespaced,
+    pairwise,
+    get_container_members
+)
 
 from maya import cmds
 
 
+def disconnect_inputs(plug):
+    overrides = cmds.listConnections(plug,
+                                     source=True,
+                                     destination=False,
+                                     plugs=True,
+                                     connections=True) or []
+    for dest, src in pairwise(overrides):
+        cmds.disconnectAttr(src, dest)
+
+
 class CameraWindow(QtWidgets.QDialog):
 
     def __init__(self, cameras):
@@ -74,6 +89,7 @@ class CameraWindow(QtWidgets.QDialog):
         self.camera = None
         self.close()
 
+
 class ImagePlaneLoader(load.LoaderPlugin):
     """Specific loader of plate for image planes on selected camera."""
 
@@ -84,9 +100,7 @@ class ImagePlaneLoader(load.LoaderPlugin):
     color = "orange"
 
     def load(self, context, name, namespace, data, options=None):
-        import pymel.core as pm
-
         new_nodes = []
         image_plane_depth = 1000
         asset = context['asset']['name']
         namespace = namespace or unique_namespace(
             asset + "_",
             prefix="_" if asset[0].isdigit() else "",
             suffix="_",
         )
 
         # Get camera from user selection.
- camera = None # is_static_image_plane = None # is_in_all_views = None - if data: - camera = pm.PyNode(data.get("camera")) + camera = data.get("camera") if data else None if not camera: - cameras = pm.ls(type="camera") - camera_names = {x.getParent().name(): x for x in cameras} - camera_names["Create new camera."] = "create_camera" + cameras = cmds.ls(type="camera") + + # Cameras by names + camera_names = {} + for camera in cameras: + parent = cmds.listRelatives(camera, parent=True, path=True)[0] + camera_names[parent] = camera + + camera_names["Create new camera."] = "create-camera" window = CameraWindow(camera_names.keys()) window.exec_() # Skip if no camera was selected (Dialog was closed) @@ -113,43 +131,48 @@ class ImagePlaneLoader(load.LoaderPlugin): return camera = camera_names[window.camera] - if camera == "create_camera": - camera = pm.createNode("camera") + if camera == "create-camera": + camera = cmds.createNode("camera") if camera is None: return try: - camera.displayResolution.set(1) - camera.farClipPlane.set(image_plane_depth * 10) + cmds.setAttr("{}.displayResolution".format(camera), True) + cmds.setAttr("{}.farClipPlane".format(camera), + image_plane_depth * 10) except RuntimeError: pass # Create image plane - image_plane_transform, image_plane_shape = pm.imagePlane( - fileName=context["representation"]["data"]["path"], - camera=camera) - image_plane_shape.depth.set(image_plane_depth) + with namespaced(namespace): + # Create inside the namespace + image_plane_transform, image_plane_shape = cmds.imagePlane( + fileName=context["representation"]["data"]["path"], + camera=camera + ) + start_frame = cmds.playbackOptions(query=True, min=True) + end_frame = cmds.playbackOptions(query=True, max=True) - - start_frame = pm.playbackOptions(q=True, min=True) - end_frame = pm.playbackOptions(q=True, max=True) - - image_plane_shape.frameOffset.set(0) - image_plane_shape.frameIn.set(start_frame) - image_plane_shape.frameOut.set(end_frame) - image_plane_shape.frameCache.set(end_frame) - image_plane_shape.useFrameExtension.set(1) + for attr, value in { + "depth": image_plane_depth, + "frameOffset": 0, + "frameIn": start_frame, + "frameOut": end_frame, + "frameCache": end_frame, + "useFrameExtension": True + }.items(): + plug = "{}.{}".format(image_plane_shape, attr) + cmds.setAttr(plug, value) movie_representations = ["mov", "preview"] if context["representation"]["name"] in movie_representations: - # Need to get "type" by string, because its a method as well. - pm.Attribute(image_plane_shape + ".type").set(2) + cmds.setAttr(image_plane_shape + ".type", 2) # Ask user whether to use sequence or still image. if context["representation"]["name"] == "exr": # Ensure OpenEXRLoader plugin is loaded. - pm.loadPlugin("OpenEXRLoader.mll", quiet=True) + cmds.loadPlugin("OpenEXRLoader", quiet=True) message = ( "Hold image sequence on first frame?" 
@@ -161,32 +184,18 @@ class ImagePlaneLoader(load.LoaderPlugin): None, "Frame Hold.", message, - QtWidgets.QMessageBox.Ok, - QtWidgets.QMessageBox.Cancel + QtWidgets.QMessageBox.Yes, + QtWidgets.QMessageBox.No ) - if reply == QtWidgets.QMessageBox.Ok: - # find the input and output of frame extension - expressions = image_plane_shape.frameExtension.inputs() - frame_ext_output = image_plane_shape.frameExtension.outputs() - if expressions: - # the "time1" node is non-deletable attr - # in Maya, use disconnectAttr instead - pm.disconnectAttr(expressions, frame_ext_output) + if reply == QtWidgets.QMessageBox.Yes: + frame_extension_plug = "{}.frameExtension".format(image_plane_shape) # noqa - if not image_plane_shape.frameExtension.isFreeToChange(): - raise RuntimeError("Can't set frame extension for {}".format(image_plane_shape)) # noqa - # get the node of time instead and set the time for it. - image_plane_shape.frameExtension.set(start_frame) + # Remove current frame expression + disconnect_inputs(frame_extension_plug) - new_nodes.extend( - [ - image_plane_transform.longName().split("|")[-1], - image_plane_shape.longName().split("|")[-1] - ] - ) + cmds.setAttr(frame_extension_plug, start_frame) - for node in new_nodes: - pm.rename(node, "{}:{}".format(namespace, node)) + new_nodes = [image_plane_transform, image_plane_shape] return containerise( name=name, @@ -197,21 +206,19 @@ class ImagePlaneLoader(load.LoaderPlugin): ) def update(self, container, representation): - import pymel.core as pm - image_plane_shape = None - for node in pm.PyNode(container["objectName"]).members(): - if node.nodeType() == "imagePlane": - image_plane_shape = node - assert image_plane_shape is not None, "Image plane not found." + members = get_container_members(container) + image_planes = cmds.ls(members, type="imagePlane") + assert image_planes, "Image plane not found." + image_plane_shape = image_planes[0] path = get_representation_path(representation) - image_plane_shape.imageName.set(path) - cmds.setAttr( - container["objectName"] + ".representation", - str(representation["_id"]), - type="string" - ) + cmds.setAttr("{}.imageName".format(image_plane_shape), + path, + type="string") + cmds.setAttr("{}.representation".format(container["objectName"]), + str(representation["_id"]), + type="string") # Set frame range. 
 project_name = legacy_io.active_project()
@@ -227,10 +234,14 @@ class ImagePlaneLoader(load.LoaderPlugin):
             start_frame = asset["data"]["frameStart"]
             end_frame = asset["data"]["frameEnd"]

-        image_plane_shape.frameOffset.set(0)
-        image_plane_shape.frameIn.set(start_frame)
-        image_plane_shape.frameOut.set(end_frame)
-        image_plane_shape.frameCache.set(end_frame)
+        for attr, value in {
+            "frameOffset": 0,
+            "frameIn": start_frame,
+            "frameOut": end_frame,
+            "frameCache": end_frame
+        }.items():
+            plug = "{}.{}".format(image_plane_shape, attr)
+            cmds.setAttr(plug, value)

     def switch(self, container, representation):
         self.update(container, representation)
diff --git a/openpype/hosts/maya/plugins/load/load_reference.py b/openpype/hosts/maya/plugins/load/load_reference.py
index 96d7d5d3b2..74ca27ff3c 100644
--- a/openpype/hosts/maya/plugins/load/load_reference.py
+++ b/openpype/hosts/maya/plugins/load/load_reference.py
@@ -1,14 +1,88 @@
 import os
+import difflib
+import contextlib

 from maya import cmds

 from openpype.settings import get_project_settings
-from openpype.pipeline import legacy_io
-from openpype.pipeline.create import (
-    legacy_create,
-    get_legacy_creator_by_name,
-)
 import openpype.hosts.maya.api.plugin
-from openpype.hosts.maya.api.lib import maintained_selection
+from openpype.hosts.maya.api.lib import (
+    maintained_selection,
+    get_container_members,
+    parent_nodes,
+    create_rig_animation_instance
+)
+
+
+@contextlib.contextmanager
+def preserve_modelpanel_cameras(container, log=None):
+    """Preserve camera members of container in the modelPanels.
+
+    This is used to ensure a camera remains in the modelPanels after updating
+    to a new version.
+
+    """
+
+    # Get the modelPanels that used the old camera
+    members = get_container_members(container)
+    old_cameras = set(cmds.ls(members, type="camera", long=True))
+    if not old_cameras:
+        # No need to manage anything
+        yield
+        return
+
+    panel_cameras = {}
+    for panel in cmds.getPanel(type="modelPanel"):
+        cam = cmds.ls(cmds.modelPanel(panel, query=True, camera=True),
+                      long=True)[0]
+
+        # Often but not always maya returns the transform from the
+        # modelPanel as opposed to the camera shape, so we convert it
+        # to explicitly be the camera shape
+        if cmds.nodeType(cam) != "camera":
+            cam = cmds.listRelatives(cam,
+                                     children=True,
+                                     fullPath=True,
+                                     type="camera")[0]
+        if cam in old_cameras:
+            panel_cameras[panel] = cam
+
+    if not panel_cameras:
+        # No need to manage anything
+        yield
+        return
+
+    try:
+        yield
+    finally:
+        new_members = get_container_members(container)
+        new_cameras = set(cmds.ls(new_members, type="camera", long=True))
+        if not new_cameras:
+            return
+
+        for panel, cam_name in panel_cameras.items():
+            new_camera = None
+            if cam_name in new_cameras:
+                new_camera = cam_name
+            elif len(new_cameras) == 1:
+                new_camera = next(iter(new_cameras))
+            else:
+                # Multiple cameras in the updated container but not an exact
+                # match detected by name.
Find the closest match + matches = difflib.get_close_matches(word=cam_name, + possibilities=new_cameras, + n=1) + if matches: + new_camera = matches[0] # best match + if log: + log.info("Camera in '{}' restored with " + "closest match camera: {} (before: {})" + .format(panel, new_camera, cam_name)) + + if not new_camera: + # Unable to find the camera to re-apply in the modelpanel + continue + + cmds.modelPanel(panel, edit=True, camera=new_camera) class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): @@ -25,9 +99,10 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): "camera", "rig", "camerarig", - "xgen", "staticMesh", + "skeletalMesh", "mvLook"] + representations = ["ma", "abc", "fbx", "mb"] label = "Reference" @@ -35,26 +110,23 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): icon = "code-fork" color = "orange" - # Name of creator class that will be used to create animation instance - animation_creator_name = "CreateAnimation" - def process_reference(self, context, name, namespace, options): import maya.cmds as cmds - import pymel.core as pm try: family = context["representation"]["context"]["family"] except ValueError: family = "model" - group_name = "{}:_GRP".format(namespace) # True by default to keep legacy behaviours attach_to_root = options.get("attach_to_root", True) + group_name = options["group_name"] with maintained_selection(): cmds.loadPlugin("AbcImport.mll", quiet=True) file_url = self.prepare_root_value(self.fname, context["project"]["name"]) + nodes = cmds.file(file_url, namespace=namespace, sharedReferenceFile=False, @@ -67,7 +139,10 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): new_nodes = (list(set(nodes) - set(shapes))) - current_namespace = pm.namespaceInfo(currentNamespace=True) + # if there are cameras, try to lock their transforms + self._lock_camera_transforms(new_nodes) + + current_namespace = cmds.namespaceInfo(currentNamespace=True) if current_namespace != ":": group_name = current_namespace + ":" + group_name @@ -77,37 +152,37 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): self[:] = new_nodes if attach_to_root: - group_node = pm.PyNode(group_name) - roots = set() + roots = cmds.listRelatives(group_name, + children=True, + fullPath=True) or [] - for node in new_nodes: - try: - roots.add(pm.PyNode(node).getAllParents()[-2]) - except: # noqa: E722 - pass - - if family not in ["layout", "setdress", - "mayaAscii", "mayaScene"]: - for root in roots: - root.setParent(world=True) - - group_node.zeroTransformPivots() - for root in roots: - root.setParent(group_node) - - cmds.setAttr(group_name + ".displayHandle", 1) + if family not in {"layout", "setdress", + "mayaAscii", "mayaScene"}: + # QUESTION Why do we need to exclude these families? 
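# Aside: the `difflib.get_close_matches` call in `preserve_modelpanel_cameras`
# above behaves like this (camera names invented for illustration):
#
#     >>> import difflib
#     >>> difflib.get_close_matches(
#     ...     word="|rig:camera_main|rig:camera_mainShape",
#     ...     possibilities=["|rig:camera_hero|rig:camera_heroShape"],
#     ...     n=1)
#     ['|rig:camera_hero|rig:camera_heroShape']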
+ with parent_nodes(roots, parent=None): + cmds.xform(group_name, zeroTransformPivots=True) settings = get_project_settings(os.environ['AVALON_PROJECT']) + + display_handle = settings['maya']['load'].get( + 'reference_loader', {} + ).get('display_handle', True) + cmds.setAttr( + "{}.displayHandle".format(group_name), display_handle + ) + colors = settings['maya']['load']['colors'] c = colors.get(family) if c is not None: - group_node.useOutlinerColor.set(1) - group_node.outlinerColor.set( - (float(c[0]) / 255), - (float(c[1]) / 255), - (float(c[2]) / 255)) + cmds.setAttr("{}.useOutlinerColor".format(group_name), 1) + cmds.setAttr("{}.outlinerColor".format(group_name), + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255)) - cmds.setAttr(group_name + ".displayHandle", 1) + cmds.setAttr( + "{}.displayHandle".format(group_name), display_handle + ) # get bounding box bbox = cmds.exactWorldBoundingBox(group_name) # get pivot position on world space @@ -121,49 +196,47 @@ class ReferenceLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): cy = cy + pivot[1] cz = cz + pivot[2] # set selection handle offset to center of bounding box - cmds.setAttr(group_name + ".selectHandleX", cx) - cmds.setAttr(group_name + ".selectHandleY", cy) - cmds.setAttr(group_name + ".selectHandleZ", cz) + cmds.setAttr("{}.selectHandleX".format(group_name), cx) + cmds.setAttr("{}.selectHandleY".format(group_name), cy) + cmds.setAttr("{}.selectHandleZ".format(group_name), cz) if family == "rig": self._post_process_rig(name, namespace, context, options) else: if "translate" in options: - cmds.setAttr(group_name + ".t", *options["translate"]) + cmds.setAttr("{}.translate".format(group_name), + *options["translate"]) return new_nodes def switch(self, container, representation): self.update(container, representation) + def update(self, container, representation): + with preserve_modelpanel_cameras(container, log=self.log): + super(ReferenceLoader, self).update(container, representation) + + # We also want to lock camera transforms on any new cameras in the + # reference or for a camera which might have changed names. + members = get_container_members(container) + self._lock_camera_transforms(members) + def _post_process_rig(self, name, namespace, context, options): - - output = next((node for node in self if - node.endswith("out_SET")), None) - controls = next((node for node in self if - node.endswith("controls_SET")), None) - - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - # Find the roots amongst the loaded nodes - roots = cmds.ls(self[:], assemblies=True, long=True) - assert roots, "No root nodes in rig, this is a bug." 
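# Aside on the selection-handle placement above: `cmds.exactWorldBoundingBox`
# returns [xmin, ymin, zmin, xmax, ymax, zmax], so with invented numbers the
# handle offset (before adding the pivot) is the bounding-box center:
#
#     bbox = [-1.0, 0.0, -2.0, 3.0, 4.0, 2.0]
#     cx = (bbox[0] + bbox[3]) / 2  # 1.0
#     cy = (bbox[1] + bbox[4]) / 2  # 2.0
#     cz = (bbox[2] + bbox[5]) / 2  # 0.0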
- - asset = legacy_io.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - - self.log.info("Creating subset: {}".format(namespace)) - - # Create the animation instance - creator_plugin = get_legacy_creator_by_name( - self.animation_creator_name + nodes = self[:] + create_rig_animation_instance( + nodes, context, namespace, options=options, log=self.log ) - with maintained_selection(): - cmds.select([output, controls] + roots, noExpand=True) - legacy_create( - creator_plugin, - name=namespace, - asset=asset, - options={"useSelection": True}, - data={"dependencies": dependency} - ) + + def _lock_camera_transforms(self, nodes): + cameras = cmds.ls(nodes, type="camera") + if not cameras: + return + + # Check the Maya version, lockTransform has been introduced since + # Maya 2016.5 Ext 2 + version = int(cmds.about(version=True)) + if version >= 2016: + for camera in cameras: + cmds.camera(camera, edit=True, lockTransform=True) + else: + self.log.warning("This version of Maya does not support locking of" + " transforms of cameras.") diff --git a/openpype/hosts/maya/plugins/load/load_vrayproxy.py b/openpype/hosts/maya/plugins/load/load_vrayproxy.py index 720a132aa7..64184f9e7b 100644 --- a/openpype/hosts/maya/plugins/load/load_vrayproxy.py +++ b/openpype/hosts/maya/plugins/load/load_vrayproxy.py @@ -81,10 +81,11 @@ class VRayProxyLoader(load.LoaderPlugin): c = colors.get(family) if c is not None: cmds.setAttr("{0}.useOutlinerColor".format(group_node), 1) - cmds.setAttr("{0}.outlinerColor".format(group_node), - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) + cmds.setAttr( + "{0}.outlinerColor".format(group_node), + (float(c[0]) / 255), + (float(c[1]) / 255), + (float(c[2]) / 255) ) return containerise( @@ -101,7 +102,7 @@ class VRayProxyLoader(load.LoaderPlugin): assert cmds.objExists(node), "Missing container" members = cmds.sets(node, query=True) or [] - vraymeshes = cmds.ls(members, type="VRayMesh") + vraymeshes = cmds.ls(members, type="VRayProxy") assert vraymeshes, "Cannot find VRayMesh in container" # get all representations for this version diff --git a/openpype/hosts/maya/plugins/load/load_xgen.py b/openpype/hosts/maya/plugins/load/load_xgen.py new file mode 100644 index 0000000000..7e6cabc77c --- /dev/null +++ b/openpype/hosts/maya/plugins/load/load_xgen.py @@ -0,0 +1,173 @@ +import os + +import maya.cmds as cmds +import xgenm + +from qtpy import QtWidgets + +import openpype.hosts.maya.api.plugin +from openpype.hosts.maya.api.lib import ( + maintained_selection, + get_container_members, + attribute_values, + write_xgen_file +) +from openpype.hosts.maya.api import current_file +from openpype.pipeline import get_representation_path + + +class XgenLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): + """Load Xgen as reference""" + + families = ["xgen"] + representations = ["ma", "mb"] + + label = "Reference Xgen" + icon = "code-fork" + color = "orange" + + def get_xgen_xgd_paths(self, palette): + _, maya_extension = os.path.splitext(current_file()) + xgen_file = current_file().replace( + maya_extension, + "__{}.xgen".format(palette.replace("|", "").replace(":", "__")) + ) + xgd_file = xgen_file.replace(".xgen", ".xgd") + return xgen_file, xgd_file + + def process_reference(self, context, name, namespace, options): + # Validate workfile has a path. + if current_file() is None: + QtWidgets.QMessageBox.warning( + None, + "", + "Current workfile has not been saved. Please save the workfile" + " before loading an Xgen." 
+            )
+            return
+
+        maya_filepath = self.prepare_root_value(
+            self.fname, context["project"]["name"]
+        )
+
+        # Reference xgen. Xgen does not like being referenced under a group.
+        new_nodes = []
+
+        with maintained_selection():
+            nodes = cmds.file(
+                maya_filepath,
+                namespace=namespace,
+                sharedReferenceFile=False,
+                reference=True,
+                returnNewNodes=True
+            )
+
+        xgen_palette = cmds.ls(
+            nodes, type="xgmPalette", long=True
+        )[0].replace("|", "")
+
+        xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
+        self.set_palette_attributes(xgen_palette, xgen_file, xgd_file)
+
+        # Change the cache and disk values of xgDataPath and xgProjectPath
+        # to ensure paths are setup correctly.
+        project_path = os.path.dirname(current_file()).replace("\\", "/")
+        xgenm.setAttr("xgProjectPath", project_path, xgen_palette)
+        data_path = "${{PROJECT}}xgen/collections/{};{}".format(
+            xgen_palette.replace(":", "__ns__"),
+            xgenm.getAttr("xgDataPath", xgen_palette)
+        )
+        xgenm.setAttr("xgDataPath", data_path, xgen_palette)
+
+        data = {"xgProjectPath": project_path, "xgDataPath": data_path}
+        write_xgen_file(data, xgen_file)
+
+        # This creates a float expression attribute. If no changes were made
+        # to the collection, Xgen would not create an xgd file on save, which
+        # causes errors when launching the workfile again because the xgd
+        # file cannot be found.
+        name = "custom_float_ignore"
+        if name not in xgenm.customAttrs(xgen_palette):
+            xgenm.addCustomAttr(
+                "custom_float_ignore", xgen_palette
+            )
+
+        shapes = cmds.ls(nodes, shapes=True, long=True)
+
+        new_nodes = (list(set(nodes) - set(shapes)))
+
+        self[:] = new_nodes
+
+        return new_nodes
+
+    def set_palette_attributes(self, xgen_palette, xgen_file, xgd_file):
+        cmds.setAttr(
+            "{}.xgBaseFile".format(xgen_palette),
+            os.path.basename(xgen_file),
+            type="string"
+        )
+        cmds.setAttr(
+            "{}.xgFileName".format(xgen_palette),
+            os.path.basename(xgd_file),
+            type="string"
+        )
+        cmds.setAttr("{}.xgExportAsDelta".format(xgen_palette), True)
+
+    def update(self, container, representation):
+        """Workflow for updating Xgen.
+
+        - Copy and potentially overwrite the workspace .xgen file.
+        - Export changes to delta file.
+        - Set collection attributes to not include delta files.
+        - Update xgen maya file reference.
+        - Apply the delta file changes.
+        - Reset collection attributes to include delta files.
+
+        We have to do this workflow because when referencing the xgen
+        collection, Maya implicitly imports the Xgen data from the xgen file,
+        so we don't have any control over when the delta file changes are
+        applied.
+
+        There is an implicit increment of the xgen and delta files, due to
+        using the workfile basename.
+        """
+
+        container_node = container["objectName"]
+        members = get_container_members(container_node)
+        xgen_palette = cmds.ls(
+            members, type="xgmPalette", long=True
+        )[0].replace("|", "")
+        xgen_file, xgd_file = self.get_xgen_xgd_paths(xgen_palette)
+
+        # Export current changes to apply later.
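# Aside: `xgDataPath` holds a `;`-separated search path and the `${PROJECT}`
# token expands against `xgProjectPath`. With invented values, the composed
# path set above would look like:
#
#     "${PROJECT}xgen/collections/char01__ns__hair;/published/data/path"
#
# which places the workfile-local collection folder ahead of the data path
# the published palette shipped with.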
+ xgenm.createDelta(xgen_palette.replace("|", ""), xgd_file) + + self.set_palette_attributes(xgen_palette, xgen_file, xgd_file) + + maya_file = get_representation_path(representation) + _, extension = os.path.splitext(maya_file) + new_xgen_file = maya_file.replace(extension, ".xgen") + data_path = "" + with open(new_xgen_file, "r") as f: + for line in f: + if line.startswith("\txgDataPath"): + line = line.rstrip() + data_path = line.split("\t")[-1] + break + + project_path = os.path.dirname(current_file()).replace("\\", "/") + data_path = "${{PROJECT}}xgen/collections/{};{}".format( + xgen_palette.replace(":", "__ns__"), + data_path + ) + data = {"xgProjectPath": project_path, "xgDataPath": data_path} + write_xgen_file(data, xgen_file) + + attribute_data = { + "{}.xgFileName".format(xgen_palette): os.path.basename(xgen_file), + "{}.xgBaseFile".format(xgen_palette): "", + "{}.xgExportAsDelta".format(xgen_palette): False + } + with attribute_values(attribute_data): + super().update(container, representation) + + xgenm.applyDelta(xgen_palette.replace("|", ""), xgd_file) diff --git a/openpype/hosts/maya/plugins/load/load_yeti_rig.py b/openpype/hosts/maya/plugins/load/load_yeti_rig.py index 651607de8a..b8066871b0 100644 --- a/openpype/hosts/maya/plugins/load/load_yeti_rig.py +++ b/openpype/hosts/maya/plugins/load/load_yeti_rig.py @@ -1,17 +1,12 @@ -import os -from collections import defaultdict +import maya.cmds as cmds -from openpype.settings import get_project_settings +from openpype.settings import get_current_project_settings import openpype.hosts.maya.api.plugin from openpype.hosts.maya.api import lib class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): - """ - This loader will load Yeti rig. You can select something in scene and if it - has same ID as mesh published with rig, their shapes will be linked - together. 
- """ + """This loader will load Yeti rig.""" families = ["yetiRig"] representations = ["ma"] @@ -22,72 +17,30 @@ class YetiRigLoader(openpype.hosts.maya.api.plugin.ReferenceLoader): color = "orange" def process_reference( - self, context, name=None, namespace=None, options=None): - - import maya.cmds as cmds - - # get roots of selected hierarchies - selected_roots = [] - for sel in cmds.ls(sl=True, long=True): - selected_roots.append(sel.split("|")[1]) - - # get all objects under those roots - selected_hierarchy = [] - for root in selected_roots: - selected_hierarchy.append(cmds.listRelatives( - root, - allDescendents=True) or []) - - # flatten the list and filter only shapes - shapes_flat = [] - for root in selected_hierarchy: - shapes = cmds.ls(root, long=True, type="mesh") or [] - for shape in shapes: - shapes_flat.append(shape) - - # create dictionary of cbId and shape nodes - scene_lookup = defaultdict(list) - for node in shapes_flat: - cb_id = lib.get_id(node) - scene_lookup[cb_id] = node - - # load rig + self, context, name=None, namespace=None, options=None + ): + group_name = options['group_name'] with lib.maintained_selection(): - file_url = self.prepare_root_value(self.fname, - context["project"]["name"]) - nodes = cmds.file(file_url, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) + file_url = self.prepare_root_value( + self.fname, context["project"]["name"] + ) + nodes = cmds.file( + file_url, + namespace=namespace, + reference=True, + returnNewNodes=True, + groupReference=True, + groupName=group_name + ) - # for every shape node we've just loaded find matching shape by its - # cbId in selection. If found outMesh of scene shape will connect to - # inMesh of loaded shape. - for destination_node in nodes: - source_node = scene_lookup[lib.get_id(destination_node)] - if source_node: - self.log.info("found: {}".format(source_node)) - self.log.info( - "creating connection to {}".format(destination_node)) - - cmds.connectAttr("{}.outMesh".format(source_node), - "{}.inMesh".format(destination_node), - force=True) - - groupName = "{}:{}".format(namespace, name) - - settings = get_project_settings(os.environ['AVALON_PROJECT']) - colors = settings['maya']['load']['colors'] - - c = colors.get('yetiRig') + settings = get_current_project_settings() + colors = settings["maya"]["load"]["colors"] + c = colors.get("yetiRig") if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - (float(c[0])/255), - (float(c[1])/255), - (float(c[2])/255) + cmds.setAttr(group_name + ".useOutlinerColor", 1) + cmds.setAttr( + group_name + ".outlinerColor", + (float(c[0]) / 255), (float(c[1]) / 255), (float(c[2]) / 255) ) self[:] = nodes diff --git a/openpype/hosts/maya/plugins/publish/collect_animation.py b/openpype/hosts/maya/plugins/publish/collect_animation.py index 549098863f..8f523f770b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_animation.py +++ b/openpype/hosts/maya/plugins/publish/collect_animation.py @@ -46,7 +46,6 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): hierarchy = members + descendants - # Ignore certain node types (e.g. constraints) ignore = cmds.ls(hierarchy, type=self.ignore_type, long=True) if ignore: @@ -58,3 +57,18 @@ class CollectAnimationOutputGeometry(pyblish.api.InstancePlugin): if instance.data.get("farm"): instance.data["families"].append("publish.farm") + + # Collect user defined attributes. 
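# Aside: `cmds.listAttr(node, userDefined=True)` returns only the attributes
# added by users or tools, e.g. (invented node and attribute names):
#
#     >>> cmds.listAttr("pCube1", userDefined=True)
#     ['cbId', 'myCustomAttr']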
+ if not instance.data.get("includeUserDefinedAttributes", False): + return + + user_defined_attributes = set() + for node in hierarchy: + attrs = cmds.listAttr(node, userDefined=True) or list() + shapes = cmds.listRelatives(node, shapes=True) or list() + for shape in shapes: + attrs.extend(cmds.listAttr(shape, userDefined=True) or list()) + + user_defined_attributes.update(attrs) + + instance.data["userDefinedAttributes"] = list(user_defined_attributes) diff --git a/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py new file mode 100644 index 0000000000..f160a3a0c5 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_arnold_scene_source.py @@ -0,0 +1,50 @@ +from maya import cmds + +import pyblish.api +from openpype.hosts.maya.api.lib import get_all_children + + +class CollectArnoldSceneSource(pyblish.api.InstancePlugin): + """Collect Arnold Scene Source data.""" + + # Offset to be after renderable camera collection. + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Arnold Scene Source" + families = ["ass"] + + def process(self, instance): + objsets = instance.data["setMembers"] + + for objset in objsets: + objset = str(objset) + members = cmds.sets(objset, query=True) + if members is None: + self.log.warning("Skipped empty instance: \"%s\" " % objset) + continue + if objset.endswith("content_SET"): + members = cmds.ls(members, long=True) + children = get_all_children(members) + instance.data["contentMembers"] = children + self.log.debug("content members: {}".format(children)) + elif objset.endswith("proxy_SET"): + set_members = get_all_children(cmds.ls(members, long=True)) + instance.data["proxy"] = set_members + self.log.debug("proxy members: {}".format(set_members)) + + # Use camera in object set if present else default to render globals + # camera. + cameras = cmds.ls(type="camera", long=True) + renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] + if renderable: + camera = renderable[0] + for node in instance.data["contentMembers"]: + camera_shapes = cmds.listRelatives( + node, shapes=True, type="camera" + ) + if camera_shapes: + camera = node + instance.data["camera"] = camera + else: + self.log.debug("No renderable cameras found.") + + self.log.debug("data: {}".format(instance.data)) diff --git a/openpype/hosts/maya/plugins/publish/collect_ass.py b/openpype/hosts/maya/plugins/publish/collect_ass.py deleted file mode 100644 index b5e05d6665..0000000000 --- a/openpype/hosts/maya/plugins/publish/collect_ass.py +++ /dev/null @@ -1,47 +0,0 @@ -from maya import cmds -from openpype.pipeline.publish import KnownPublishError - -import pyblish.api - - -class CollectAssData(pyblish.api.InstancePlugin): - """Collect Ass data.""" - - # Offset to be after renderable camera collection. 
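# Aside: `get_all_children` (imported from openpype.hosts.maya.api.lib in the
# new collector above) expands nodes to their full hierarchies. A naive
# equivalent, which unlike the lib version would miss instanced children,
# might be:
def get_all_children_naive(nodes):
    return cmds.listRelatives(
        nodes, allDescendents=True, fullPath=True) or []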
- order = pyblish.api.CollectorOrder + 0.2 - label = 'Collect Ass' - families = ["ass"] - - def process(self, instance): - objsets = instance.data['setMembers'] - - for objset in objsets: - objset = str(objset) - members = cmds.sets(objset, query=True) - if members is None: - self.log.warning("Skipped empty instance: \"%s\" " % objset) - continue - if "content_SET" in objset: - instance.data['setMembers'] = members - self.log.debug('content members: {}'.format(members)) - elif objset.startswith("proxy_SET"): - if len(members) != 1: - msg = "You have multiple proxy meshes, please only use one" - raise KnownPublishError(msg) - instance.data['proxy'] = members - self.log.debug('proxy members: {}'.format(members)) - - # Use camera in object set if present else default to render globals - # camera. - cameras = cmds.ls(type="camera", long=True) - renderable = [c for c in cameras if cmds.getAttr("%s.renderable" % c)] - camera = renderable[0] - for node in instance.data["setMembers"]: - camera_shapes = cmds.listRelatives( - node, shapes=True, type="camera" - ) - if camera_shapes: - camera = node - instance.data["camera"] = camera - - self.log.debug("data: {}".format(instance.data)) diff --git a/openpype/hosts/maya/plugins/publish/collect_inputs.py b/openpype/hosts/maya/plugins/publish/collect_inputs.py index 470fceffc9..895c92762b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_inputs.py +++ b/openpype/hosts/maya/plugins/publish/collect_inputs.py @@ -1,5 +1,4 @@ import copy -from bson.objectid import ObjectId from maya import cmds import maya.api.OpenMaya as om @@ -165,10 +164,9 @@ class CollectUpstreamInputs(pyblish.api.InstancePlugin): containers = collect_input_containers(scene_containers, nodes) - inputs = [ObjectId(c["representation"]) for c in containers] + inputs = [c["representation"] for c in containers] instance.data["inputRepresentations"] = inputs - - self.log.info("Collected inputs: %s" % inputs) + self.log.debug("Collected inputs: %s" % inputs) def _collect_renderlayer_inputs(self, scene_containers, instance): """Collects inputs from nodes in renderlayer, incl. shaders + camera""" diff --git a/openpype/hosts/maya/plugins/publish/collect_instances.py b/openpype/hosts/maya/plugins/publish/collect_instances.py index 6c6819f0a2..87a4de162d 100644 --- a/openpype/hosts/maya/plugins/publish/collect_instances.py +++ b/openpype/hosts/maya/plugins/publish/collect_instances.py @@ -1,48 +1,8 @@ from maya import cmds -import maya.api.OpenMaya as om import pyblish.api import json - - -def get_all_children(nodes): - """Return all children of `nodes` including each instanced child. - Using maya.cmds.listRelatives(allDescendents=True) includes only the first - instance. As such, this function acts as an optimal replacement with a - focus on a fast query. 
- - """ - - sel = om.MSelectionList() - traversed = set() - iterator = om.MItDag(om.MItDag.kDepthFirst) - for node in nodes: - - if node in traversed: - # Ignore if already processed as a child - # before - continue - - sel.clear() - sel.add(node) - dag = sel.getDagPath(0) - - iterator.reset(dag) - # ignore self - iterator.next() # noqa: B305 - while not iterator.isDone(): - - path = iterator.fullPathName() - - if path in traversed: - iterator.prune() - iterator.next() # noqa: B305 - continue - - traversed.add(path) - iterator.next() # noqa: B305 - - return list(traversed) +from openpype.hosts.maya.api.lib import get_all_children class CollectInstances(pyblish.api.ContextPlugin): @@ -137,6 +97,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Create the instance instance = context.create_instance(objset) instance[:] = members_hierarchy + instance.data["objset"] = objset # Store the exact members of the object set instance.data["setMembers"] = members @@ -148,13 +109,6 @@ class CollectInstances(pyblish.api.ContextPlugin): # Append start frame and end frame to label if present if "frameStart" and "frameEnd" in data: - - # Backwards compatibility for 'handles' data - if "handles" in data: - data["handleStart"] = data["handles"] - data["handleEnd"] = data["handles"] - data.pop('handles') - # Take handles from context if not set locally on the instance for key in ["handleStart", "handleEnd"]: if key not in data: diff --git a/openpype/hosts/maya/plugins/publish/collect_look.py b/openpype/hosts/maya/plugins/publish/collect_look.py index b01160a1c0..287ddc228b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_look.py @@ -556,7 +556,7 @@ class CollectLook(pyblish.api.InstancePlugin): continue if cmds.getAttr(attribute, type=True) == "message": continue - node_attributes[attr] = cmds.getAttr(attribute) + node_attributes[attr] = cmds.getAttr(attribute, asString=True) # Only include if there are any properties we care about if not node_attributes: continue diff --git a/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py b/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py index 1250ea438f..122fabe8a1 100644 --- a/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py +++ b/openpype/hosts/maya/plugins/publish/collect_maya_workspace.py @@ -12,7 +12,6 @@ class CollectMayaWorkspace(pyblish.api.ContextPlugin): label = "Maya Workspace" hosts = ['maya'] - version = (0, 1, 0) def process(self, context): workspace = cmds.workspace(rootDirectory=True, query=True) diff --git a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py index a7cb14855b..33fc7a025f 100644 --- a/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py +++ b/openpype/hosts/maya/plugins/publish/collect_multiverse_look.py @@ -255,7 +255,7 @@ class CollectMultiverseLookData(pyblish.api.InstancePlugin): Searches through the overrides finding all material overrides. From there it extracts the shading group and then finds all texture files in the shading group network. It also checks for mipmap versions of texture files - and adds them to the resouces to get published. + and adds them to the resources to get published. 
""" diff --git a/openpype/hosts/maya/plugins/publish/collect_pointcache.py b/openpype/hosts/maya/plugins/publish/collect_pointcache.py index a841341f72..d0430c5612 100644 --- a/openpype/hosts/maya/plugins/publish/collect_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/collect_pointcache.py @@ -1,3 +1,5 @@ +from maya import cmds + import pyblish.api @@ -12,3 +14,46 @@ class CollectPointcache(pyblish.api.InstancePlugin): def process(self, instance): if instance.data.get("farm"): instance.data["families"].append("publish.farm") + + proxy_set = None + for node in instance.data["setMembers"]: + if cmds.nodeType(node) != "objectSet": + continue + members = cmds.sets(node, query=True) + if members is None: + self.log.warning("Skipped empty objectset: \"%s\" " % node) + continue + if node.endswith("proxy_SET"): + proxy_set = node + instance.data["proxy"] = [] + instance.data["proxyRoots"] = [] + for member in members: + instance.data["proxy"].extend(cmds.ls(member, long=True)) + instance.data["proxyRoots"].extend( + cmds.ls(member, long=True) + ) + instance.data["proxy"].extend( + cmds.listRelatives(member, shapes=True, fullPath=True) + ) + self.log.debug( + "proxy members: {}".format(instance.data["proxy"]) + ) + + if proxy_set: + instance.remove(proxy_set) + instance.data["setMembers"].remove(proxy_set) + + # Collect user defined attributes. + if not instance.data.get("includeUserDefinedAttributes", False): + return + + user_defined_attributes = set() + for node in instance: + attrs = cmds.listAttr(node, userDefined=True) or list() + shapes = cmds.listRelatives(node, shapes=True) or list() + for shape in shapes: + attrs.extend(cmds.listAttr(shape, userDefined=True) or list()) + + user_defined_attributes.update(attrs) + + instance.data["userDefinedAttributes"] = list(user_defined_attributes) diff --git a/openpype/hosts/maya/plugins/publish/collect_render.py b/openpype/hosts/maya/plugins/publish/collect_render.py index b1ad3ca58e..babd494758 100644 --- a/openpype/hosts/maya/plugins/publish/collect_render.py +++ b/openpype/hosts/maya/plugins/publish/collect_render.py @@ -184,7 +184,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info("multipart: {}".format( multipart)) assert exp_files, "no file names were generated, this is bug" - self.log.info(exp_files) + self.log.info( + "expected files: {}".format( + json.dumps(exp_files, indent=4, sort_keys=True) + ) + ) # if we want to attach render to subset, check if we have AOV's # in expectedFiles. 
If so, raise error as we cannot attach AOV @@ -265,7 +269,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): self.log.info(full_exp_files) self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides - + colorspace_data = lib.get_color_management_preferences() data = { "subset": expected_layer_name, "attachTo": attach_to, @@ -318,6 +322,12 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "aovSeparator": layer_render_products.layer_data.aov_separator, # noqa: E501 "renderSetupIncludeLights": render_instance.data.get( "renderSetupIncludeLights" + ), + "colorspaceConfig": colorspace_data["config"], + "colorspaceDisplay": colorspace_data["display"], + "colorspaceView": colorspace_data["view"], + "strict_error_checking": render_instance.data.get( + "strict_error_checking", True ) } @@ -326,7 +336,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): context.data["system_settings"]["modules"]["deadline"] ) if deadline_settings["enabled"]: - data["deadlineUrl"] = render_instance.data.get("deadlineUrl") + data["deadlineUrl"] = render_instance.data["deadlineUrl"] if self.sync_workfile_version: data["version"] = context.data["version"] diff --git a/openpype/hosts/maya/plugins/publish/collect_review.py b/openpype/hosts/maya/plugins/publish/collect_review.py index eb872c2935..5c190a4a7b 100644 --- a/openpype/hosts/maya/plugins/publish/collect_review.py +++ b/openpype/hosts/maya/plugins/publish/collect_review.py @@ -1,10 +1,10 @@ from maya import cmds, mel -import pymel.core as pm import pyblish.api from openpype.client import get_subset_by_name -from openpype.pipeline import legacy_io +from openpype.pipeline import legacy_io, KnownPublishError +from openpype.hosts.maya.api import lib class CollectReview(pyblish.api.InstancePlugin): @@ -15,7 +15,6 @@ class CollectReview(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.3 label = 'Collect Review Data' families = ["review"] - legacy = True def process(self, instance): @@ -23,62 +22,95 @@ class CollectReview(pyblish.api.InstancePlugin): task = legacy_io.Session["AVALON_TASK"] + # Get panel. + instance.data["panel"] = cmds.playblast( + activeEditor=True + ).split("|")[-1] + # get cameras members = instance.data['setMembers'] - cameras = cmds.ls(members, long=True, - dag=True, cameras=True) self.log.debug('members: {}'.format(members)) + cameras = cmds.ls(members, long=True, dag=True, cameras=True) + camera = cameras[0] if cameras else None - # validate required settings - assert len(cameras) == 1, "Not a single camera found in extraction" - camera = cameras[0] - self.log.debug('camera: {}'.format(camera)) + context = instance.context + objectset = context.data['objectsets'] - objectset = instance.context.data['objectsets'] + # Convert enum attribute index to string for Display Lights. + index = instance.data.get("displayLights", 0) + display_lights = lib.DISPLAY_LIGHTS_VALUES[index] + if display_lights == "project_settings": + settings = instance.context.data["project_settings"] + settings = settings["maya"]["publish"]["ExtractPlayblast"] + settings = settings["capture_preset"]["Viewport Options"] + display_lights = settings["displayLights"] - reviewable_subset = None - reviewable_subset = list(set(members) & set(objectset)) - if reviewable_subset: - assert len(reviewable_subset) <= 1, "Multiple subsets for review" - self.log.debug('subset for review: {}'.format(reviewable_subset)) + # Collect camera focal length. 
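# Aside: if `focalLength` has an incoming connection (i.e. it is animated),
# the code below samples it once per frame, otherwise a single static value
# is stored. Illustrative shapes of the resulting burnin member:
#
#     focal_length = 35.0                # static lens
#     focal_length = [35.0, 36.2, 38.0]  # animated, one value per frame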
+ burninDataMembers = instance.data.get("burninDataMembers", {}) + if camera is not None: + attr = camera + ".focalLength" + if lib.get_attribute_input(attr): + start = instance.data["frameStart"] + end = instance.data["frameEnd"] + 1 + time_range = range(int(start), int(end)) + focal_length = [cmds.getAttr(attr, time=t) for t in time_range] + else: + focal_length = cmds.getAttr(attr) - i = 0 - for inst in instance.context: + burninDataMembers["focalLength"] = focal_length - self.log.debug('filtering {}'.format(inst)) - data = instance.context[i].data + # Account for nested instances like model. + reviewable_subsets = list(set(members) & set(objectset)) + if reviewable_subsets: + if len(reviewable_subsets) > 1: + raise KnownPublishError( + "Multiple attached subsets for review are not supported. " + "Attached: {}".format(", ".join(reviewable_subsets)) + ) - if inst.name != reviewable_subset[0]: - self.log.debug('subset name does not match {}'.format( - reviewable_subset[0])) - i += 1 - continue + reviewable_subset = reviewable_subsets[0] + self.log.debug( + "Subset attached to review: {}".format(reviewable_subset) + ) + + # Find the relevant publishing instance in the current context + reviewable_inst = next(inst for inst in context + if inst.name == reviewable_subset) + data = reviewable_inst.data + + self.log.debug( + 'Adding review family to {}'.format(reviewable_subset) + ) + if data.get('families'): + data['families'].append('review') + else: + data['families'] = ['review'] + + data["cameras"] = cameras + data['review_camera'] = camera + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] + data['handleStart'] = instance.data["handleStart"] + data['handleEnd'] = instance.data["handleEnd"] + data["frameStart"] = instance.data["frameStart"] + data["frameEnd"] = instance.data["frameEnd"] + data['step'] = instance.data['step'] + data['fps'] = instance.data['fps'] + data['review_width'] = instance.data['review_width'] + data['review_height'] = instance.data['review_height'] + data["isolate"] = instance.data["isolate"] + data["panZoom"] = instance.data.get("panZoom", False) + data["panel"] = instance.data["panel"] + data["displayLights"] = display_lights + data["burninDataMembers"] = burninDataMembers + + # The review instance must be active + cmds.setAttr(str(instance) + '.active', 1) + + instance.data['remove'] = True - if data.get('families'): - data['families'].append('review') - else: - data['families'] = ['review'] - self.log.debug('adding review family to {}'.format( - reviewable_subset)) - data['review_camera'] = camera - # data["publish"] = False - data['frameStartFtrack'] = instance.data["frameStartHandle"] - data['frameEndFtrack'] = instance.data["frameEndHandle"] - data['frameStartHandle'] = instance.data["frameStartHandle"] - data['frameEndHandle'] = instance.data["frameEndHandle"] - data["frameStart"] = instance.data["frameStart"] - data["frameEnd"] = instance.data["frameEnd"] - data['handles'] = instance.data.get('handles', None) - data['step'] = instance.data['step'] - data['fps'] = instance.data['fps'] - data['review_width'] = instance.data['review_width'] - data['review_height'] = instance.data['review_height'] - data["isolate"] = instance.data["isolate"] - cmds.setAttr(str(instance) + '.active', 1) - self.log.debug('data {}'.format(instance.context[i].data)) - 
instance.context[i].data.update(data) - instance.data['remove'] = True - self.log.debug('isntance data {}'.format(instance.data)) else: legacy_subset_name = task + 'Review' asset_doc = instance.context.data['assetEntity'] @@ -93,49 +125,52 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug("Existing subsets found, keep legacy name.") instance.data['subset'] = legacy_subset_name + instance.data["cameras"] = cameras instance.data['review_camera'] = camera instance.data['frameStartFtrack'] = \ instance.data["frameStartHandle"] instance.data['frameEndFtrack'] = \ instance.data["frameEndHandle"] + instance.data["displayLights"] = display_lights + instance.data["burninDataMembers"] = burninDataMembers # make ftrack publishable - instance.data["families"] = ['ftrack'] + instance.data.setdefault("families", []).append('ftrack') cmds.setAttr(str(instance) + '.active', 1) # Collect audio playback_slider = mel.eval('$tmpVar=$gPlayBackSlider') - audio_name = cmds.timeControl(playback_slider, q=True, s=True) + audio_name = cmds.timeControl(playback_slider, + query=True, + sound=True) display_sounds = cmds.timeControl( - playback_slider, q=True, displaySound=True + playback_slider, query=True, displaySound=True ) - audio_nodes = [] + def get_audio_node_data(node): + return { + "offset": cmds.getAttr("{}.offset".format(node)), + "filename": cmds.getAttr("{}.filename".format(node)) + } + + audio_data = [] if audio_name: - audio_nodes.append(pm.PyNode(audio_name)) + audio_data.append(get_audio_node_data(audio_name)) - if not audio_name and display_sounds: - start_frame = int(pm.playbackOptions(q=True, min=True)) - end_frame = float(pm.playbackOptions(q=True, max=True)) - frame_range = range(int(start_frame), int(end_frame)) + elif display_sounds: + start_frame = int(cmds.playbackOptions(query=True, min=True)) + end_frame = int(cmds.playbackOptions(query=True, max=True)) - for node in pm.ls(type="audio"): + for node in cmds.ls(type="audio"): # Check if frame range and audio range intersections, # for whether to include this audio node or not. - start_audio = node.offset.get() - end_audio = node.offset.get() + node.duration.get() - audio_range = range(int(start_audio), int(end_audio)) + duration = cmds.getAttr("{}.duration".format(node)) + start_audio = cmds.getAttr("{}.offset".format(node)) + end_audio = start_audio + duration - if bool(set(frame_range).intersection(audio_range)): - audio_nodes.append(node) + if start_audio <= end_frame and end_audio > start_frame: + audio_data.append(get_audio_node_data(node)) - instance.data["audio"] = [] - for node in audio_nodes: - instance.data["audio"].append( - { - "offset": node.offset.get(), - "filename": node.filename.get() - } - ) + instance.data["audio"] = audio_data diff --git a/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py b/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py index 236797ca3c..24521a2f09 100644 --- a/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py +++ b/openpype/hosts/maya/plugins/publish/collect_vrayproxy.py @@ -9,10 +9,16 @@ class CollectVrayProxy(pyblish.api.InstancePlugin): Add `pointcache` family for it. 
""" order = pyblish.api.CollectorOrder + 0.01 - label = 'Collect Vray Proxy' + label = "Collect Vray Proxy" families = ["vrayproxy"] def process(self, instance): """Collector entry point.""" if not instance.data.get('families'): instance.data["families"] = [] + + if instance.data.get("vrmesh"): + instance.data["families"].append("vrayproxy.vrmesh") + + if instance.data.get("alembic"): + instance.data["families"].append("vrayproxy.alembic") diff --git a/openpype/hosts/maya/plugins/publish/collect_xgen.py b/openpype/hosts/maya/plugins/publish/collect_xgen.py new file mode 100644 index 0000000000..da0549b2d8 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/collect_xgen.py @@ -0,0 +1,71 @@ +import os + +from maya import cmds + +import pyblish.api +from openpype.hosts.maya.api.lib import get_attribute_input + + +class CollectXgen(pyblish.api.InstancePlugin): + """Collect Xgen""" + + order = pyblish.api.CollectorOrder + 0.499999 + label = "Collect Xgen" + families = ["xgen"] + + def process(self, instance): + data = { + "xgmPalettes": cmds.ls(instance, type="xgmPalette", long=True), + "xgmDescriptions": cmds.ls( + instance, type="xgmDescription", long=True + ), + "xgmSubdPatches": cmds.ls(instance, type="xgmSubdPatch", long=True) + } + data["xgenNodes"] = ( + data["xgmPalettes"] + + data["xgmDescriptions"] + + data["xgmSubdPatches"] + ) + + if data["xgmPalettes"]: + data["xgmPalette"] = data["xgmPalettes"][0] + + data["xgenConnections"] = {} + for node in data["xgmSubdPatches"]: + data["xgenConnections"][node] = {} + for attr in ["transform", "geometry"]: + input = get_attribute_input("{}.{}".format(node, attr)) + data["xgenConnections"][node][attr] = input + + # Collect all files under palette root as resources. + import xgenm + + data_path = xgenm.getAttr( + "xgDataPath", data["xgmPalette"].replace("|", "") + ).split(os.pathsep)[0] + data_path = data_path.replace( + "${PROJECT}", + xgenm.getAttr("xgProjectPath", data["xgmPalette"].replace("|", "")) + ) + transfers = [] + + # Since we are duplicating this palette when extracting we predict that + # the name will be the basename without namespaces. 
+ predicted_palette_name = data["xgmPalette"].split(":")[-1] + predicted_palette_name = predicted_palette_name.replace("|", "") + + for root, _, files in os.walk(data_path): + for file in files: + source = os.path.join(root, file).replace("\\", "/") + destination = os.path.join( + instance.data["resourcesDir"], + "collections", + predicted_palette_name, + source.replace(data_path, "")[1:] + ) + transfers.append((source, destination.replace("\\", "/"))) + + data["transfers"] = transfers + + self.log.info(data) + instance.data.update(data) diff --git a/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py new file mode 100644 index 0000000000..14bcc71da6 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_arnold_scene_source.py @@ -0,0 +1,198 @@ +import os +from collections import defaultdict +import json + +from maya import cmds +import arnold + +from openpype.pipeline import publish +from openpype.hosts.maya.api import lib + + +class ExtractArnoldSceneSource(publish.Extractor): + """Extract the content of the instance to an Arnold Scene Source file.""" + + label = "Extract Arnold Scene Source" + hosts = ["maya"] + families = ["ass"] + asciiAss = False + + def process(self, instance): + staging_dir = self.staging_dir(instance) + file_path = os.path.join(staging_dir, "{}.ass".format(instance.name)) + + # Mask + mask = arnold.AI_NODE_ALL + + node_types = { + "options": arnold.AI_NODE_OPTIONS, + "camera": arnold.AI_NODE_CAMERA, + "light": arnold.AI_NODE_LIGHT, + "shape": arnold.AI_NODE_SHAPE, + "shader": arnold.AI_NODE_SHADER, + "override": arnold.AI_NODE_OVERRIDE, + "driver": arnold.AI_NODE_DRIVER, + "filter": arnold.AI_NODE_FILTER, + "color_manager": arnold.AI_NODE_COLOR_MANAGER, + "operator": arnold.AI_NODE_OPERATOR + } + + for key in node_types.keys(): + if instance.data.get("mask" + key.title()): + mask = mask ^ node_types[key] + + # Motion blur + attribute_data = { + "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( + "motionBlur", True + ), + "defaultArnoldRenderOptions.motion_steps": instance.data.get( + "motionBlurKeys", 2 + ), + "defaultArnoldRenderOptions.motion_frames": instance.data.get( + "motionBlurLength", 0.5 + ) + } + + # Write out .ass file + kwargs = { + "filename": file_path, + "startFrame": instance.data.get("frameStartHandle", 1), + "endFrame": instance.data.get("frameEndHandle", 1), + "frameStep": instance.data.get("step", 1), + "selected": True, + "asciiAss": self.asciiAss, + "shadowLinks": True, + "lightLinks": True, + "boundingBox": True, + "expandProcedurals": instance.data.get("expandProcedurals", False), + "camera": instance.data["camera"], + "mask": mask + } + + filenames, nodes_by_id = self._extract( + instance.data["contentMembers"], attribute_data, kwargs + ) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "ass", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"] + } + + instance.data["representations"].append(representation) + + json_path = os.path.join(staging_dir, "{}.json".format(instance.name)) + with open(json_path, "w") as f: + json.dump(nodes_by_id, f) + + representation = { + "name": "json", + "ext": "json", + "files": os.path.basename(json_path), + "stagingDir": staging_dir + } + + instance.data["representations"].append(representation) + + self.log.info( + "Extracted instance 
{} to: {}".format(instance.name, staging_dir) + ) + + # Extract proxy. + if not instance.data.get("proxy", []): + return + + kwargs["filename"] = file_path.replace(".ass", "_proxy.ass") + filenames, _ = self._extract( + instance.data["proxy"], attribute_data, kwargs + ) + + representation = { + "name": "proxy", + "ext": "ass", + "files": filenames if len(filenames) > 1 else filenames[0], + "stagingDir": staging_dir, + "frameStart": kwargs["startFrame"], + "outputName": "proxy" + } + + instance.data["representations"].append(representation) + + def _extract(self, nodes, attribute_data, kwargs): + self.log.info( + "Writing {} with:\n{}".format(kwargs["filename"], kwargs) + ) + filenames = [] + nodes_by_id = defaultdict(list) + # Duplicating nodes so they are direct children of the world. This + # makes the hierarchy of any exported ass file the same. + with lib.delete_after() as delete_bin: + duplicate_nodes = [] + for node in nodes: + # Only interested in transforms: + if cmds.nodeType(node) != "transform": + continue + + # Only interested in transforms with shapes. + shapes = cmds.listRelatives( + node, shapes=True, noIntermediate=True + ) + if not shapes: + continue + + duplicate_transform = cmds.duplicate(node)[0] + + if cmds.listRelatives(duplicate_transform, parent=True): + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + basename = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + duplicate_transform = cmds.rename( + duplicate_transform, basename + ) + + # Discard children nodes that are not shapes + shapes = cmds.listRelatives( + duplicate_transform, shapes=True, fullPath=True + ) + children = cmds.listRelatives( + duplicate_transform, children=True, fullPath=True + ) + cmds.delete(set(children) - set(shapes)) + + duplicate_nodes.append(duplicate_transform) + duplicate_nodes.extend(shapes) + delete_bin.append(duplicate_transform) + + # Copy cbId to mtoa_constant. + for node in duplicate_nodes: + # Converting Maya hierarchy separator "|" to Arnold + # separator "/". 
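# Aside, with an invented path: the node "|char_GRP|geo_GRP|bodyShape" is
# recorded as "/char_GRP/geo_GRP/bodyShape", matching how nodes are addressed
# inside the exported .ass file:
#
#     >>> "|char_GRP|geo_GRP|bodyShape".replace("|", "/")
#     '/char_GRP/geo_GRP/bodyShape'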
+ nodes_by_id[lib.get_id(node)].append(node.replace("|", "/")) + + with lib.attribute_values(attribute_data): + with lib.maintained_selection(): + self.log.info( + "Writing: {}".format(duplicate_nodes) + ) + cmds.select(duplicate_nodes, noExpand=True) + + self.log.info( + "Extracting ass sequence with: {}".format(kwargs) + ) + + exported_files = cmds.arnoldExportAss(**kwargs) + + for file in exported_files: + filenames.append(os.path.split(file)[1]) + + self.log.info("Exported: {}".format(filenames)) + + return filenames, nodes_by_id diff --git a/openpype/hosts/maya/plugins/publish/extract_ass.py b/openpype/hosts/maya/plugins/publish/extract_ass.py deleted file mode 100644 index 049f256a7a..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_ass.py +++ /dev/null @@ -1,106 +0,0 @@ -import os - -from maya import cmds -import arnold - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection, attribute_values - - -class ExtractAssStandin(publish.Extractor): - """Extract the content of the instance to a ass file""" - - label = "Arnold Scene Source (.ass)" - hosts = ["maya"] - families = ["ass"] - asciiAss = False - - def process(self, instance): - staging_dir = self.staging_dir(instance) - filename = "{}.ass".format(instance.name) - filenames = [] - file_path = os.path.join(staging_dir, filename) - - # Mask - mask = arnold.AI_NODE_ALL - - node_types = { - "options": arnold.AI_NODE_OPTIONS, - "camera": arnold.AI_NODE_CAMERA, - "light": arnold.AI_NODE_LIGHT, - "shape": arnold.AI_NODE_SHAPE, - "shader": arnold.AI_NODE_SHADER, - "override": arnold.AI_NODE_OVERRIDE, - "driver": arnold.AI_NODE_DRIVER, - "filter": arnold.AI_NODE_FILTER, - "color_manager": arnold.AI_NODE_COLOR_MANAGER, - "operator": arnold.AI_NODE_OPERATOR - } - - for key in node_types.keys(): - if instance.data.get("mask" + key.title()): - mask = mask ^ node_types[key] - - # Motion blur - values = { - "defaultArnoldRenderOptions.motion_blur_enable": instance.data.get( - "motionBlur", True - ), - "defaultArnoldRenderOptions.motion_steps": instance.data.get( - "motionBlurKeys", 2 - ), - "defaultArnoldRenderOptions.motion_frames": instance.data.get( - "motionBlurLength", 0.5 - ) - } - - # Write out .ass file - kwargs = { - "filename": file_path, - "startFrame": instance.data.get("frameStartHandle", 1), - "endFrame": instance.data.get("frameEndHandle", 1), - "frameStep": instance.data.get("step", 1), - "selected": True, - "asciiAss": self.asciiAss, - "shadowLinks": True, - "lightLinks": True, - "boundingBox": True, - "expandProcedurals": instance.data.get("expandProcedurals", False), - "camera": instance.data["camera"], - "mask": mask - } - - self.log.info("Writing: '%s'" % file_path) - with attribute_values(values): - with maintained_selection(): - self.log.info( - "Writing: {}".format(instance.data["setMembers"]) - ) - cmds.select(instance.data["setMembers"], noExpand=True) - - self.log.info( - "Extracting ass sequence with: {}".format(kwargs) - ) - - exported_files = cmds.arnoldExportAss(**kwargs) - - for file in exported_files: - filenames.append(os.path.split(file)[1]) - - self.log.info("Exported: {}".format(filenames)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ass', - 'ext': 'ass', - 'files': filenames if len(filenames) > 1 else filenames[0], - "stagingDir": staging_dir, - 'frameStart': kwargs["startFrame"] - } - - instance.data["representations"].append(representation) - - self.log.info("Extracted 
instance '%s' to: %s" - % (instance.name, staging_dir)) diff --git a/openpype/hosts/maya/plugins/publish/extract_assproxy.py b/openpype/hosts/maya/plugins/publish/extract_assproxy.py deleted file mode 100644 index 4937a28a9e..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_assproxy.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import contextlib - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import maintained_selection - - -class ExtractAssProxy(publish.Extractor): - """Extract proxy model as Maya Ascii to use as arnold standin - - - """ - - order = publish.Extractor.order + 0.2 - label = "Ass Proxy (Maya ASCII)" - hosts = ["maya"] - families = ["ass"] - - def process(self, instance): - - @contextlib.contextmanager - def unparent(root): - """Temporarily unparent `root`""" - parent = cmds.listRelatives(root, parent=True) - if parent: - cmds.parent(root, world=True) - yield - self.log.info("{} - {}".format(root, parent)) - cmds.parent(root, parent) - else: - yield - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}.ma".format(instance.name) - path = os.path.join(stagingdir, filename) - - # Perform extraction - self.log.info("Performing extraction..") - - # Get only the shape contents we need in such a way that we avoid - # taking along intermediateObjects - proxy = instance.data.get('proxy', None) - - if not proxy: - self.log.info("no proxy mesh") - return - - members = cmds.ls(proxy, - dag=True, - transforms=True, - noIntermediate=True) - self.log.info(members) - - with maintained_selection(): - with unparent(members[0]): - cmds.select(members, noExpand=True) - cmds.file(path, - force=True, - typ="mayaAscii", - exportSelected=True, - preserveReferences=False, - channels=False, - constraints=False, - expressions=False, - constructionHistory=False) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'ma', - 'ext': 'ma', - 'files': filename, - "stagingDir": stagingdir - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/openpype/hosts/maya/plugins/publish/extract_gltf.py b/openpype/hosts/maya/plugins/publish/extract_gltf.py index f5ceed5f33..ac258ffb3d 100644 --- a/openpype/hosts/maya/plugins/publish/extract_gltf.py +++ b/openpype/hosts/maya/plugins/publish/extract_gltf.py @@ -22,6 +22,8 @@ class ExtractGLB(publish.Extractor): self.log.info("Extracting GLB to: {}".format(path)) + cmds.loadPlugin("maya2glTF", quiet=True) + nodes = instance[:] self.log.info("Instance: {0}".format(nodes)) @@ -45,6 +47,7 @@ class ExtractGLB(publish.Extractor): "glb": True, "vno": True # visibleNodeOnly } + with lib.maintained_selection(): cmds.select(nodes, hi=True, noExpand=True) extract_gltf(staging_dir, diff --git a/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py new file mode 100644 index 0000000000..422f5ad019 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_gpu_cache.py @@ -0,0 +1,65 @@ +import json + +from maya import cmds + +from openpype.pipeline import publish + + +class ExtractGPUCache(publish.Extractor): + """Extract the content of the instance to a GPU cache file.""" + + label = "GPU Cache" + hosts = ["maya"] + families = ["model", "animation", "pointcache"] + step = 1.0 + stepSave = 1 + optimize = True + optimizationThreshold = 40000 + 
optimizeAnimationsForMotionBlur = True + writeMaterials = True + useBaseTessellation = True + + def process(self, instance): + cmds.loadPlugin("gpuCache", quiet=True) + + staging_dir = self.staging_dir(instance) + filename = "{}_gpu_cache".format(instance.name) + + # Write out GPU cache file. + kwargs = { + "directory": staging_dir, + "fileName": filename, + "saveMultipleFiles": False, + "simulationRate": self.step, + "sampleMultiplier": self.stepSave, + "optimize": self.optimize, + "optimizationThreshold": self.optimizationThreshold, + "optimizeAnimationsForMotionBlur": ( + self.optimizeAnimationsForMotionBlur + ), + "writeMaterials": self.writeMaterials, + "useBaseTessellation": self.useBaseTessellation + } + self.log.debug( + "Extract {} with:\n{}".format( + instance[:], json.dumps(kwargs, indent=4, sort_keys=True) + ) + ) + cmds.gpuCache(instance[:], **kwargs) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": "gpu_cache", + "ext": "abc", + "files": filename + ".abc", + "stagingDir": staging_dir, + "outputName": "gpu_cache" + } + + instance.data["representations"].append(representation) + + self.log.info( + "Extracted instance {} to: {}".format(instance.name, staging_dir) + ) diff --git a/openpype/hosts/maya/plugins/publish/extract_look.py b/openpype/hosts/maya/plugins/publish/extract_look.py index df07a674dc..3cc95a0b2e 100644 --- a/openpype/hosts/maya/plugins/publish/extract_look.py +++ b/openpype/hosts/maya/plugins/publish/extract_look.py @@ -1,20 +1,23 @@ # -*- coding: utf-8 -*- """Maya look extractor.""" -import os -import sys -import json -import tempfile -import platform -import contextlib -import subprocess +from abc import ABCMeta, abstractmethod from collections import OrderedDict - -from maya import cmds # noqa +import contextlib +import json +import logging +import os +import platform +import tempfile +import six +import attr import pyblish.api -from openpype.lib import source_hash, run_subprocess -from openpype.pipeline import legacy_io, publish +from maya import cmds # noqa + +from openpype.lib.vendor_bin_utils import find_executable +from openpype.lib import source_hash, run_subprocess, get_oiio_tools_path +from openpype.pipeline import legacy_io, publish, KnownPublishError from openpype.hosts.maya.api import lib # Modes for transfer @@ -22,34 +25,18 @@ COPY = 1 HARDLINK = 2 -def escape_space(path): - """Ensure path is enclosed by quotes to allow paths with spaces""" - return '"{}"'.format(path) if " " in path else path - - -def get_ocio_config_path(profile_folder): - """Path to OpenPype vendorized OCIO. - - Vendorized OCIO config file path is grabbed from the specific path - hierarchy specified below. - - "{OPENPYPE_ROOT}/vendor/OpenColorIO-Configs/{profile_folder}/config.ocio" - Args: - profile_folder (str): Name of folder to grab config file from. - - Returns: - str: Path to vendorized config file. - """ - - return os.path.join( - os.environ["OPENPYPE_ROOT"], - "vendor", - "bin", - "ocioconfig", - "OpenColorIOConfigs", - profile_folder, - "config.ocio" - ) +@attr.s +class TextureResult(object): + """The resulting texture of a processed file for a resource""" + # Path to the file + path = attr.ib() + # Colorspace of the resulting texture. This might not be the input + # colorspace of the texture if a TextureProcessor has processed the file. + colorspace = attr.ib() + # Hash generated for the texture using openpype.lib.source_hash + file_hash = attr.ib() + # The transfer mode, e.g. 
COPY or HARDLINK + transfer_mode = attr.ib() def find_paths_by_hash(texture_hash): @@ -68,61 +55,6 @@ def find_paths_by_hash(texture_hash): return legacy_io.distinct(key, {"type": "version"}) -def maketx(source, destination, args, logger): - """Make `.tx` using `maketx` with some default settings. - - The settings are based on default as used in Arnold's - txManager in the scene. - This function requires the `maketx` executable to be - on the `PATH`. - - Args: - source (str): Path to source file. - destination (str): Writing destination path. - args (list): Additional arguments for `maketx`. - logger (logging.Logger): Logger to log messages to. - - Returns: - str: Output of `maketx` command. - - """ - from openpype.lib import get_oiio_tools_path - - maketx_path = get_oiio_tools_path("maketx") - - if not maketx_path: - print( - "OIIO tool not found in {}".format(maketx_path)) - raise AssertionError("OIIO tool not found") - - subprocess_args = [ - maketx_path, - "-v", # verbose - "-u", # update mode - # unpremultiply before conversion (recommended when alpha present) - "--unpremult", - "--checknan", - # use oiio-optimized settings for tile-size, planarconfig, metadata - "--oiio", - "--filter", "lanczos3", - source - ] - - subprocess_args.extend(args) - subprocess_args.extend(["-o", destination]) - - cmd = " ".join(subprocess_args) - logger.debug(cmd) - - try: - out = run_subprocess(subprocess_args) - except Exception: - logger.error("Maketx converion failed", exc_info=True) - raise - - return out - - @contextlib.contextmanager def no_workspace_dir(): """Force maya to a fake temporary workspace directory. @@ -155,6 +87,303 @@ def no_workspace_dir(): os.rmdir(fake_workspace_dir) +@six.add_metaclass(ABCMeta) +class TextureProcessor: + + extension = None + + def __init__(self, log=None): + if log is None: + log = logging.getLogger(self.__class__.__name__) + self.log = log + + def apply_settings(self, system_settings, project_settings): + """Apply OpenPype system/project settings to the TextureProcessor + + Args: + system_settings (dict): OpenPype system settings + project_settings (dict): OpenPype project settings + + Returns: + None + + """ + pass + + @abstractmethod + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the `source` texture. + + Must be implemented on inherited class. + + This must always return a TextureResult even when it does not generate + a texture. If it doesn't generate a texture then it should return a + TextureResult using the input path and colorspace. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. + color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. 
+ + """ + pass + + def __repr__(self): + # Log instance as class name + return self.__class__.__name__ + + +class MakeRSTexBin(TextureProcessor): + """Make `.rstexbin` using `redshiftTextureProcessor`""" + + extension = ".rstexbin" + + def process(self, + source, + colorspace, + color_management, + staging_dir): + + texture_processor_path = self.get_redshift_tool( + "redshiftTextureProcessor" + ) + if not texture_processor_path: + raise KnownPublishError("Must have Redshift available.") + + subprocess_args = [ + texture_processor_path, + source + ] + + hash_args = ["rstex"] + texture_hash = source_hash(source, *hash_args) + + # Redshift stores the output texture next to the input but with + # the extension replaced to `.rstexbin` + basename, ext = os.path.splitext(source) + destination = "{}{}".format(basename, self.extension) + + self.log.debug(" ".join(subprocess_args)) + try: + run_subprocess(subprocess_args) + except Exception: + self.log.error("Texture .rstexbin conversion failed", + exc_info=True) + raise + + return TextureResult( + path=destination, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=COPY + ) + + @staticmethod + def get_redshift_tool(tool_name): + """Path to redshift texture processor. + + On Windows it adds .exe extension if missing from tool argument. + + Args: + tool_name (string): Tool name. + + Returns: + str: Full path to redshift texture processor executable. + """ + if "REDSHIFT_COREDATAPATH" not in os.environ: + raise RuntimeError("Must have Redshift available.") + + redshift_tool_path = os.path.join( + os.environ["REDSHIFT_COREDATAPATH"], + "bin", + tool_name + ) + + return find_executable(redshift_tool_path) + + +class MakeTX(TextureProcessor): + """Make `.tx` using `maketx` with some default settings. + + Some hardcoded arguments passed to `maketx` are based on the defaults used + in Arnold's txManager tool. + + """ + + extension = ".tx" + + def __init__(self, log=None): + super(MakeTX, self).__init__(log=log) + self.extra_args = [] + + def apply_settings(self, system_settings, project_settings): + # Allow extra maketx arguments from project settings + args_settings = ( + project_settings["maya"]["publish"] + .get("ExtractLook", {}).get("maketx_arguments", []) + ) + extra_args = [] + for arg_data in args_settings: + argument = arg_data["argument"] + parameters = arg_data["parameters"] + if not argument: + self.log.debug("Ignoring empty parameter from " + "`maketx_arguments` setting..") + continue + + extra_args.append(argument) + extra_args.extend(parameters) + + self.extra_args = extra_args + + def process(self, + source, + colorspace, + color_management, + staging_dir): + """Process the texture. + + This function requires the `maketx` executable to be available in an + OpenImageIO toolset detectable by OpenPype. + + Args: + source (str): Path to source file. + colorspace (str): Colorspace of the source file. + color_management (dict): Maya Color management data from + `lib.get_color_management_preferences` + staging_dir (str): Output directory to write to. + + Returns: + TextureResult: The resulting texture information. + + """ + + maketx_path = get_oiio_tools_path("maketx") + + if not maketx_path: + raise AssertionError( + "OIIO 'maketx' tool not found. Result: {}".format(maketx_path) + ) + + # Define .tx filepath in staging if source file is not .tx + fname, ext = os.path.splitext(os.path.basename(source)) + if ext == ".tx": + # Do nothing if the source file is already a .tx file. 
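+            # Note: the texture is passed through untouched here, so its
+            # hash is computed without any conversion arguments.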
+            return TextureResult(
+                path=source,
+                file_hash=source_hash(source),
+                colorspace=colorspace,
+                transfer_mode=COPY
+            )
+
+        # Hardcoded default arguments for maketx conversion based on Arnold's
+        # txManager in Maya
+        args = [
+            # unpremultiply before conversion (recommended when alpha present)
+            "--unpremult",
+            # use oiio-optimized settings for tile-size, planarconfig, metadata
+            "--oiio",
+            "--filter", "lanczos3",
+        ]
+        if color_management["enabled"]:
+            config_path = color_management["config"]
+            if not os.path.exists(config_path):
+                raise RuntimeError("OCIO config not found at: "
+                                   "{}".format(config_path))
+
+            render_colorspace = color_management["rendering_space"]
+
+            self.log.info("tx: converting colorspace {0} "
+                          "-> {1}".format(colorspace,
+                                          render_colorspace))
+            args.extend(["--colorconvert", colorspace, render_colorspace])
+            args.extend(["--colorconfig", config_path])
+
+        else:
+            # Maya color management is disabled. We cannot rely on an OCIO
+            # config, so no color conversion is applied.
+            self.log.debug("tx: Maya color management is disabled. No color "
+                           "conversion will be applied to the .tx conversion "
+                           "for: {}".format(source))
+            # Assume linear
+            render_colorspace = "linear"
+
+        # Note: The texture hash is only reliable if we include any potential
+        # conversion arguments provided to e.g. `maketx`
+        hash_args = ["maketx"] + args + self.extra_args
+        texture_hash = source_hash(source, *hash_args)
+
+        # Ensure folder exists
+        resources_dir = os.path.join(staging_dir, "resources")
+        if not os.path.exists(resources_dir):
+            os.makedirs(resources_dir)
+
+        self.log.info("Generating .tx file for %s .." % source)
+
+        subprocess_args = [
+            maketx_path,
+            "-v",  # verbose
+            "-u",  # update mode
+            # --checknan doesn't influence the output file but aborts the
+            # conversion if it finds any. So we can avoid it for the file hash
+            "--checknan",
+            source
+        ]
+
+        subprocess_args.extend(args)
+        if self.extra_args:
+            subprocess_args.extend(self.extra_args)
+
+        # Add source hash attribute after other arguments for log readability
+        # Note: argument is excluded from the hash since it is the hash itself
+        subprocess_args.extend([
+            "--sattrib",
+            "sourceHash",
+            texture_hash
+        ])
+
+        destination = os.path.join(resources_dir, fname + ".tx")
+        subprocess_args.extend(["-o", destination])
+
+        # We want to make sure we are explicit about what OCIO config gets
+        # used. So when we supply no --colorconfig flag, we ensure no fallback
+        # to an OCIO env var occurs.
+        env = os.environ.copy()
+        env.pop("OCIO", None)
+
+        self.log.debug(" ".join(subprocess_args))
+        try:
+            run_subprocess(subprocess_args, env=env)
+        except Exception:
+            self.log.error("Texture maketx conversion failed",
+                           exc_info=True)
+            raise
+
+        return TextureResult(
+            path=destination,
+            file_hash=texture_hash,
+            colorspace=render_colorspace,
+            transfer_mode=COPY
+        )
+
+    @staticmethod
+    def _has_arnold():
+        """Return whether the arnold package is available and importable."""
+        try:
+            import arnold  # noqa: F401
+            return True
+        except (ImportError, ModuleNotFoundError):
+            return False
+
+
 class ExtractLook(publish.Extractor):
     """Extract Look (Maya Scene + JSON)
@@ -171,22 +400,6 @@ class ExtractLook(publish.Extractor):
     scene_type = "ma"
     look_data_type = "json"
 
-    @staticmethod
-    def get_renderer_name():
-        """Get renderer name from Maya.
-
-        Returns:
-            str: Renderer name.
- - """ - renderer = cmds.getAttr( - "defaultRenderGlobals.currentRenderer" - ).lower() - # handle various renderman names - if renderer.startswith("renderman"): - renderer = "renderman" - return renderer - def get_maya_scene_type(self, instance): """Get Maya scene type from settings. @@ -226,16 +439,12 @@ class ExtractLook(publish.Extractor): dir_path = self.staging_dir(instance) maya_fname = "{0}.{1}".format(instance.name, self.scene_type) json_fname = "{0}.{1}".format(instance.name, self.look_data_type) - - # Make texture dump folder maya_path = os.path.join(dir_path, maya_fname) json_path = os.path.join(dir_path, json_fname) - self.log.info("Performing extraction..") - # Remove all members of the sets so they are not included in the # exported file by accident - self.log.info("Extract sets (%s) ..." % _scene_type) + self.log.info("Processing sets..") lookdata = instance.data["lookData"] relationships = lookdata["relationships"] sets = list(relationships.keys()) @@ -243,13 +452,36 @@ class ExtractLook(publish.Extractor): self.log.info("No sets found") return - results = self.process_resources(instance, staging_dir=dir_path) + # Specify texture processing executables to activate + # TODO: Load these more dynamically once we support more processors + processors = [] + context = instance.context + for key, Processor in { + # Instance data key to texture processor mapping + "maketx": MakeTX, + "rstex": MakeRSTexBin + }.items(): + if instance.data.get(key, False): + processor = Processor() + processor.apply_settings(context.data["system_settings"], + context.data["project_settings"]) + processors.append(processor) + + if processors: + self.log.debug("Collected texture processors: " + "{}".format(processors)) + + self.log.debug("Processing resources..") + results = self.process_resources(instance, + staging_dir=dir_path, + processors=processors) transfers = results["fileTransfers"] hardlinks = results["fileHardlinks"] hashes = results["fileHashes"] remap = results["attrRemap"] # Extract in correct render layer + self.log.info("Extracting look maya scene file: {}".format(maya_path)) layer = instance.data.get("renderlayer", "defaultRenderLayer") with lib.renderlayer(layer): # TODO: Ensure membership edits don't become renderlayer overrides @@ -257,7 +489,7 @@ class ExtractLook(publish.Extractor): # To avoid Maya trying to automatically remap the file # textures relative to the `workspace -directory` we force # it to a fake temporary workspace. This fixes textures - # getting incorrectly remapped. (LKD-17, PLN-101) + # getting incorrectly remapped. 
with no_workspace_dir(): with lib.attribute_values(remap): with lib.maintained_selection(): @@ -321,40 +553,38 @@ class ExtractLook(publish.Extractor): # Source hash for the textures instance.data["sourceHashes"] = hashes - """ - self.log.info("Returning colorspaces to their original values ...") - for attr, value in remap.items(): - self.log.info(" - {}: {}".format(attr, value)) - cmds.setAttr(attr, value, type="string") - """ self.log.info("Extracted instance '%s' to: %s" % (instance.name, maya_path)) - def process_resources(self, instance, staging_dir): + def _set_resource_result_colorspace(self, resource, colorspace): + """Update resource resulting colorspace after texture processing""" + if "result_color_space" in resource: + if resource["result_color_space"] == colorspace: + return + + self.log.warning( + "Resource already has a resulting colorspace but is now " + "being overridden to a new one: {} -> {}".format( + resource["result_color_space"], colorspace + ) + ) + resource["result_color_space"] = colorspace + + def process_resources(self, instance, staging_dir, processors): + """Process all resources in the instance. + + It is assumed that all resources are nodes using file textures. + + Extract the textures to transfer, possibly convert with maketx and + remap the node paths to the destination path. Note that a source + might be included more than once amongst the resources as they could + be the input file to multiple nodes. + + """ - # Extract the textures to transfer, possibly convert with maketx and - # remap the node paths to the destination path. Note that a source - # might be included more than once amongst the resources as they could - # be the input file to multiple nodes. resources = instance.data["resources"] - do_maketx = instance.data.get("maketx", False) + color_management = lib.get_color_management_preferences() - # Collect all unique files used in the resources - files_metadata = {} - for resource in resources: - # Preserve color space values (force value after filepath change) - # This will also trigger in the same order at end of context to - # ensure after context it's still the original value. - color_space = resource.get("color_space") - - for f in resource["files"]: - files_metadata[os.path.normpath(f)] = { - "color_space": color_space} - - # Process the resource files - transfers = [] - hardlinks = [] - hashes = {} # Temporary fix to NOT create hardlinks on windows machines if platform.system().lower() == "windows": self.log.info( @@ -364,86 +594,114 @@ class ExtractLook(publish.Extractor): else: force_copy = instance.data.get("forceCopy", False) - for filepath in files_metadata: + destinations_cache = {} - linearize = False - if do_maketx and files_metadata[filepath]["color_space"].lower() == "srgb": # noqa: E501 - linearize = True - # set its file node to 'raw' as tx will be linearized - files_metadata[filepath]["color_space"] = "Raw" + def get_resource_destination_cached(path): + """Get resource destination with cached result per filepath""" + if path not in destinations_cache: + destination = self.get_resource_destination( + path, instance.data["resourcesDir"], processors) + destinations_cache[path] = destination + return destinations_cache[path] - # if do_maketx: - # color_space = "Raw" - - source, mode, texture_hash = self._process_texture( - filepath, - do_maketx, - staging=staging_dir, - linearize=linearize, - force=force_copy - ) - destination = self.resource_destination(instance, - source, - do_maketx) - - # Force copy is specified. 
- if force_copy: - mode = COPY - - if mode == COPY: - transfers.append((source, destination)) - self.log.info('file will be copied {} -> {}'.format( - source, destination)) - elif mode == HARDLINK: - hardlinks.append((source, destination)) - self.log.info('file will be hardlinked {} -> {}'.format( - source, destination)) - - # Store the hashes from hash to destination to include in the - # database - hashes[texture_hash] = destination - - # Remap the resources to the destination path (change node attributes) - destinations = {} - remap = OrderedDict() # needs to be ordered, see color space values + # Process all resource's individual files + processed_files = {} + transfers = [] + hardlinks = [] + hashes = {} + remap = OrderedDict() for resource in resources: - source = os.path.normpath(resource["source"]) - if source not in destinations: - # Cache destination as source resource might be included - # multiple times - destinations[source] = self.resource_destination( - instance, source, do_maketx + colorspace = resource["color_space"] + + for filepath in resource["files"]: + filepath = os.path.normpath(filepath) + + if filepath in processed_files: + # The file was already processed, likely due to usage by + # another resource in the scene. We confirm here it + # didn't do color spaces different than the current + # resource. + processed_file = processed_files[filepath] + self.log.debug( + "File was already processed. Likely used by another " + "resource too: {}".format(filepath) + ) + + if colorspace != processed_file["color_space"]: + self.log.warning( + "File '{}' was already processed using colorspace " + "'{}' instead of the current resource's " + "colorspace '{}'. The already processed texture " + "result's colorspace '{}' will be used." + "".format(filepath, + colorspace, + processed_file["color_space"], + processed_file["result_color_space"])) + + self._set_resource_result_colorspace( + resource, + colorspace=processed_file["result_color_space"] + ) + continue + + texture_result = self._process_texture( + filepath, + processors=processors, + staging_dir=staging_dir, + force_copy=force_copy, + color_management=color_management, + colorspace=colorspace ) + # Set the resulting color space on the resource + self._set_resource_result_colorspace( + resource, colorspace=texture_result.colorspace + ) + + processed_files[filepath] = { + "color_space": colorspace, + "result_color_space": texture_result.colorspace, + } + + source = texture_result.path + destination = get_resource_destination_cached(source) + if force_copy or texture_result.transfer_mode == COPY: + transfers.append((source, destination)) + self.log.info('file will be copied {} -> {}'.format( + source, destination)) + elif texture_result.transfer_mode == HARDLINK: + hardlinks.append((source, destination)) + self.log.info('file will be hardlinked {} -> {}'.format( + source, destination)) + + # Store the hashes from hash to destination to include in the + # database + hashes[texture_result.file_hash] = destination + + # Set up remapping attributes for the node during the publish + # The order of these can be important if one attribute directly + # affects another, e.g. 
we set colorspace after filepath because
+            # maya sometimes tries to guess the colorspace when changing
+            # filepaths (which is avoidable, but we don't want to have those
+            # attributes changed in the resulting publish)
+            # Remap filepath to publish destination
+            # TODO It would be much better if we could use the destination path
+            #  from the actual processed texture results, but since the
+            #  attribute will need to preserve tokens like <udim>, <f> etc for
+            #  now we will define the output path from the attribute value
+            #  including the tokens to persist them.
+            filepath_attr = resource["attribute"]
+            remap[filepath_attr] = get_resource_destination_cached(
+                resource["source"]
+            )
+
             # Preserve color space values (force value after filepath change)
             # This will also trigger in the same order at end of context to
             # ensure after context it's still the original value.
-            color_space_attr = resource["node"] + ".colorSpace"
-            try:
-                color_space = cmds.getAttr(color_space_attr)
-            except ValueError:
-                # node doesn't have color space attribute
-                color_space = "Raw"
-            else:
-                # get the resolved files
-                metadata = files_metadata.get(source)
-                # if the files are unresolved from `source`
-                # assume color space from the first file of
-                # the resource
-                if not metadata:
-                    first_file = next(iter(resource.get(
-                        "files", [])), None)
-                    if not first_file:
-                        continue
-                    first_filepath = os.path.normpath(first_file)
-                    metadata = files_metadata[first_filepath]
-                if metadata["color_space"] == "Raw":
-                    # set color space to raw if we linearized it
-                    color_space = "Raw"
-            # Remap file node filename to destination
-            remap[color_space_attr] = color_space
-            attr = resource["attribute"]
-            remap[attr] = destinations[source]
+            node = resource["node"]
+            if cmds.attributeQuery("colorSpace", node=node, exists=True):
+                color_space_attr = "{}.colorSpace".format(node)
+                remap[color_space_attr] = resource["result_color_space"]
 
         self.log.info("Finished remapping destinations ...")
 
@@ -454,96 +712,131 @@ class ExtractLook(publish.Extractor):
             "attrRemap": remap,
         }
 
-    def resource_destination(self, instance, filepath, do_maketx):
+    def get_resource_destination(self, filepath, resources_dir, processors):
         """Get resource destination path.
 
         This is utility function to change path if resource file name is
         changed by some external tool like `maketx`.
 
         Args:
-            instance: Current Instance.
-            filepath (str): Resource path
-            do_maketx (bool): Flag if resource is processed by `maketx`.
+            filepath (str): Resource source path
+            resources_dir (str): Destination dir for resources in publish.
+            processors (list): Texture processors converting resource.
 
         Returns:
             str: Path to resource file
 
         """
-        resources_dir = instance.data["resourcesDir"]
-
         # Compute destination location
         basename, ext = os.path.splitext(os.path.basename(filepath))
 
-        # If `maketx` then the texture will always end with .tx
-        if do_maketx:
-            ext = ".tx"
+        # Get extension from the last processor
+        for processor in reversed(processors):
+            processor_ext = processor.extension
+            if processor_ext and ext != processor_ext:
+                self.log.debug("Processor {} overrides extension to '{}' "
+                               "for path: {}".format(processor,
+                                                     processor_ext,
+                                                     filepath))
+                ext = processor_ext
+                break
 
         return os.path.join(
            resources_dir, basename + ext
        )
 
-    def _process_texture(self, filepath, do_maketx, staging, linearize, force):
-        """Process a single texture file on disk for publishing.
-        This will:
-        1. Check whether it's already published, if so it will do hardlink
-        2.
If not published and maketx is enabled, generate a new .tx file. - 3. Compute the destination path for the source file. - Args: - filepath (str): The source file path to process. - do_maketx (bool): Whether to produce a .tx file - Returns: - """ - - fname, ext = os.path.splitext(os.path.basename(filepath)) - - args = [] - if do_maketx: - args.append("maketx") - texture_hash = source_hash(filepath, *args) + def _get_existing_hashed_texture(self, texture_hash): + """Return the first found filepath from a texture hash""" # If source has been published before with the same settings, # then don't reprocess but hardlink from the original existing = find_paths_by_hash(texture_hash) - if existing and not force: - self.log.info("Found hash in database, preparing hardlink..") + if existing: source = next((p for p in existing if os.path.exists(p)), None) if source: - return source, HARDLINK, texture_hash + return source else: self.log.warning( - ("Paths not found on disk, " - "skipping hardlink: %s") % (existing,) + "Paths not found on disk, " + "skipping hardlink: {}".format(existing) ) - if do_maketx and ext != ".tx": - # Produce .tx file in staging if source file is not .tx - converted = os.path.join(staging, "resources", fname + ".tx") - additional_args = [ - "--sattrib", - "sourceHash", - texture_hash - ] - if linearize: - self.log.info("tx: converting sRGB -> linear") - additional_args.extend(["--colorconvert", "sRGB", "linear"]) + def _process_texture(self, + filepath, + processors, + staging_dir, + force_copy, + color_management, + colorspace): + """Process a single texture file on disk for publishing. - config_path = get_ocio_config_path("nuke-default") - additional_args.extend(["--colorconfig", config_path]) - # Ensure folder exists - if not os.path.exists(os.path.dirname(converted)): - os.makedirs(os.path.dirname(converted)) + This will: + 1. Check whether it's already published, if so it will do hardlink + (if the texture hash is found and force copy is not enabled) + 2. It will process the texture using the supplied texture + processors like MakeTX and MakeRSTexBin if enabled. + 3. Compute the destination path for the source file. - self.log.info("Generating .tx file for %s .." % filepath) - maketx( - filepath, - converted, - additional_args, - self.log + Args: + filepath (str): The source file path to process. + processors (list): List of TextureProcessor processing the texture + staging_dir (str): The staging directory to write to. + force_copy (bool): Whether to force a copy even if a file hash + might have existed already in the project, otherwise + hardlinking the existing file is allowed. + color_management (dict): Maya's Color Management settings from + `lib.get_color_management_preferences` + colorspace (str): The source colorspace of the resources this + texture belongs to. + + Returns: + TextureResult: The texture result information. + """ + + if len(processors) > 1: + raise KnownPublishError( + "More than one texture processor not supported. 
" + "Current processors enabled: {}".format(processors) ) - return converted, COPY, texture_hash + for processor in processors: + self.log.debug("Processing texture {} with processor {}".format( + filepath, processor + )) - return filepath, COPY, texture_hash + processed_result = processor.process(filepath, + colorspace, + color_management, + staging_dir) + if not processed_result: + raise RuntimeError("Texture Processor {} returned " + "no result.".format(processor)) + self.log.info("Generated processed " + "texture: {}".format(processed_result.path)) + + # TODO: Currently all processors force copy instead of allowing + # hardlinks using source hashes. This should be refactored + return processed_result + + # No texture processing for this file + texture_hash = source_hash(filepath) + if not force_copy: + existing = self._get_existing_hashed_texture(filepath) + if existing: + self.log.info("Found hash in database, preparing hardlink..") + return TextureResult( + path=filepath, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=HARDLINK + ) + + return TextureResult( + path=filepath, + file_hash=texture_hash, + colorspace=colorspace, + transfer_mode=COPY + ) class ExtractModelRenderSets(ExtractLook): diff --git a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py index 3769ec3605..c2411ca651 100644 --- a/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py +++ b/openpype/hosts/maya/plugins/publish/extract_maya_scene_raw.py @@ -20,8 +20,7 @@ class ExtractMayaSceneRaw(publish.Extractor): "mayaScene", "setdress", "layout", - "camerarig", - "xgen"] + "camerarig"] scene_type = "ma" def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py index 0628623e88..cf610ac6b4 100644 --- a/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py +++ b/openpype/hosts/maya/plugins/publish/extract_multiverse_usd_over.py @@ -102,7 +102,7 @@ class ExtractMultiverseUsdOverride(publish.Extractor): long=True) self.log.info("Collected object {}".format(members)) - # TODO: Deal with asset, composition, overide with options. + # TODO: Deal with asset, composition, override with options. 
import multiverse time_opts = None diff --git a/openpype/hosts/maya/plugins/publish/extract_playblast.py b/openpype/hosts/maya/plugins/publish/extract_playblast.py index 1f9f9db99a..3ceef6f3d3 100644 --- a/openpype/hosts/maya/plugins/publish/extract_playblast.py +++ b/openpype/hosts/maya/plugins/publish/extract_playblast.py @@ -1,4 +1,6 @@ import os +import json +import contextlib import clique import capture @@ -7,7 +9,16 @@ from openpype.pipeline import publish from openpype.hosts.maya.api import lib from maya import cmds -import pymel.core as pm + + +@contextlib.contextmanager +def panel_camera(panel, camera): + original_camera = cmds.modelPanel(panel, query=True, camera=True) + try: + cmds.modelPanel(panel, edit=True, camera=camera) + yield + finally: + cmds.modelPanel(panel, edit=True, camera=original_camera) class ExtractPlayblast(publish.Extractor): @@ -23,6 +34,18 @@ class ExtractPlayblast(publish.Extractor): families = ["review"] optional = True capture_preset = {} + profiles = None + + def _capture(self, preset): + if os.environ.get("OPENPYPE_DEBUG") == "1": + self.log.debug( + "Using preset: {}".format( + json.dumps(preset, indent=4, sort_keys=True) + ) + ) + + path = capture.capture(log=self.log, **preset) + self.log.debug("playblast path {}".format(path)) def process(self, instance): self.log.info("Extracting capture..") @@ -42,41 +65,50 @@ class ExtractPlayblast(publish.Extractor): self.log.info("start: {}, end: {}".format(start, end)) # get cameras - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] - override_viewport_options = ( - self.capture_preset['Viewport Options'] - ['override_viewport_options'] + task_data = instance.data["anatomyData"].get("task", {}) + capture_preset = lib.get_capture_preset( + task_data.get("name"), + task_data.get("type"), + instance.data["subset"], + instance.context.data["project_settings"], + self.log ) - preset = lib.load_capture_preset(data=self.capture_preset) - # Grab capture presets from the project settings - capture_presets = self.capture_preset + + preset = lib.load_capture_preset(data=capture_preset) + + # "isolate_view" will already have been applied at creation, so we'll + # ignore it here. 
+ preset.pop("isolate_view") + # Set resolution variables from capture presets - width_preset = capture_presets["Resolution"]["width"] - height_preset = capture_presets["Resolution"]["height"] + width_preset = capture_preset["Resolution"]["width"] + height_preset = capture_preset["Resolution"]["height"] + # Set resolution variables from asset values asset_data = instance.data["assetEntity"]["data"] asset_width = asset_data.get("resolutionWidth") asset_height = asset_data.get("resolutionHeight") review_instance_width = instance.data.get("review_width") review_instance_height = instance.data.get("review_height") - preset['camera'] = camera + preset["camera"] = camera # Tests if project resolution is set, # if it is a value other than zero, that value is # used, if not then the asset resolution is # used if review_instance_width and review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height - preset['start_frame'] = start - preset['end_frame'] = end + preset["width"] = asset_width + preset["height"] = asset_height + preset["start_frame"] = start + preset["end_frame"] = end # Enforce persisting camera depth of field camera_options = preset.setdefault("camera_options", {}) @@ -89,14 +121,18 @@ class ExtractPlayblast(publish.Extractor): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True - pm.refresh(f=True) + cmds.refresh(force=True) - refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True)) - pm.currentTime(refreshFrameInt - 1, edit=True) - pm.currentTime(refreshFrameInt, edit=True) + refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True)) + cmds.currentTime(refreshFrameInt - 1, edit=True) + cmds.currentTime(refreshFrameInt, edit=True) + + # Use displayLights setting from instance + key = "displayLights" + preset["viewport_options"][key] = instance.data[key] # Override transparency if requested. transparency = instance.data.get("transparency", 0) @@ -104,8 +140,9 @@ class ExtractPlayblast(publish.Extractor): preset["viewport2_options"]["transparencyAlgorithm"] = transparency # Isolate view is requested by having objects in the set besides a - # camera. - if preset.pop("isolate_view", False) and instance.data.get("isolate"): + # camera. If there is only 1 member it'll be the camera because we + # validate to have 1 camera only. + if instance.data["isolate"] and len(instance.data["setMembers"]) > 1: preset["isolate"] = instance.data["setMembers"] # Show/Hide image planes on request. @@ -117,31 +154,74 @@ class ExtractPlayblast(publish.Extractor): # Disable Pan/Zoom. pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + preset.pop("pan_zoom", None) + preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"] - with lib.maintained_time(): - filename = preset.get("filename", "%TEMP%") + # Need to explicitly enable some viewport changes so the viewport is + # refreshed ahead of playblasting. 
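+        # The current values are queried first so the viewport can be
+        # restored to its pre-capture state once the playblast is done.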
+        keys = [
+            "useDefaultMaterial",
+            "wireframeOnShaded",
+            "xray",
+            "jointXray",
+            "backfaceCulling"
+        ]
+        viewport_defaults = {}
+        for key in keys:
+            viewport_defaults[key] = cmds.modelEditor(
+                instance.data["panel"], query=True, **{key: True}
+            )
+            if preset["viewport_options"][key]:
+                cmds.modelEditor(
+                    instance.data["panel"], edit=True, **{key: True}
+                )
 
-        # Force viewer to False in call to capture because we have our own
-        # viewer opening call to allow a signal to trigger between
-        # playblast and viewer
-        preset['viewer'] = False
+        override_viewport_options = (
+            capture_preset["Viewport Options"]["override_viewport_options"]
+        )
 
-        self.log.info('using viewport preset: {}'.format(preset))
+        # Force viewer to False in call to capture because we have our own
+        # viewer opening call to allow a signal to trigger between
+        # playblast and viewer
+        preset["viewer"] = False
 
-        # Update preset with current panel setting
-        # if override_viewport_options is turned off
-        if not override_viewport_options:
-            panel = cmds.getPanel(withFocus=True)
-            panel_preset = capture.parse_active_view()
-            preset.update(panel_preset)
-            cmds.setFocus(panel)
+        # Update preset with current panel setting
+        # if override_viewport_options is turned off
+        if not override_viewport_options:
+            panel_preset = capture.parse_view(instance.data["panel"])
+            panel_preset.pop("camera")
+            preset.update(panel_preset)
 
-        path = capture.capture(log=self.log, **preset)
+        # Need to ensure Python 2 compatibility.
+        # TODO: Remove once dropping Python 2.
+        if getattr(contextlib, "nested", None):
+            # Python 2 compatibility: contextlib.nested only exists there.
+            with contextlib.nested(
+                lib.maintained_time(),
+                panel_camera(instance.data["panel"], preset["camera"])
+            ):
+                self._capture(preset)
+        else:
+            # Python 3 compatibility: use contextlib.ExitStack instead.
+            with contextlib.ExitStack() as stack:
+                stack.enter_context(lib.maintained_time())
+                stack.enter_context(
+                    panel_camera(instance.data["panel"], preset["camera"])
+                )
 
-        cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom)
+                self._capture(preset)
 
-        self.log.debug("playblast path {}".format(path))
+        # Restoring viewport options.
+ if viewport_defaults: + cmds.modelEditor( + instance.data["panel"], edit=True, **viewport_defaults + ) + + try: + cmds.setAttr( + "{}.panZoomEnabled".format(preset["camera"]), pan_zoom) + except RuntimeError: + self.log.warning("Cannot restore Pan/Zoom settings.") collected_files = os.listdir(stagingdir) patterns = [clique.PATTERNS["frames"]] @@ -149,10 +229,11 @@ class ExtractPlayblast(publish.Extractor): minimum_items=1, patterns=patterns) + filename = preset.get("filename", "%TEMP%") self.log.debug("filename {}".format(filename)) frame_collection = None for collection in collections: - filebase = collection.format('{head}').rstrip(".") + filebase = collection.format("{head}").rstrip(".") self.log.debug("collection head {}".format(filebase)) if filebase in filename: frame_collection = collection @@ -168,7 +249,7 @@ class ExtractPlayblast(publish.Extractor): tags.append("delete") # Add camera node name to representation data - camera_node_name = pm.ls(camera)[0].getTransform().name() + camera_node_name = cmds.listRelatives(camera, parent=True)[0] collected_files = list(frame_collection) # single frame file shouldn't be in list, only as a string @@ -176,15 +257,14 @@ class ExtractPlayblast(publish.Extractor): collected_files = collected_files[0] representation = { - 'name': 'png', - 'ext': 'png', - 'files': collected_files, + "name": capture_preset["Codec"]["compression"], + "ext": capture_preset["Codec"]["compression"], + "files": collected_files, "stagingDir": stagingdir, "frameStart": start, "frameEnd": end, - 'fps': fps, - 'preview': True, - 'tags': tags, - 'camera_name': camera_node_name + "fps": fps, + "tags": tags, + "camera_name": camera_node_name } instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_pointcache.py b/openpype/hosts/maya/plugins/publish/extract_pointcache.py index 7ed73fd5b0..f44c13767c 100644 --- a/openpype/hosts/maya/plugins/publish/extract_pointcache.py +++ b/openpype/hosts/maya/plugins/publish/extract_pointcache.py @@ -23,9 +23,7 @@ class ExtractAlembic(publish.Extractor): label = "Extract Pointcache (Alembic)" hosts = ["maya"] - families = ["pointcache", - "model", - "vrayproxy"] + families = ["pointcache", "model", "vrayproxy.alembic"] targets = ["local", "remote"] def process(self, instance): @@ -41,6 +39,7 @@ class ExtractAlembic(publish.Extractor): attrs = instance.data.get("attr", "").split(";") attrs = [value for value in attrs if value.strip()] + attrs += instance.data.get("userDefinedAttributes", []) attrs += ["cbId"] attr_prefixes = instance.data.get("attrPrefix", "").split(";") @@ -87,6 +86,7 @@ class ExtractAlembic(publish.Extractor): end=end)) suspend = not instance.data.get("refresh", False) + self.log.info(nodes) with suspended_refresh(suspend=suspend): with maintained_selection(): cmds.select(nodes, noExpand=True) @@ -101,9 +101,9 @@ class ExtractAlembic(publish.Extractor): instance.data["representations"] = [] representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, + "name": "abc", + "ext": "abc", + "files": filename, "stagingDir": dirname } instance.data["representations"].append(representation) @@ -112,6 +112,37 @@ class ExtractAlembic(publish.Extractor): self.log.info("Extracted {} to {}".format(instance, dirname)) + # Extract proxy. + if not instance.data.get("proxy"): + self.log.info("No proxy nodes found. 
Skipping proxy extraction.") + return + + path = path.replace(".abc", "_proxy.abc") + if not instance.data.get("includeParentHierarchy", True): + # Set the root nodes if we don't want to include parents + # The roots are to be considered the ones that are the actual + # direct members of the set + options["root"] = instance.data["proxyRoots"] + + with suspended_refresh(suspend=suspend): + with maintained_selection(): + cmds.select(instance.data["proxy"]) + extract_alembic( + file=path, + startFrame=start, + endFrame=end, + **options + ) + + representation = { + "name": "proxy", + "ext": "abc", + "files": os.path.basename(path), + "stagingDir": dirname, + "outputName": "proxy" + } + instance.data["representations"].append(representation) + def get_members_and_roots(self, instance): return instance[:], instance.data.get("setMembers") diff --git a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py index 1edafeb926..4160ac4cb2 100644 --- a/openpype/hosts/maya/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/maya/plugins/publish/extract_thumbnail.py @@ -1,6 +1,7 @@ import os import glob import tempfile +import json import capture @@ -8,7 +9,6 @@ from openpype.pipeline import publish from openpype.hosts.maya.api import lib from maya import cmds -import pymel.core as pm class ExtractThumbnail(publish.Extractor): @@ -26,28 +26,31 @@ class ExtractThumbnail(publish.Extractor): def process(self, instance): self.log.info("Extracting capture..") - camera = instance.data['review_camera'] + camera = instance.data["review_camera"] - capture_preset = ( - instance.context.data["project_settings"]['maya']['publish']['ExtractPlayblast']['capture_preset'] + task_data = instance.data["anatomyData"].get("task", {}) + capture_preset = lib.get_capture_preset( + task_data.get("name"), + task_data.get("type"), + instance.data["subset"], + instance.context.data["project_settings"], + self.log ) + + preset = lib.load_capture_preset(data=capture_preset) + + # "isolate_view" will already have been applied at creation, so we'll + # ignore it here. 
+ preset.pop("isolate_view") + override_viewport_options = ( - capture_preset['Viewport Options']['override_viewport_options'] + capture_preset["Viewport Options"]["override_viewport_options"] ) - try: - preset = lib.load_capture_preset(data=capture_preset) - except KeyError as ke: - self.log.error('Error loading capture presets: {}'.format(str(ke))) - preset = {} - self.log.info('Using viewport preset: {}'.format(preset)) - - # preset["off_screen"] = False - - preset['camera'] = camera - preset['start_frame'] = instance.data["frameStart"] - preset['end_frame'] = instance.data["frameStart"] - preset['camera_options'] = { + preset["camera"] = camera + preset["start_frame"] = instance.data["frameStart"] + preset["end_frame"] = instance.data["frameStart"] + preset["camera_options"] = { "displayGateMask": False, "displayResolution": False, "displayFilmGate": False, @@ -59,10 +62,9 @@ class ExtractThumbnail(publish.Extractor): "overscan": 1.0, "depthOfField": cmds.getAttr("{0}.depthOfField".format(camera)), } - capture_presets = capture_preset # Set resolution variables from capture presets - width_preset = capture_presets["Resolution"]["width"] - height_preset = capture_presets["Resolution"]["height"] + width_preset = capture_preset["Resolution"]["width"] + height_preset = capture_preset["Resolution"]["height"] # Set resolution variables from asset values asset_data = instance.data["assetEntity"]["data"] asset_width = asset_data.get("resolutionWidth") @@ -74,14 +76,14 @@ class ExtractThumbnail(publish.Extractor): # used, if not then the asset resolution is # used if review_instance_width and review_instance_height: - preset['width'] = review_instance_width - preset['height'] = review_instance_height + preset["width"] = review_instance_width + preset["height"] = review_instance_height elif width_preset and height_preset: - preset['width'] = width_preset - preset['height'] = height_preset + preset["width"] = width_preset + preset["height"] = height_preset elif asset_width and asset_height: - preset['width'] = asset_width - preset['height'] = asset_height + preset["width"] = asset_width + preset["height"] = asset_height # Create temp directory for thumbnail # - this is to avoid "override" of source file @@ -96,14 +98,18 @@ class ExtractThumbnail(publish.Extractor): self.log.info("Outputting images to %s" % path) - preset['filename'] = path - preset['overwrite'] = True + preset["filename"] = path + preset["overwrite"] = True - pm.refresh(f=True) + cmds.refresh(force=True) - refreshFrameInt = int(pm.playbackOptions(q=True, minTime=True)) - pm.currentTime(refreshFrameInt - 1, edit=True) - pm.currentTime(refreshFrameInt, edit=True) + refreshFrameInt = int(cmds.playbackOptions(q=True, minTime=True)) + cmds.currentTime(refreshFrameInt - 1, edit=True) + cmds.currentTime(refreshFrameInt, edit=True) + + # Use displayLights setting from instance + key = "displayLights" + preset["viewport_options"][key] = instance.data[key] # Override transparency if requested. transparency = instance.data.get("transparency", 0) @@ -111,8 +117,9 @@ class ExtractThumbnail(publish.Extractor): preset["viewport2_options"]["transparencyAlgorithm"] = transparency # Isolate view is requested by having objects in the set besides a - # camera. - if preset.pop("isolate_view", False) and instance.data.get("isolate"): + # camera. If there is only 1 member it'll be the camera because we + # validate to have 1 camera only. 
+ if instance.data["isolate"] and len(instance.data["setMembers"]) > 1: preset["isolate"] = instance.data["setMembers"] # Show or Hide Image Plane @@ -123,39 +130,44 @@ class ExtractThumbnail(publish.Extractor): preset["viewport_options"] = {"imagePlane": image_plane} # Disable Pan/Zoom. - pan_zoom = cmds.getAttr("{}.panZoomEnabled".format(preset["camera"])) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), False) + preset.pop("pan_zoom", None) + preset["camera_options"]["panZoomEnabled"] = instance.data["panZoom"] with lib.maintained_time(): # Force viewer to False in call to capture because we have our own # viewer opening call to allow a signal to trigger between # playblast and viewer - preset['viewer'] = False + preset["viewer"] = False # Update preset with current panel setting # if override_viewport_options is turned off - if not override_viewport_options: - panel = cmds.getPanel(withFocus=True) + panel = cmds.getPanel(withFocus=True) or "" + if not override_viewport_options and "modelPanel" in panel: panel_preset = capture.parse_active_view() preset.update(panel_preset) cmds.setFocus(panel) + if os.environ.get("OPENPYPE_DEBUG") == "1": + self.log.debug( + "Using preset: {}".format( + json.dumps(preset, indent=4, sort_keys=True) + ) + ) + path = capture.capture(**preset) playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) - cmds.setAttr("{}.panZoomEnabled".format(preset["camera"]), pan_zoom) - self.log.info("file list {}".format(thumbnail)) if "representations" not in instance.data: instance.data["representations"] = [] representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': thumbnail, + "name": "thumbnail", + "ext": "jpg", + "files": thumbnail, "stagingDir": dst_staging, "thumbnail": True } diff --git a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py index 38bf02245a..df16c6c357 100644 --- a/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py +++ b/openpype/hosts/maya/plugins/publish/extract_vrayproxy.py @@ -16,7 +16,7 @@ class ExtractVRayProxy(publish.Extractor): label = "VRay Proxy (.vrmesh)" hosts = ["maya"] - families = ["vrayproxy"] + families = ["vrayproxy.vrmesh"] def process(self, instance): @@ -30,9 +30,7 @@ class ExtractVRayProxy(publish.Extractor): # non-animated subsets keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "frameStartHandle", "frameEndHandle", - # Backwards compatibility - "handles"] + "frameStartHandle", "frameEndHandle"] for key in keys: instance.data.pop(key, None) diff --git a/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py b/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py new file mode 100644 index 0000000000..20e1bd37d8 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_workfile_xgen.py @@ -0,0 +1,250 @@ +import os +import shutil +import copy + +from maya import cmds + +import pyblish.api +from openpype.hosts.maya.api.lib import extract_alembic +from openpype.pipeline import publish +from openpype.lib import StringTemplate + + +class ExtractWorkfileXgen(publish.Extractor): + """Extract Workfile Xgen. + + When submitting a render, we need to prep Xgen side car files. + """ + + # Offset to run before workfile scene save. + order = pyblish.api.ExtractorOrder - 0.499 + label = "Extract Workfile Xgen" + families = ["workfile"] + hosts = ["maya"] + + def get_render_max_frame_range(self, context): + """Return start to end frame range including all renderlayers in + context. 
+
+        This will return the full frame range which includes all frames of the
+        renderlayer instances to be published/submitted.
+
+        Args:
+            context (pyblish.api.Context): Current publishing context.
+
+        Returns:
+            tuple or None: Start frame, end frame tuple if any renderlayers
+                found. Otherwise None is returned.
+
+        """
+
+        def _is_active_renderlayer(i):
+            """Return whether instance is active renderlayer"""
+            if not i.data.get("publish", True):
+                return False
+
+            is_renderlayer = (
+                "renderlayer" in i.data.get("families", []) or
+                i.data["family"] == "renderlayer"
+            )
+            return is_renderlayer
+
+        start_frame = None
+        end_frame = None
+        for instance in context:
+            if not _is_active_renderlayer(instance):
+                # Only consider renderlayer instances
+                continue
+
+            render_start_frame = instance.data["frameStart"]
+            render_end_frame = instance.data["frameEnd"]
+
+            if start_frame is None:
+                start_frame = render_start_frame
+            else:
+                start_frame = min(start_frame, render_start_frame)
+
+            if end_frame is None:
+                end_frame = render_end_frame
+            else:
+                end_frame = max(end_frame, render_end_frame)
+
+        if start_frame is None or end_frame is None:
+            return
+
+        return start_frame, end_frame
+
+    def process(self, instance):
+        transfers = []
+
+        # Validate there are any palettes in the scene.
+        if not cmds.ls(type="xgmPalette"):
+            self.log.debug(
+                "No collections found in the scene. Skipping Xgen extraction."
+            )
+            return
+
+        import xgenm
+
+        # Validate to extract only when we are publishing a renderlayer as
+        # well.
+        render_range = self.get_render_max_frame_range(instance.context)
+        if not render_range:
+            self.log.debug(
+                "No publishable renderlayers found in context. Skipping Xgen"
+                " extraction."
+            )
+            return
+
+        start_frame, end_frame = render_range
+
+        # We decrement start frame and increment end frame so motion blur will
+        # render correctly.
+        start_frame -= 1
+        end_frame += 1
+
+        # Extract patches alembic.
+        path_no_ext, _ = os.path.splitext(instance.context.data["currentFile"])
+        kwargs = {"attrPrefix": ["xgen"], "stripNamespaces": True}
+        alembic_files = []
+        for palette in cmds.ls(type="xgmPalette"):
+            patch_names = []
+            for description in xgenm.descriptions(palette):
+                for name in xgenm.boundGeometry(palette, description):
+                    patch_names.append(name)
+
+            alembic_file = "{}__{}.abc".format(
+                path_no_ext, palette.replace(":", "__ns__")
+            )
+            extract_alembic(
+                alembic_file,
+                root=patch_names,
+                selection=False,
+                startFrame=float(start_frame),
+                endFrame=float(end_frame),
+                verbose=True,
+                **kwargs
+            )
+            alembic_files.append(alembic_file)
+
+        template_data = copy.deepcopy(instance.data["anatomyData"])
+        published_maya_path = StringTemplate(
+            instance.context.data["anatomy"].templates["publish"]["file"]
+        ).format(template_data)
+        published_basename, _ = os.path.splitext(published_maya_path)
+
+        for source in alembic_files:
+            destination = os.path.join(
+                os.path.dirname(instance.data["resourcesDir"]),
+                os.path.basename(
+                    source.replace(path_no_ext, published_basename)
+                )
+            )
+            transfers.append((source, destination))
+
+        # Validate that we are using the published workfile.
+        deadline_settings = instance.context.get("deadline")
+        if deadline_settings:
+            publish_settings = deadline_settings["publish"]
+            if not publish_settings["MayaSubmitDeadline"]["use_published"]:
+                self.log.debug(
+                    "Not using the published workfile. Abort Xgen extraction."
+                )
+                return
+
+        # Collect Xgen and Delta files.
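+        # The .xgen palette files are copied to staging and patched below,
+        # while .xgd delta files are transferred to the publish unchanged.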
+ xgen_files = [] + sources = [] + current_dir = os.path.dirname(instance.context.data["currentFile"]) + attrs = ["xgFileName", "xgBaseFile"] + for palette in cmds.ls(type="xgmPalette"): + for attr in attrs: + source = os.path.join( + current_dir, cmds.getAttr(palette + "." + attr) + ) + if not os.path.exists(source): + continue + + ext = os.path.splitext(source)[1] + if ext == ".xgen": + xgen_files.append(source) + if ext == ".xgd": + sources.append(source) + + # Copy .xgen file to temporary location and modify. + staging_dir = self.staging_dir(instance) + for source in xgen_files: + destination = os.path.join(staging_dir, os.path.basename(source)) + shutil.copy(source, destination) + + lines = [] + with open(destination, "r") as f: + for line in [line.rstrip() for line in f]: + if line.startswith("\txgProjectPath"): + path = os.path.dirname(instance.data["resourcesDir"]) + line = "\txgProjectPath\t\t{}/".format( + path.replace("\\", "/") + ) + + lines.append(line) + + with open(destination, "w") as f: + f.write("\n".join(lines)) + + sources.append(destination) + + # Add resource files to workfile instance. + for source in sources: + basename = os.path.basename(source) + destination = os.path.join( + os.path.dirname(instance.data["resourcesDir"]), basename + ) + transfers.append((source, destination)) + + destination_dir = os.path.join( + instance.data["resourcesDir"], "collections" + ) + for palette in cmds.ls(type="xgmPalette"): + project_path = xgenm.getAttr("xgProjectPath", palette) + data_path = xgenm.getAttr("xgDataPath", palette) + data_path = data_path.replace("${PROJECT}", project_path) + for path in data_path.split(";"): + for root, _, files in os.walk(path): + for f in files: + source = os.path.join(root, f) + destination = "{}/{}{}".format( + destination_dir, + palette.replace(":", "__ns__"), + source.replace(path, "") + ) + transfers.append((source, destination)) + + for source, destination in transfers: + self.log.debug("Transfer: {} > {}".format(source, destination)) + + instance.data["transfers"] = transfers + + # Set palette attributes in preparation for workfile publish. + attrs = {"xgFileName": None, "xgBaseFile": ""} + data = {} + for palette in cmds.ls(type="xgmPalette"): + attrs["xgFileName"] = "resources/{}.xgen".format( + palette.replace(":", "__ns__") + ) + for attr, value in attrs.items(): + node_attr = palette + "." + attr + + old_value = cmds.getAttr(node_attr) + try: + data[palette][attr] = old_value + except KeyError: + data[palette] = {attr: old_value} + + cmds.setAttr(node_attr, value, type="string") + self.log.info( + "Setting \"{}\" on \"{}\"".format(value, node_attr) + ) + + cmds.setAttr(palette + "." + "xgExportAsDelta", False) + + instance.data["xgenAttributes"] = data diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen.py b/openpype/hosts/maya/plugins/publish/extract_xgen.py new file mode 100644 index 0000000000..fb097ca84a --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/extract_xgen.py @@ -0,0 +1,143 @@ +import os +import copy +import tempfile + +from maya import cmds +import xgenm + +from openpype.pipeline import publish +from openpype.hosts.maya.api.lib import ( + maintained_selection, attribute_values, write_xgen_file, delete_after +) +from openpype.lib import StringTemplate + + +class ExtractXgen(publish.Extractor): + """Extract Xgen + + Workflow: + - Duplicate nodes used for patches. + - Export palette and import onto duplicate nodes. + - Export/Publish duplicate nodes and palette. 
+ - Export duplicate palette to .xgen file and add to publish. + - Publish all xgen files as resources. + """ + + label = "Extract Xgen" + hosts = ["maya"] + families = ["xgen"] + scene_type = "ma" + + def process(self, instance): + if "representations" not in instance.data: + instance.data["representations"] = [] + + staging_dir = self.staging_dir(instance) + maya_filename = "{}.{}".format(instance.data["name"], self.scene_type) + maya_filepath = os.path.join(staging_dir, maya_filename) + + # Get published xgen file name. + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({"ext": "xgen"}) + templates = instance.context.data["anatomy"].templates["publish"] + xgen_filename = StringTemplate(templates["file"]).format(template_data) + + xgen_path = os.path.join( + self.staging_dir(instance), xgen_filename + ).replace("\\", "/") + type = "mayaAscii" if self.scene_type == "ma" else "mayaBinary" + + # Duplicate xgen setup. + with delete_after() as delete_bin: + duplicate_nodes = [] + # Collect nodes to export. + for _, connections in instance.data["xgenConnections"].items(): + transform_name = connections["transform"].split(".")[0] + + # Duplicate_transform subd patch geometry. + duplicate_transform = cmds.duplicate(transform_name)[0] + delete_bin.append(duplicate_transform) + + # Discard the children. + shapes = cmds.listRelatives(duplicate_transform, shapes=True) + children = cmds.listRelatives( + duplicate_transform, children=True + ) + cmds.delete(set(children) - set(shapes)) + + if cmds.listRelatives(duplicate_transform, parent=True): + duplicate_transform = cmds.parent( + duplicate_transform, world=True + )[0] + + duplicate_nodes.append(duplicate_transform) + + # Export temp xgen palette files. + temp_xgen_path = os.path.join( + tempfile.gettempdir(), "temp.xgen" + ).replace("\\", "/") + xgenm.exportPalette( + instance.data["xgmPalette"].replace("|", ""), temp_xgen_path + ) + self.log.info("Extracted to {}".format(temp_xgen_path)) + + # Import xgen onto the duplicate. + with maintained_selection(): + cmds.select(duplicate_nodes) + palette = xgenm.importPalette(temp_xgen_path, []) + + delete_bin.append(palette) + + # Export duplicated palettes. + xgenm.exportPalette(palette, xgen_path) + + # Export Maya file. + attribute_data = {"{}.xgFileName".format(palette): xgen_filename} + with attribute_values(attribute_data): + with maintained_selection(): + cmds.select(duplicate_nodes + [palette]) + cmds.file( + maya_filepath, + force=True, + type=type, + exportSelected=True, + preserveReferences=False, + constructionHistory=True, + shader=True, + constraints=True, + expressions=True + ) + + self.log.info("Extracted to {}".format(maya_filepath)) + + if os.path.exists(temp_xgen_path): + os.remove(temp_xgen_path) + + data = { + "xgDataPath": os.path.join( + instance.data["resourcesDir"], + "collections", + palette.replace(":", "__ns__") + ).replace("\\", "/"), + "xgProjectPath": os.path.dirname( + instance.data["resourcesDir"] + ).replace("\\", "/") + } + write_xgen_file(data, xgen_path) + + # Adding representations. 
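+        # Both the exported .xgen palette file and the Maya scene containing
+        # the duplicated patch geometry are published.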
+ representation = { + "name": "xgen", + "ext": "xgen", + "files": xgen_filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) + + representation = { + "name": self.scene_type, + "ext": self.scene_type, + "files": maya_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py b/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py deleted file mode 100644 index 77350f343e..0000000000 --- a/openpype/hosts/maya/plugins/publish/extract_xgen_cache.py +++ /dev/null @@ -1,64 +0,0 @@ -import os - -from maya import cmds - -from openpype.pipeline import publish -from openpype.hosts.maya.api.lib import ( - suspended_refresh, - maintained_selection -) - - -class ExtractXgenCache(publish.Extractor): - """Produce an alembic of just xgen interactive groom - - """ - - label = "Extract Xgen ABC Cache" - hosts = ["maya"] - families = ["xgen"] - optional = True - - def process(self, instance): - - # Collect the out set nodes - out_descriptions = [node for node in instance - if cmds.nodeType(node) == "xgmSplineDescription"] - - start = 1 - end = 1 - - self.log.info("Extracting Xgen Cache..") - dirname = self.staging_dir(instance) - - parent_dir = self.staging_dir(instance) - filename = "{name}.abc".format(**instance.data) - path = os.path.join(parent_dir, filename) - - with suspended_refresh(): - with maintained_selection(): - command = ( - '-file ' - + path - + ' -df "ogawa" -fr ' - + str(start) - + ' ' - + str(end) - + ' -step 1 -mxf -wfw' - ) - for desc in out_descriptions: - command += (" -obj " + desc) - cmds.xgmSplineCache(export=True, j=command) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - 'name': 'abc', - 'ext': 'abc', - 'files': filename, - "stagingDir": dirname, - } - instance.data["representations"].append(representation) - - self.log.info("Extracted {} to {}".format(instance, dirname)) diff --git a/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml b/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml deleted file mode 100644 index fd1bf4cbaa..0000000000 --- a/openpype/hosts/maya/plugins/publish/help/validate_review_subset_uniqueness.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - Review subsets not unique - - ## Non unique subset name found - - Non unique subset names: '{non_unique}' - - ### __Detailed Info__ (optional) - - This might happen if you already published for this asset - review subset with legacy name {task}Review. - This legacy name limits possibility of publishing of multiple - reviews from a single workfile. Proper review subset name should - now - contain variant also (as 'Main', 'Default' etc.). That would - result in completely new subset though, so this situation must - be handled manually. - - ### How to repair? - - Legacy subsets must be removed from Openpype DB, please ask admin - to do that. Please provide them asset and subset names. - - - - \ No newline at end of file diff --git a/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py new file mode 100644 index 0000000000..d8e8554b68 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/reset_xgen_attributes.py @@ -0,0 +1,36 @@ +from maya import cmds + +import pyblish.api + + +class ResetXgenAttributes(pyblish.api.InstancePlugin): + """Reset Xgen attributes. 
+ + When the incremental save of the workfile triggers, the Xgen attributes + change, so this plugin changes them back to the values from before + publishing. + """ + + label = "Reset Xgen Attributes." + # Offset to run after workfile increment plugin. + order = pyblish.api.IntegratorOrder + 10.0 + families = ["workfile"] + + def process(self, instance): + xgen_attributes = instance.data.get("xgenAttributes", {}) + if not xgen_attributes: + return + + for palette, data in xgen_attributes.items(): + for attr, value in data.items(): + node_attr = "{}.{}".format(palette, attr) + self.log.info( + "Setting \"{}\" on \"{}\"".format(value, node_attr) + ) + cmds.setAttr(node_attr, value, type="string") + cmds.setAttr(palette + ".xgExportAsDelta", True) + + # Need to save the scene, because the attribute changes above do not + # mark the scene as modified, so the user could exit without + # committing the changes. + self.log.info("Saving changes.") + cmds.file(save=True) diff --git a/openpype/hosts/maya/plugins/publish/save_scene.py b/openpype/hosts/maya/plugins/publish/save_scene.py index 45e62e7b44..495c339731 100644 --- a/openpype/hosts/maya/plugins/publish/save_scene.py +++ b/openpype/hosts/maya/plugins/publish/save_scene.py @@ -31,5 +31,5 @@ class SaveCurrentScene(pyblish.api.ContextPlugin): # remove lockfile before saving if is_workfile_lock_enabled("maya", project_name, project_settings): remove_workfile_lock(current) - self.log.info("Saving current file..") + self.log.info("Saving current file: {}".format(current)) cmds.file(save=True, force=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py new file mode 100644 index 0000000000..7055dc145e --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source.py @@ -0,0 +1,122 @@ +import pyblish.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishValidationError +) + + +class ValidateArnoldSceneSource(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source. + + We require at least 1 root node/parent for the meshes. This is to ensure we + can duplicate the nodes and preserve the names. + + If using proxies, we need the nodes to share the same names and not be + parented to the world. This ends up needing at least two groups; content + nodes in one and proxy nodes in another. + """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass"] + label = "Validate Arnold Scene Source" + + def _get_nodes_by_name(self, nodes): + ungrouped_nodes = [] + nodes_by_name = {} + parents = [] + same_named_nodes = {} + for node in nodes: + node_split = node.split("|") + if len(node_split) == 2: + ungrouped_nodes.append(node) + + parent = "|".join(node_split[:-1]) + if parent: + parents.append(parent) + + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + + # Check for same named nodes, which can happen in different + # hierarchies.
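+ # Collect any duplicates under their short name so that all clashing + # nodes can be reported together.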
+ if node_name in nodes_by_name: + try: + same_named_nodes[node_name].append(node) + except KeyError: + same_named_nodes[node_name] = [ + nodes_by_name[node_name], node + ] + + nodes_by_name[node_name] = node + + if same_named_nodes: + message = "Found nodes with the same name:" + for name, nodes in same_named_nodes.items(): + message += "\n\n\"{}\":\n{}".format(name, "\n".join(nodes)) + + raise PublishValidationError(message) + + return ungrouped_nodes, nodes_by_name, parents + + def process(self, instance): + ungrouped_nodes = [] + + nodes, content_nodes_by_name, content_parents = ( + self._get_nodes_by_name(instance.data["contentMembers"]) + ) + ungrouped_nodes.extend(nodes) + + nodes, proxy_nodes_by_name, proxy_parents = self._get_nodes_by_name( + instance.data.get("proxy", []) + ) + ungrouped_nodes.extend(nodes) + + # Validate against nodes directly parented to world. + if ungrouped_nodes: + raise PublishValidationError( + "Found nodes parented to the world: {}\n" + "All nodes need to be grouped.".format(ungrouped_nodes) + ) + + # Proxy validation. + if not instance.data.get("proxy", []): + return + + # Validate that the amount of content and proxy nodes is the same. + if len(instance.data["contentMembers"]) != len(instance.data["proxy"]): + raise PublishValidationError( + "Amount of content nodes ({}) and proxy nodes ({}) needs to " + "be the same.".format( + len(instance.data["contentMembers"]), + len(instance.data["proxy"]) + ) + ) + + # Validate that content and proxy nodes do not share the same parent. + if list(set(content_parents) & set(proxy_parents)): + raise PublishValidationError( + "Content and proxy nodes cannot share the same parent." + ) + + # Validate that content and proxy nodes share the same names. + sorted_content_names = sorted(content_nodes_by_name.keys()) + sorted_proxy_names = sorted(proxy_nodes_by_name.keys()) + odd_content_names = list( + set(sorted_content_names) - set(sorted_proxy_names) + ) + odd_content_nodes = [ + content_nodes_by_name[x] for x in odd_content_names + ] + odd_proxy_names = list( + set(sorted_proxy_names) - set(sorted_content_names) + ) + odd_proxy_nodes = [ + proxy_nodes_by_name[x] for x in odd_proxy_names + ] + if sorted_content_names != sorted_proxy_names: + raise PublishValidationError( + "Content and proxy nodes need to share the same names.\n" + "Content nodes not matching: {}\n" + "Proxy nodes not matching: {}".format( + odd_content_nodes, odd_proxy_nodes + ) + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py new file mode 100644 index 0000000000..8ce76c8d04 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_arnold_scene_source_cbid.py @@ -0,0 +1,74 @@ +import pyblish.api +from openpype.hosts.maya.api import lib +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishValidationError, RepairAction +) + + +class ValidateArnoldSceneSourceCbid(pyblish.api.InstancePlugin): + """Validate Arnold Scene Source Cbid. + + It is required for the proxy and content nodes to share the same cbid.
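+ + The cbId is the identifier attribute OpenPype stamps on nodes; it is + what ties a content node to its proxy counterpart here.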
+ """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["ass"] + label = "Validate Arnold Scene Source CBID" + actions = [RepairAction] + + @staticmethod + def _get_nodes_by_name(nodes): + nodes_by_name = {} + for node in nodes: + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + nodes_by_name[node_name] = node + + return nodes_by_name + + @classmethod + def get_invalid_couples(cls, instance): + content_nodes_by_name = cls._get_nodes_by_name( + instance.data["contentMembers"] + ) + proxy_nodes_by_name = cls._get_nodes_by_name( + instance.data.get("proxy", []) + ) + + invalid_couples = [] + for content_name, content_node in content_nodes_by_name.items(): + proxy_node = proxy_nodes_by_name.get(content_name, None) + + if not proxy_node: + cls.log.debug( + "Content node '{}' has no matching proxy node.".format( + content_node + ) + ) + continue + + content_id = lib.get_id(content_node) + proxy_id = lib.get_id(proxy_node) + if content_id != proxy_id: + invalid_couples.append((content_node, proxy_node)) + + return invalid_couples + + def process(self, instance): + # Proxy validation. + if not instance.data.get("proxy", []): + return + + # Validate for proxy nodes sharing the same cbId as content nodes. + invalid_couples = self.get_invalid_couples(instance) + if invalid_couples: + raise PublishValidationError( + "Found proxy nodes with mismatching cbid:\n{}".format( + invalid_couples + ) + ) + + @classmethod + def repair(cls, instance): + for content_node, proxy_node in cls.get_invalid_couples(instance): + lib.set_id(proxy_node, lib.get_id(content_node), overwrite=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py index ac6ce4d22d..6975d583bb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py +++ b/openpype/hosts/maya/plugins/publish/validate_ass_relative_paths.py @@ -2,11 +2,13 @@ import os import types import maya.cmds as cmds +from mtoa.core import createOptions import pyblish.api from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, + PublishValidationError ) @@ -34,8 +36,9 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): "defaultArnoldRenderOptions.pspath" ) except ValueError: - assert False, ("Can not validate, render setting were not opened " - "yet so Arnold setting cannot be validate") + raise PublishValidationError( + "Default Arnold options has not been created yet." 
+ ) scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) scene_name, _ = os.path.splitext(scene_basename) @@ -66,6 +69,8 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): @classmethod def repair(cls, instance): + createOptions() + texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath") procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath") diff --git a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py index e8087a304f..d1bca4091b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_assembly_transforms.py @@ -93,12 +93,12 @@ class ValidateAssemblyModelTransforms(pyblish.api.InstancePlugin): from openpype.hosts.maya.api import lib # Store namespace in variable, cosmetics thingy - messagebox = QtWidgets.QMessageBox - mode = messagebox.StandardButton.Ok | messagebox.StandardButton.Cancel - choice = messagebox.warning(None, - "Matrix reset", - cls.prompt_message, - mode) + choice = QtWidgets.QMessageBox.warning( + None, + "Matrix reset", + cls.prompt_message, + QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel + ) invalid = cls.get_invalid(instance) if not invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_attributes.py b/openpype/hosts/maya/plugins/publish/validate_attributes.py index 136c38bc1d..7ebd9d7d03 100644 --- a/openpype/hosts/maya/plugins/publish/validate_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_attributes.py @@ -1,13 +1,17 @@ -import pymel.core as pm +from collections import defaultdict + +from maya import cmds import pyblish.api + +from openpype.hosts.maya.api.lib import set_attribute from openpype.pipeline.publish import ( - RepairContextAction, + RepairAction, ValidateContentsOrder, ) -class ValidateAttributes(pyblish.api.ContextPlugin): +class ValidateAttributes(pyblish.api.InstancePlugin): """Ensure attributes are consistent. Attributes to validate and their values comes from the @@ -22,91 +26,85 @@ class ValidateAttributes(pyblish.api.ContextPlugin): order = ValidateContentsOrder label = "Attributes" hosts = ["maya"] - actions = [RepairContextAction] + actions = [RepairAction] optional = True attributes = None - def process(self, context): + def process(self, instance): # Check for preset existence. - if not self.attributes: return - invalid = self.get_invalid(context, compute=True) + invalid = self.get_invalid(instance, compute=True) if invalid: raise RuntimeError( "Found attributes with invalid values: {}".format(invalid) ) @classmethod - def get_invalid(cls, context, compute=False): - invalid = context.data.get("invalid_attributes", []) + def get_invalid(cls, instance, compute=False): if compute: - invalid = cls.get_invalid_attributes(context) - - return invalid + return cls.get_invalid_attributes(instance) + else: + return instance.data.get("invalid_attributes", []) @classmethod - def get_invalid_attributes(cls, context): + def get_invalid_attributes(cls, instance): invalid_attributes = [] - for instance in context: - # Filter publisable instances. - if not instance.data["publish"]: + + # Filter families. + families = [instance.data["family"]] + families += instance.data.get("families", []) + families = set(families) & set(cls.attributes.keys()) + if not families: + return [] + + # Get all attributes to validate. 
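+ # Build a mapping of node name -> {attribute: expected value} from + # the settings presets of the matched families.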
+ attributes = defaultdict(dict) + for family in families: + if family not in cls.attributes: + # No attributes to validate for family + continue + + for preset_attr, preset_value in cls.attributes[family].items(): + node_name, attribute_name = preset_attr.split(".", 1) + attributes[node_name][attribute_name] = preset_value + + if not attributes: + return [] + + # Get invalid attributes. + nodes = cmds.ls(long=True) + for node in nodes: + node_name = node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + if node_name not in attributes: continue - # Get all attributes to validate. - attributes = {} - for family in families: - for preset in self.attributes[family]: - [node_name, attribute_name] = preset.split(".") - try: - attributes[node_name].update( - {attribute_name: self.attributes[family][preset]} - ) - except KeyError: - attributes.update({ - node_name: { - attribute_name: self.attributes[family][preset] - } - }) + for attr_name, expected in attributes[node_name].items(): - # Get invalid attributes. - nodes = pm.ls() - for node in nodes: - name = node.name(stripNamespace=True) - if name not in attributes.keys(): + # Skip if attribute does not exist + if not cmds.attributeQuery(attr_name, node=node, exists=True): continue - presets_to_validate = attributes[name] - for attribute in node.listAttr(): - names = [attribute.shortName(), attribute.longName()] - attribute_name = list( - set(names) & set(presets_to_validate.keys()) + plug = "{}.{}".format(node, attr_name) + value = cmds.getAttr(plug) + if value != expected: + invalid_attributes.append( + { + "attribute": plug, + "expected": expected, + "current": value + } ) - if attribute_name: - expected = presets_to_validate[attribute_name[0]] - if attribute.get() != expected: - invalid_attributes.append( - { - "attribute": attribute, - "expected": expected, - "current": attribute.get() - } - ) - context.data["invalid_attributes"] = invalid_attributes + instance.data["invalid_attributes"] = invalid_attributes return invalid_attributes @classmethod def repair(cls, instance): invalid = cls.get_invalid(instance) for data in invalid: - data["attribute"].set(data["expected"]) + node, attr = data["attribute"].split(".", 1) + value = data["expected"] + set_attribute(node=node, attribute=attr, value=value) diff --git a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py index bd1529e252..13ea53a357 100644 --- a/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_camera_attributes.py @@ -8,7 +8,7 @@ from openpype.pipeline.publish import ValidateContentsOrder class ValidateCameraAttributes(pyblish.api.InstancePlugin): """Validates Camera has no invalid attribute keys or values. - The Alembic file format does not a specifc subset of attributes as such + The Alembic file format does not support a specific subset of attributes; as such we validate that no values are set there as the output will not match the current scene. For example the preScale, film offsets and film roll.
diff --git a/openpype/hosts/maya/plugins/publish/validate_color_sets.py b/openpype/hosts/maya/plugins/publish/validate_color_sets.py index 905417bafa..7ce3cca61a 100644 --- a/openpype/hosts/maya/plugins/publish/validate_color_sets.py +++ b/openpype/hosts/maya/plugins/publish/validate_color_sets.py @@ -19,7 +19,6 @@ class ValidateColorSets(pyblish.api.Validator): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh ColorSets' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_frame_range.py b/openpype/hosts/maya/plugins/publish/validate_frame_range.py index d86925184e..ccb351c880 100644 --- a/openpype/hosts/maya/plugins/publish/validate_frame_range.py +++ b/openpype/hosts/maya/plugins/publish/validate_frame_range.py @@ -4,6 +4,7 @@ from maya import cmds from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, + PublishValidationError ) from openpype.hosts.maya.api.lib_rendersetup import ( get_attr_overrides, @@ -49,7 +50,6 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): frame_start_handle = int(context.data.get("frameStartHandle")) frame_end_handle = int(context.data.get("frameEndHandle")) - handles = int(context.data.get("handles")) handle_start = int(context.data.get("handleStart")) handle_end = int(context.data.get("handleEnd")) frame_start = int(context.data.get("frameStart")) @@ -57,36 +57,51 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): inst_start = int(instance.data.get("frameStartHandle")) inst_end = int(instance.data.get("frameEndHandle")) + inst_frame_start = int(instance.data.get("frameStart")) + inst_frame_end = int(instance.data.get("frameEnd")) + inst_handle_start = int(instance.data.get("handleStart")) + inst_handle_end = int(instance.data.get("handleEnd")) # basic sanity checks assert frame_start_handle <= frame_end_handle, ( "start frame is lower then end frame") - assert handles >= 0, ("handles cannot have negative values") - # compare with data on instance errors = [] if [ef for ef in self.exclude_families if instance.data["family"] in ef]: return - if(inst_start != frame_start_handle): + if (inst_start != frame_start_handle): errors.append("Instance start frame [ {} ] doesn't " - "match the one set on instance [ {} ]: " + "match the one set on asset [ {} ]: " "{}/{}/{}/{} (handle/start/end/handle)".format( inst_start, frame_start_handle, handle_start, frame_start, frame_end, handle_end )) - if(inst_end != frame_end_handle): + if (inst_end != frame_end_handle): errors.append("Instance end frame [ {} ] doesn't " - "match the one set on instance [ {} ]: " + "match the one set on asset [ {} ]: " "{}/{}/{}/{} (handle/start/end/handle)".format( inst_end, frame_end_handle, handle_start, frame_start, frame_end, handle_end )) + checks = { + "frame start": (frame_start, inst_frame_start), + "frame end": (frame_end, inst_frame_end), + "handle start": (handle_start, inst_handle_start), + "handle end": (handle_end, inst_handle_end) + } + for label, values in checks.items(): + if values[0] != values[1]: + errors.append( + "{} on instance ({}) does not match with the asset " + "({}).".format(label.title(), values[1], values[0]) + ) + for e in errors: self.log.error(e) diff --git a/openpype/hosts/maya/plugins/publish/validate_glsl_material.py b/openpype/hosts/maya/plugins/publish/validate_glsl_material.py new file mode 100644 index 0000000000..10c48da404 --- /dev/null +++ 
b/openpype/hosts/maya/plugins/publish/validate_glsl_material.py @@ -0,0 +1,207 @@ +import os +from maya import cmds + +import pyblish.api +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder +) +from openpype.pipeline import PublishValidationError + + +class ValidateGLSLMaterial(pyblish.api.InstancePlugin): + """ + Validate that the asset uses a GLSL shader + """ + + order = ValidateContentsOrder + 0.1 + families = ['gltf'] + hosts = ['maya'] + label = 'GLSL Shader for GLTF' + actions = [RepairAction] + optional = True + active = True + + def process(self, instance): + shading_grp = self.get_material_from_shapes(instance) + if not shading_grp: + raise PublishValidationError("No shading group found") + invalid = self.get_texture_shader_invalid(instance) + if invalid: + raise PublishValidationError("Non GLSL Shader found: " + "{0}".format(invalid)) + + def get_material_from_shapes(self, instance): + shapes = cmds.ls(instance, type="mesh", long=True) + for shape in shapes: + shading_grp = cmds.listConnections(shape, + destination=True, + type="shadingEngine") + + return shading_grp or [] + + def get_texture_shader_invalid(self, instance): + + invalid = set() + shading_grp = self.get_material_from_shapes(instance) + for shading_group in shading_grp: + material_name = "{}.surfaceShader".format(shading_group) + material = cmds.listConnections(material_name, + source=True, + destination=False, + type="GLSLShader") + + if not material: + # add material name + material = cmds.listConnections(material_name)[0] + invalid.add(material) + + return list(invalid) + + @classmethod + def repair(cls, instance): + """ + Repair instance by assigning GLSL Shader + to the material + """ + cls.assign_glsl_shader(instance) + return + + @classmethod + def assign_glsl_shader(cls, instance): + """ + Converting StingrayPBS material to GLSL Shaders + for the glb export through Maya2GLTF plugin + """ + + meshes = cmds.ls(instance, type="mesh", long=True) + cls.log.info("meshes: {}".format(meshes)) + # load the glsl shader plugin + cmds.loadPlugin("glslShader", quiet=True) + + for mesh in meshes: + # create glsl shader + glsl = cmds.createNode('GLSLShader') + glsl_shading_grp = cmds.sets(name=glsl + "SG", empty=True, + renderable=True, noSurfaceShader=True) + cmds.connectAttr(glsl + ".outColor", + glsl_shading_grp + ".surfaceShader") + + # load the maya2gltf shader + ogsfx_path = instance.context.data["project_settings"]["maya"]["publish"]["ExtractGLB"]["ogsfx_path"] # noqa + if not os.path.exists(ogsfx_path): + if ogsfx_path: + # a custom ogsfx path was specified but the file + # is missing, so warn the user + cls.log.warning("ogsfx shader file " + "not found in {}".format(ogsfx_path)) + + cls.log.info("Searching for the ogsfx shader file in " + "the default Maya directory...") + # re-direct to search the ogsfx path in maya_dir + ogsfx_path = os.getenv("MAYA_APP_DIR") + ogsfx_path + if not os.path.exists(ogsfx_path): + raise PublishValidationError("The ogsfx shader file does not " # noqa + "exist: {}".format(ogsfx_path)) # noqa + + cmds.setAttr(glsl + ".shader", ogsfx_path, typ="string") + # list the materials used for the assets + shading_grp = cmds.listConnections(mesh, + destination=True, + type="shadingEngine") + + # get the materials related to the selected assets + for material in shading_grp: + pbs_shader = cmds.listConnections(material, + destination=True, + type="StingrayPBS") + if pbs_shader: + cls.pbs_shader_conversion(pbs_shader, glsl) + # setting up to relink the texture if + # the mesh is
with aiStandardSurface + arnold_shader = cmds.listConnections(material, + destination=True, + type="aiStandardSurface") + if arnold_shader: + cls.arnold_shader_conversion(arnold_shader, glsl) + + cmds.sets(mesh, forceElement=str(glsl_shading_grp)) + + @classmethod + def pbs_shader_conversion(cls, main_shader, glsl): + + cls.log.info("StingrayPBS detected " + "-> Can do texture conversion") + + for shader in main_shader: + # get the file textures related to the PBS Shader + albedo = cmds.listConnections(shader + + ".TEX_color_map") + if albedo: + dif_output = albedo[0] + ".outColor" + # get the glsl_shader input + # reconnect the file nodes to maya2gltf shader + glsl_dif = glsl + ".u_BaseColorTexture" + cmds.connectAttr(dif_output, glsl_dif) + + # connect orm map if there is one + orm_packed = cmds.listConnections(shader + + ".TEX_ao_map") + if orm_packed: + orm_output = orm_packed[0] + ".outColor" + + mtl = glsl + ".u_MetallicTexture" + ao = glsl + ".u_OcclusionTexture" + rough = glsl + ".u_RoughnessTexture" + + cmds.connectAttr(orm_output, mtl) + cmds.connectAttr(orm_output, ao) + cmds.connectAttr(orm_output, rough) + + # connect nrm map if there is one + nrm = cmds.listConnections(shader + + ".TEX_normal_map") + if nrm: + nrm_output = nrm[0] + ".outColor" + glsl_nrm = glsl + ".u_NormalTexture" + cmds.connectAttr(nrm_output, glsl_nrm) + + @classmethod + def arnold_shader_conversion(cls, main_shader, glsl): + cls.log.info("aiStandardSurface detected " + "-> Can do texture conversion") + + for shader in main_shader: + # get the file textures related to the aiStandardSurface shader + albedo = cmds.listConnections(shader + ".baseColor") + if albedo: + dif_output = albedo[0] + ".outColor" + # get the glsl_shader input + # reconnect the file nodes to maya2gltf shader + glsl_dif = glsl + ".u_BaseColorTexture" + cmds.connectAttr(dif_output, glsl_dif) + + orm_packed = cmds.listConnections(shader + + ".specularRoughness") + if orm_packed: + orm_output = orm_packed[0] + ".outColor" + + mtl = glsl + ".u_MetallicTexture" + ao = glsl + ".u_OcclusionTexture" + rough = glsl + ".u_RoughnessTexture" + + cmds.connectAttr(orm_output, mtl) + cmds.connectAttr(orm_output, ao) + cmds.connectAttr(orm_output, rough) + + # connect nrm map if there is one + bump_node = cmds.listConnections(shader + + ".normalCamera") + if bump_node: + for bump in bump_node: + nrm = cmds.listConnections(bump + + ".bumpValue") + if nrm: + nrm_output = nrm[0] + ".outColor" + glsl_nrm = glsl + ".u_NormalTexture" + cmds.connectAttr(nrm_output, glsl_nrm) diff --git a/openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py b/openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py new file mode 100644 index 0000000000..53c2cf548a --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_glsl_plugin.py @@ -0,0 +1,31 @@ + +from maya import cmds + +import pyblish.api +from openpype.pipeline.publish import ( + RepairAction, + ValidateContentsOrder +) + + +class ValidateGLSLPlugin(pyblish.api.InstancePlugin): + """ + Validate that the maya2glTF plugin is loaded + """ + + order = ValidateContentsOrder + 0.15 + families = ['gltf'] + hosts = ['maya'] + label = 'maya2glTF plugin' + actions = [RepairAction] + + def process(self, instance): + if not cmds.pluginInfo("maya2glTF", query=True, loaded=True): + raise RuntimeError("maya2glTF is not loaded") + + @classmethod + def repair(cls, instance): + """ + Repair instance by enabling the plugin + """ + return cmds.loadPlugin("maya2glTF", quiet=True) diff --git
a/openpype/hosts/maya/plugins/publish/validate_instance_attributes.py b/openpype/hosts/maya/plugins/publish/validate_instance_attributes.py new file mode 100644 index 0000000000..f870c9f8c4 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_instance_attributes.py @@ -0,0 +1,60 @@ +from maya import cmds + +import pyblish.api +from openpype.pipeline.publish import ( + ValidateContentsOrder, PublishValidationError, RepairAction +) +from openpype.pipeline import discover_legacy_creator_plugins +from openpype.hosts.maya.api.lib import imprint + + +class ValidateInstanceAttributes(pyblish.api.InstancePlugin): + """Validate Instance Attributes. + + New attributes can be introduced as new features come in. Old instances + will need to be updated with these attributes, so the data stays + consistent and users do not have to recreate the instances. + """ + + order = ValidateContentsOrder + hosts = ["maya"] + families = ["*"] + label = "Instance Attributes" + plugins_by_family = { + p.family: p for p in discover_legacy_creator_plugins() + } + actions = [RepairAction] + + @classmethod + def get_missing_attributes(cls, instance): + plugin = cls.plugins_by_family[instance.data["family"]] + subset = instance.data["subset"] + asset = instance.data["asset"] + objset = instance.data["objset"] + + missing_attributes = {} + for key, value in plugin(subset, asset).data.items(): + if not cmds.objExists("{}.{}".format(objset, key)): + missing_attributes[key] = value + + return missing_attributes + + def process(self, instance): + objset = instance.data.get("objset") + if objset is None: + self.log.debug( + "Skipping {} because no objectset found.".format(instance) + ) + return + + missing_attributes = self.get_missing_attributes(instance) + if missing_attributes: + raise PublishValidationError( + "Missing attributes on {}:\n{}".format( + objset, missing_attributes + ) + ) + + @classmethod + def repair(cls, instance): + imprint(instance.data["objset"], cls.get_missing_attributes(instance)) diff --git a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py index 4870f27bff..63849cfd12 100644 --- a/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py +++ b/openpype/hosts/maya/plugins/publish/validate_instance_has_members.py @@ -13,7 +13,6 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): - invalid = list() if not instance.data["setMembers"]: objectset_name = instance.data['name'] @@ -22,6 +21,10 @@ class ValidateInstanceHasMembers(pyblish.api.InstancePlugin): return invalid def process(self, instance): + # Allow renderlayer and workfile to be empty + skip_families = ["workfile", "renderlayer", "rendersetup"] + if instance.data.get("family") in skip_families: + return invalid = self.get_invalid(instance) if invalid: diff --git a/openpype/hosts/maya/plugins/publish/validate_look_contents.py b/openpype/hosts/maya/plugins/publish/validate_look_contents.py index 53501d11e5..2d38099f0f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_look_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_look_contents.py @@ -1,6 +1,7 @@ import pyblish.api import openpype.hosts.maya.api.action from openpype.pipeline.publish import ValidateContentsOrder +from maya import cmds # noqa class ValidateLookContents(pyblish.api.InstancePlugin): @@ -85,6 +86,7 @@ class ValidateLookContents(pyblish.api.InstancePlugin): invalid.add(instance.name)
return list(invalid) + @classmethod def validate_looks(cls, instance): @@ -112,3 +114,23 @@ class ValidateLookContents(pyblish.api.InstancePlugin): invalid.append(node) return invalid + + @classmethod + def validate_renderer(cls, instance): + # TODO: Rewrite this to be more specific and configurable + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + do_maketx = instance.data.get("maketx", False) + do_rstex = instance.data.get("rstex", False) + processors = [] + + if do_maketx: + processors.append('arnold') + if do_rstex: + processors.append('redshift') + + for processor in processors: + if processor != renderer: + cls.log.error("Converted texture does not match current renderer.") # noqa diff --git a/openpype/hosts/maya/plugins/publish/validate_maya_units.py b/openpype/hosts/maya/plugins/publish/validate_maya_units.py index 5698d795ff..011df0846c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_maya_units.py +++ b/openpype/hosts/maya/plugins/publish/validate_maya_units.py @@ -4,17 +4,12 @@ import pyblish.api import openpype.hosts.maya.api.lib as mayalib from openpype.pipeline.context_tools import get_current_project_asset -from math import ceil from openpype.pipeline.publish import ( RepairContextAction, ValidateSceneOrder, ) -def float_round(num, places=0, direction=ceil): - return direction(num * (10**places)) / float(10**places) - - class ValidateMayaUnits(pyblish.api.ContextPlugin): """Check if the Maya units are set correct""" @@ -36,18 +31,12 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): # Collected units linearunits = context.data.get('linearUnits') angularunits = context.data.get('angularUnits') - # TODO(antirotor): This is hack as for framerates having multiple - # decimal places. FTrack is ceiling decimal values on - # fps to two decimal places but Maya 2019+ is reporting those fps - # with much higher resolution. As we currently cannot fix Ftrack - # rounding, we have to round those numbers coming from Maya. - # NOTE: this must be revisited yet again as it seems that Ftrack is - # now flooring the value?
- fps = float_round(context.data.get('fps'), 2, ceil) - # TODO repace query with using 'context.data["assetEntity"]' + fps = context.data.get('fps') + + # TODO replace query with using 'context.data["assetEntity"]' asset_doc = get_current_project_asset() - asset_fps = asset_doc["data"]["fps"] + asset_fps = mayalib.convert_to_maya_fps(asset_doc["data"]["fps"]) self.log.info('Units (linear): {0}'.format(linearunits)) self.log.info('Units (angular): {0}'.format(angularunits)) @@ -97,7 +86,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin): cls.log.debug(current_linear) cls.log.info("Setting time unit to match project") - # TODO repace query with using 'context.data["assetEntity"]' + # TODO replace query with using 'context.data["assetEntity"]' asset_doc = get_current_project_asset() asset_fps = asset_doc["data"]["fps"] mayalib.set_scene_fps(asset_fps) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py index c1c0636b9e..a580a1c787 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_arnold_attributes.py @@ -1,8 +1,14 @@ -import pymel.core as pc from maya import cmds import pyblish.api + import openpype.hosts.maya.api.action -from openpype.hosts.maya.api.lib import maintained_selection +from openpype.hosts.maya.api.lib import ( + maintained_selection, + delete_after, + undo_chunk, + get_attribute, + set_attribute +) from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, @@ -19,7 +25,6 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ["maya"] families = ["model"] - category = "geometry" label = "Mesh Arnold Attributes" actions = [ openpype.hosts.maya.api.action.SelectInvalidAction, @@ -32,60 +37,68 @@ class ValidateMeshArnoldAttributes(pyblish.api.InstancePlugin): else: active = False + @classmethod + def get_default_attributes(cls): + # Get default arnold attribute values for mesh type. + defaults = {} + with delete_after() as tmp: + transform = cmds.createNode("transform") + tmp.append(transform) + + mesh = cmds.createNode("mesh", parent=transform) + for attr in cmds.listAttr(mesh, string="ai*"): + plug = "{}.{}".format(mesh, attr) + try: + defaults[attr] = get_attribute(plug) + except RuntimeError: + cls.log.debug("Ignoring arnold attribute: {}".format(attr)) + + return defaults + @classmethod def get_invalid_attributes(cls, instance, compute=False): invalid = [] if compute: - # Get default arnold attributes. 
- temp_transform = pc.polyCube()[0] - for shape in pc.ls(instance, type="mesh"): - for attr in temp_transform.getShape().listAttr(): - if not attr.attrName().startswith("ai"): - continue + meshes = cmds.ls(instance, type="mesh", long=True) + if not meshes: + return [] - target_attr = pc.PyNode( - "{}.{}".format(shape.name(), attr.attrName()) - ) - if attr.get() != target_attr.get(): - invalid.append(target_attr) - - pc.delete(temp_transform) + # Compare the values against the defaults + defaults = cls.get_default_attributes() + for mesh in meshes: + for attr_name, default_value in defaults.items(): + plug = "{}.{}".format(mesh, attr_name) + if get_attribute(plug) != default_value: + invalid.append(plug) instance.data["nondefault_arnold_attributes"] = invalid - else: - invalid.extend(instance.data["nondefault_arnold_attributes"]) - return invalid + return instance.data.get("nondefault_arnold_attributes", []) @classmethod def get_invalid(cls, instance): - invalid = [] - - for attr in cls.get_invalid_attributes(instance, compute=False): - invalid.append(attr.node().name()) - - return invalid + invalid_attrs = cls.get_invalid_attributes(instance, compute=False) + invalid_nodes = set(attr.split(".", 1)[0] for attr in invalid_attrs) + return sorted(invalid_nodes) @classmethod def repair(cls, instance): with maintained_selection(): - with pc.UndoChunk(): - temp_transform = pc.polyCube()[0] - + with undo_chunk(): + defaults = cls.get_default_attributes() attributes = cls.get_invalid_attributes( instance, compute=False ) for attr in attributes: - source = pc.PyNode( - "{}.{}".format( - temp_transform.getShape(), attr.attrName() - ) + node, attr_name = attr.split(".", 1) + value = defaults[attr_name] + set_attribute( + node=node, + attribute=attr_name, + value=value ) - attr.set(source.get()) - - pc.delete(temp_transform) def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_empty.py b/openpype/hosts/maya/plugins/publish/validate_mesh_empty.py new file mode 100644 index 0000000000..848d66c4ae --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_empty.py @@ -0,0 +1,54 @@ +from maya import cmds + +import pyblish.api +import openpype.hosts.maya.api.action +from openpype.pipeline.publish import ( + RepairAction, + ValidateMeshOrder +) + + +class ValidateMeshEmpty(pyblish.api.InstancePlugin): + """Validate meshes have some vertices. + + It's possible to have meshes without any vertices. To replicate + this issue, delete all faces/polygons then all edges.
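+ + The repair action simply deletes the offending empty meshes.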
+ """ + + order = ValidateMeshOrder + hosts = ["maya"] + families = ["model"] + label = "Mesh Empty" + actions = [ + openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction + ] + + @classmethod + def repair(cls, instance): + invalid = cls.get_invalid(instance) + for node in invalid: + cmds.delete(node) + + @classmethod + def get_invalid(cls, instance): + invalid = [] + + meshes = cmds.ls(instance, type="mesh", long=True) + for mesh in meshes: + num_vertices = cmds.polyEvaluate(mesh, vertex=True) + + if num_vertices == 0: + cls.log.warning( + "\"{}\" does not have any vertices.".format(mesh) + ) + invalid.append(mesh) + + return invalid + + def process(self, instance): + + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + "Meshes found in instance without any vertices: %s" % invalid + ) diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py index 36a0da7a59..b7836b3e92 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_has_uv.py @@ -1,39 +1,9 @@ -import re - from maya import cmds import pyblish.api import openpype.hosts.maya.api.action from openpype.pipeline.publish import ValidateMeshOrder - - -def len_flattened(components): - """Return the length of the list as if it was flattened. - - Maya will return consecutive components as a single entry - when requesting with `maya.cmds.ls` without the `flatten` - flag. Though enabling `flatten` on a large list (e.g. millions) - will result in a slow result. This command will return the amount - of entries in a non-flattened list by parsing the result with - regex. - - Args: - components (list): The non-flattened components. - - Returns: - int: The amount of entries. 
- - """ - assert isinstance(components, (list, tuple)) - n = 0 - for c in components: - match = re.search("\[([0-9]+):([0-9]+)\]", c) - if match: - start, end = match.groups() - n += int(end) - int(start) + 1 - else: - n += 1 - return n +from openpype.hosts.maya.api.lib import len_flattened class ValidateMeshHasUVs(pyblish.api.InstancePlugin): @@ -48,7 +18,6 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Has UVs' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True @@ -58,6 +27,15 @@ class ValidateMeshHasUVs(pyblish.api.InstancePlugin): invalid = [] for node in cmds.ls(instance, type='mesh'): + num_vertices = cmds.polyEvaluate(node, vertex=True) + + if num_vertices == 0: + cls.log.warning( + "Skipping \"{}\", cause it does not have any " + "vertices.".format(node) + ) + continue + uv = cmds.polyEvaluate(node, uv=True) if uv == 0: diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py index 4427c6eece..f120361583 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_lamina_faces.py @@ -15,8 +15,6 @@ class ValidateMeshLaminaFaces(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Lamina Faces' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py index 0ef2716559..b49ba85648 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_non_zero_edge.py @@ -19,8 +19,6 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): order = ValidateMeshOrder families = ['model'] hosts = ['maya'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Edge Length Non Zero' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True @@ -30,7 +28,10 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance): """Return the invalid edges. 
- Also see: http://help.autodesk.com/view/MAYAUL/2015/ENU/?guid=Mesh__Cleanup + + Also see: + + http://help.autodesk.com/view/MAYAUL/2015/ENU/?guid=Mesh__Cleanup """ @@ -38,8 +39,21 @@ class ValidateMeshNonZeroEdgeLength(pyblish.api.InstancePlugin): if not meshes: return list() + valid_meshes = [] + for mesh in meshes: + num_vertices = cmds.polyEvaluate(mesh, vertex=True) + + if num_vertices == 0: + cls.log.warning( + "Skipping \"{}\", because it does not have any " + "vertices.".format(mesh) + ) + continue + + valid_meshes.append(mesh) + # Get all edges - edges = ['{0}.e[*]'.format(node) for node in meshes] + edges = ['{0}.e[*]'.format(node) for node in valid_meshes] # Filter by constraint on edge length invalid = lib.polyConstraint(edges, diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py index c8892a8e59..1b754a9829 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_normals_unlocked.py @@ -20,8 +20,6 @@ class ValidateMeshNormalsUnlocked(pyblish.api.Validator): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' - version = (0, 1, 0) label = 'Mesh Normals Unlocked' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py index be7324a68f..7dd66eed6c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_overlapping_uvs.py @@ -1,10 +1,11 @@ -import pyblish.api -import openpype.hosts.maya.api.action import math -import maya.api.OpenMaya as om -import pymel.core as pm - from six.moves import xrange + +from maya import cmds +import maya.api.OpenMaya as om +import pyblish.api + +import openpype.hosts.maya.api.action from openpype.pipeline.publish import ValidateMeshOrder @@ -185,8 +186,7 @@ class GetOverlappingUVs(object): center, radius = self._createBoundingCircle(meshfn) for i in xrange(meshfn.numPolygons): # noqa: F821 - rayb1, face1Orig, face1Vec = self._createRayGivenFace( - meshfn, i) + rayb1, face1Orig, face1Vec = self._createRayGivenFace(meshfn, i) if not rayb1: continue cui = center[2*i] @@ -206,8 +206,8 @@ class GetOverlappingUVs(object): if (dsqr >= (ri + rj) * (ri + rj)): continue - rayb2, face2Orig, face2Vec = self._createRayGivenFace( - meshfn, j) + rayb2, face2Orig, face2Vec = self._createRayGivenFace(meshfn, + j) if not rayb2: continue # Exclude the degenerate face @@ -235,43 +235,50 @@ class ValidateMeshHasOverlappingUVs(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Has Overlapping UVs' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] optional = True @classmethod - def _get_overlapping_uvs(cls, node): - """ Check if mesh has overlapping UVs. + def _get_overlapping_uvs(cls, mesh): + """Return overlapping UVs of mesh. + + Args: + mesh (str): Mesh node name + + Returns: + list: Overlapping uvs for the input mesh in all uv sets.
- :param node: node to check - :type node: str - :returns: True is has overlapping UVs, False otherwise - :rtype: bool """ ovl = GetOverlappingUVs() + # Store original uv set + original_current_uv_set = cmds.polyUVSet(mesh, + query=True, + currentUVSet=True)[0] + overlapping_faces = [] - for i, uv in enumerate(pm.polyUVSet(node, q=1, auv=1)): - pm.polyUVSet(node, cuv=1, uvSet=uv) - overlapping_faces.extend(ovl._getOverlapUVFaces(str(node))) + for uv_set in cmds.polyUVSet(mesh, query=True, allUVSets=True): + cmds.polyUVSet(mesh, currentUVSet=True, uvSet=uv_set) + overlapping_faces.extend(ovl._getOverlapUVFaces(mesh)) + + # Restore original uv set + cmds.polyUVSet(mesh, currentUVSet=True, uvSet=original_current_uv_set) return overlapping_faces @classmethod def get_invalid(cls, instance, compute=False): - invalid = [] + if compute: - instance.data["overlapping_faces"] = [] - for node in pm.ls(instance, type="mesh"): + invalid = [] + for node in cmds.ls(instance, type="mesh"): faces = cls._get_overlapping_uvs(node) invalid.extend(faces) - # Store values for later. - instance.data["overlapping_faces"].extend(faces) - else: - invalid.extend(instance.data["overlapping_faces"]) - return invalid + instance.data["overlapping_faces"] = invalid + + return instance.data.get("overlapping_faces", []) def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py index 6ca8c06ba5..faa360380e 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_single_uv_set.py @@ -21,9 +21,7 @@ class ValidateMeshSingleUVSet(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model', 'pointcache'] - category = 'uv' optional = True - version = (0, 1, 0) label = "Mesh Single UV Set" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py index 1e6d290ae7..d885158004 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py +++ b/openpype/hosts/maya/plugins/publish/validate_mesh_vertices_have_edges.py @@ -1,5 +1,3 @@ -import re - from maya import cmds import pyblish.api @@ -8,37 +6,7 @@ from openpype.pipeline.publish import ( RepairAction, ValidateMeshOrder, ) - - -def len_flattened(components): - """Return the length of the list as if it was flattened. - - Maya will return consecutive components as a single entry - when requesting with `maya.cmds.ls` without the `flatten` - flag. Though enabling `flatten` on a large list (e.g. millions) - will result in a slow result. This command will return the amount - of entries in a non-flattened list by parsing the result with - regex. - - Args: - components (list): The non-flattened components. - - Returns: - int: The amount of entries. 
- - """ - assert isinstance(components, (list, tuple)) - n = 0 - - pattern = re.compile(r"\[(\d+):(\d+)\]") - for c in components: - match = pattern.search(c) - if match: - start, end = match.groups() - n += int(end) - int(start) + 1 - else: - n += 1 - return n +from openpype.hosts.maya.api.lib import len_flattened class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): @@ -63,7 +31,6 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ['maya'] families = ['model'] - category = 'geometry' label = 'Mesh Vertices Have Edges' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] @@ -88,6 +55,13 @@ class ValidateMeshVerticesHaveEdges(pyblish.api.InstancePlugin): for mesh in meshes: num_vertices = cmds.polyEvaluate(mesh, vertex=True) + if num_vertices == 0: + cls.log.warning( + "Skipping \"{}\", cause it does not have any " + "vertices.".format(mesh) + ) + continue + # Vertices from all edges edges = "%s.e[*]" % mesh vertices = cmds.polyListComponentConversion(edges, toVertex=True) diff --git a/openpype/hosts/maya/plugins/publish/validate_model_name.py b/openpype/hosts/maya/plugins/publish/validate_model_name.py index 2dec9ba267..0e7adc640f 100644 --- a/openpype/hosts/maya/plugins/publish/validate_model_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_model_name.py @@ -2,9 +2,11 @@ """Validate model nodes names.""" import os import re -from maya import cmds -import pyblish.api +import platform +from maya import cmds + +import pyblish.api from openpype.pipeline import legacy_io from openpype.pipeline.publish import ValidateContentsOrder import openpype.hosts.maya.api.action @@ -44,7 +46,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): if not cmds.ls(child, transforms=True): return False return True - except: + except Exception: return False invalid = [] @@ -94,9 +96,10 @@ class ValidateModelName(pyblish.api.InstancePlugin): # load shader list file as utf-8 shaders = [] if not use_db: - if cls.material_file: - if os.path.isfile(cls.material_file): - shader_file = open(cls.material_file, "r") + material_file = cls.material_file[platform.system().lower()] + if material_file: + if os.path.isfile(material_file): + shader_file = open(material_file, "r") shaders = shader_file.readlines() shader_file.close() else: @@ -113,7 +116,7 @@ class ValidateModelName(pyblish.api.InstancePlugin): shader_file.close() # strip line endings from list - shaders = map(lambda s: s.rstrip(), shaders) + shaders = [s.rstrip() for s in shaders if s.rstrip()] # compile regex for testing names regex = cls.regex diff --git a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py index e583c1edba..04db5a061b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py +++ b/openpype/hosts/maya/plugins/publish/validate_mvlook_contents.py @@ -42,7 +42,8 @@ class ValidateMvLookContents(pyblish.api.InstancePlugin): resources = instance.data.get("resources", []) for resource in resources: files = resource["files"] - self.log.debug("Resouce '{}', files: [{}]".format(resource, files)) + self.log.debug( + "Resource '{}', files: [{}]".format(resource, files)) node = resource["node"] if len(files) == 0: self.log.error("File node '{}' uses no or non-existing " diff --git a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py index 1a5773e6a7..a4fb938d43 100644 --- 
a/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_default_camera.py @@ -16,7 +16,6 @@ class ValidateNoDefaultCameras(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['camera'] - version = (0, 1, 0) label = "No Default Cameras" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py index 01c77e5b2e..0ff03f9165 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_namespace.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_namespace.py @@ -1,4 +1,3 @@ -import pymel.core as pm import maya.cmds as cmds import pyblish.api @@ -12,7 +11,7 @@ import openpype.hosts.maya.api.action def get_namespace(node_name): # ensure only node's name (not parent path) - node_name = node_name.rsplit("|")[-1] + node_name = node_name.rsplit("|", 1)[-1] # ensure only namespace return node_name.rpartition(":")[0] @@ -23,8 +22,6 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' - version = (0, 1, 0) label = 'No Namespaces' actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] @@ -47,13 +44,11 @@ class ValidateNoNamespace(pyblish.api.InstancePlugin): invalid = cls.get_invalid(instance) - # Get nodes with pymel since we'll be renaming them - # Since we don't want to keep checking the hierarchy - # or full paths - nodes = pm.ls(invalid) + # Iterate over the nodes from longest to shortest name to process the + # lowest nodes in the hierarchy first. This way we avoid renaming + # parents before their children. + for node in sorted(invalid, key=len, reverse=True): - for node in nodes: - namespace = node.namespace() - if namespace: - name = node.nodeName() - node.rename(name[len(namespace):]) + node_name = node.rsplit("|", 1)[-1] + node_name_without_namespace = node_name.rsplit(":")[-1] + cmds.rename(node, node_name_without_namespace) diff --git a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py index b430c2b63c..f77fc81dc1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py +++ b/openpype/hosts/maya/plugins/publish/validate_no_null_transforms.py @@ -43,8 +43,6 @@ class ValidateNoNullTransforms(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' - version = (0, 1, 0) label = 'No Empty/Null Transforms' actions = [RepairAction, openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py b/openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py new file mode 100644 index 0000000000..6135c9c695 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_plugin_path_attributes.py @@ -0,0 +1,52 @@ +import os + +from maya import cmds + +import pyblish.api + +from openpype.pipeline.publish import ValidateContentsOrder + + +class ValidatePluginPathAttributes(pyblish.api.InstancePlugin): + """ + Validate plug-in path attributes point to existing file paths.
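+ + The node type to file attribute mapping is taken from the project + setting ValidatePluginPathAttributes -> attribute.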
+ """ + + order = ValidateContentsOrder + hosts = ['maya'] + families = ["workfile"] + label = "Plug-in Path Attributes" + + def get_invalid(self, instance): + invalid = list() + + # get the project setting + validate_path = ( + instance.context.data["project_settings"]["maya"]["publish"] + ) + file_attr = validate_path["ValidatePluginPathAttributes"]["attribute"] + if not file_attr: + return invalid + + # get the nodes and file attributes + for node, attr in file_attr.items(): + # check the related nodes + targets = cmds.ls(type=node) + + for target in targets: + # get the filepath + file_attr = "{}.{}".format(target, attr) + filepath = cmds.getAttr(file_attr) + + if filepath and not os.path.exists(filepath): + self.log.error("File {0} not exists".format(filepath)) # noqa + invalid.append(target) + + return invalid + + def process(self, instance): + """Process all directories Set as Filenames in Non-Maya Nodes""" + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError("Non-existent Path " + "found: {0}".format(invalid)) diff --git a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py index 6b6fb03eec..7919a6eaa1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py +++ b/openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py @@ -37,8 +37,8 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): project_name = legacy_io.active_project() asset_doc = instance.data["assetEntity"] - render_passses = instance.data.get("renderPasses", []) - for render_pass in render_passses: + render_passes = instance.data.get("renderPasses", []) + for render_pass in render_passes: is_valid = self.validate_subset_registered( project_name, asset_doc, render_pass ) diff --git a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py index 94e2633593..71b91b8e54 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rendersettings.py +++ b/openpype/hosts/maya/plugins/publish/validate_rendersettings.py @@ -13,6 +13,22 @@ from openpype.pipeline.publish import ( from openpype.hosts.maya.api import lib +def convert_to_int_or_float(string_value): + # Order of types are important here since float can convert string + # representation of integer. + types = [int, float] + for t in types: + try: + result = t(string_value) + except ValueError: + continue + else: + return result + + # Neither integer or float. 
+    return string_value
+
+
 def get_redshift_image_format_labels():
     """Return nice labels for Redshift image formats."""
     var = "$g_redshiftImageFormatLabels"
@@ -242,10 +258,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                 cls.DEFAULT_PADDING, "0" * cls.DEFAULT_PADDING))

         # load validation definitions from settings
-        validation_settings = (
-            instance.context.data["project_settings"]["maya"]["publish"]["ValidateRenderSettings"].get(  # noqa: E501
-                "{}_render_attributes".format(renderer)) or []
-        )
         settings_lights_flag = instance.context.data["project_settings"].get(
             "maya", {}).get(
                 "RenderSettings", {}).get(
@@ -253,15 +265,68 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         instance_lights_flag = instance.data.get("renderSetupIncludeLights")
         if settings_lights_flag != instance_lights_flag:
-            cls.log.warning('Instance flag for "Render Setup Include Lights" is set to {0} and Settings flag is set to {1}'.format(instance_lights_flag, settings_lights_flag))  # noqa
+            cls.log.warning(
+                "Instance flag for \"Render Setup Include Lights\" is set to "
+                "{} and Settings flag is set to {}".format(
+                    instance_lights_flag, settings_lights_flag
+                )
+            )

         # go through definitions and test if such node.attribute exists.
         # if so, compare its value with the one required.
-        for attr, value in OrderedDict(validation_settings).items():
-            cls.log.debug("{}: {}".format(attr, value))
+        for data in cls.get_nodes(instance, renderer):
+            for node in data["nodes"]:
+                try:
+                    render_value = cmds.getAttr(
+                        "{}.{}".format(node, data["attribute"])
+                    )
+                except RuntimeError:
+                    invalid = True
+                    cls.log.error(
+                        "Cannot get value of {}.{}".format(
+                            node, data["attribute"]
+                        )
+                    )
+                else:
+                    if render_value not in data["values"]:
+                        invalid = True
+                        cls.log.error(
+                            "Invalid value {} set on {}.{}. Expecting "
+                            "{}".format(
+                                render_value,
+                                node,
+                                data["attribute"],
+                                data["values"]
+                            )
+                        )
+
+        return invalid
+
+    @classmethod
+    def get_nodes(cls, instance, renderer):
+        maya_settings = instance.context.data["project_settings"]["maya"]
+        validation_settings = (
+            maya_settings["publish"]["ValidateRenderSettings"].get(
+                "{}_render_attributes".format(renderer)
+            ) or []
+        )
+        result = []
+        for attr, values in OrderedDict(validation_settings).items():
+            values = [convert_to_int_or_float(v) for v in values if v]
+
+            # Validate that the setting has values.
+            if not values:
+                cls.log.error(
+                    "Settings for {} are missing values.".format(attr)
+                )
+                continue
+
+            cls.log.debug("{}: {}".format(attr, values))

             if "." not in attr:
-                cls.log.warning("Skipping invalid attribute defined in "
-                                "validation settings: '{}'".format(attr))
+                cls.log.warning(
+                    "Skipping invalid attribute defined in validation "
+                    "settings: \"{}\"".format(attr)
+                )
                 continue

             node_type, attribute_name = attr.split(".", 1)
@@ -271,28 +336,19 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

             if not nodes:
                 cls.log.warning(
-                    "No nodes of type '{}' found.".format(node_type))
+                    "No nodes of type \"{}\" found.".format(node_type)
+                )
                 continue

-            for node in nodes:
-                try:
-                    render_value = cmds.getAttr(
-                        "{}.{}".format(node, attribute_name))
-                except RuntimeError:
-                    invalid = True
-                    cls.log.error(
-                        "Cannot get value of {}.{}".format(
-                            node, attribute_name))
-                else:
-                    if str(value) != str(render_value):
-                        invalid = True
-                        cls.log.error(
-                            ("Invalid value {} set on {}.{}. "
-                             "Expecting {}").format(
-                                render_value, node, attribute_name, value)
-                        )
+            result.append(
+                {
+                    "attribute": attribute_name,
+                    "nodes": nodes,
+                    "values": values
+                }
+            )

-        return invalid
+        return result

     @classmethod
     def repair(cls, instance):
@@ -305,6 +361,12 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             "{aov_separator}", instance.data.get("aovSeparator", "_")
         )

+        for data in cls.get_nodes(instance, renderer):
+            if not data["values"]:
+                continue
+            for node in data["nodes"]:
+                lib.set_attribute(data["attribute"], data["values"][0], node)
+
         with lib.renderlayer(layer_node):
             default = lib.RENDER_ATTRS['default']
             render_attrs = lib.RENDER_ATTRS.get(renderer, default)
@@ -313,6 +375,17 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             cmds.setAttr("defaultRenderGlobals.animation", True)

             # Repair prefix
+            if renderer == "arnold":
+                multipart = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
+                if multipart:
+                    separator_variations = [
+                        "_<RenderPass>",
+                        "<RenderPass>_",
+                        "<RenderPass>",
+                    ]
+                    for variant in separator_variations:
+                        default_prefix = default_prefix.replace(variant, "")
+
             if renderer != "renderman":
                 node = render_attrs["node"]
                 prefix_attr = render_attrs["prefix"]
diff --git a/openpype/hosts/maya/plugins/publish/validate_review.py b/openpype/hosts/maya/plugins/publish/validate_review.py
new file mode 100644
index 0000000000..12a2e7f86f
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_review.py
@@ -0,0 +1,30 @@
+import pyblish.api
+
+from openpype.pipeline.publish import (
+    ValidateContentsOrder, PublishValidationError
+)
+
+
+class ValidateReview(pyblish.api.InstancePlugin):
+    """Validate review."""
+
+    order = ValidateContentsOrder
+    label = "Validate Review"
+    families = ["review"]
+
+    def process(self, instance):
+        cameras = instance.data["cameras"]
+
+        # validate required settings
+        if len(cameras) == 0:
+            raise PublishValidationError(
+                "No camera found in review instance: {}".format(instance)
+            )
+        elif len(cameras) > 1:
+            raise PublishValidationError(
+                "Only a single camera is allowed for a review instance but "
+                "more than one camera found in review instance: {}. "
+                "Cameras found: {}".format(instance, ", ".join(cameras))
+            )
+
+        self.log.debug('camera: {}'.format(instance.data["review_camera"]))
diff --git a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py b/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py
deleted file mode 100644
index 361c594013..0000000000
--- a/openpype/hosts/maya/plugins/publish/validate_review_subset_uniqueness.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-import collections
-import pyblish.api
-from openpype.pipeline.publish import (
-    ValidateContentsOrder,
-    PublishXmlValidationError,
-)
-
-
-class ValidateReviewSubsetUniqueness(pyblish.api.ContextPlugin):
-    """Validates that review subset has unique name."""
-
-    order = ValidateContentsOrder
-    hosts = ["maya"]
-    families = ["review"]
-    label = "Validate Review Subset Unique"
-
-    def process(self, context):
-        subset_names = []
-
-        for instance in context:
-            self.log.debug("Instance: {}".format(instance.data))
-            if instance.data.get('publish'):
-                subset_names.append(instance.data.get('subset'))
-
-        non_unique = \
-            [item
-             for item, count in collections.Counter(subset_names).items()
-             if count > 1]
-        msg = ("Instance subset names {} are not unique.
".format(non_unique) + - "Ask admin to remove subset from DB for multiple reviews.") - formatting_data = { - "non_unique": ",".join(non_unique) - } - - if non_unique: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py index d5bf7fd1cf..30d95128a2 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_joints_hidden.py @@ -24,7 +24,6 @@ class ValidateRigJointsHidden(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['rig'] - version = (0, 1, 0) label = "Joints Hidden" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py index f3ed1a36ef..cba70a21b7 100644 --- a/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py +++ b/openpype/hosts/maya/plugins/publish/validate_rig_output_ids.py @@ -1,14 +1,22 @@ -import pymel.core as pc +from collections import defaultdict + +from maya import cmds import pyblish.api import openpype.hosts.maya.api.action +from openpype.hosts.maya.api.lib import get_id, set_id from openpype.pipeline.publish import ( RepairAction, ValidateContentsOrder, ) +def get_basename(node): + """Return node short name without namespace""" + return node.rsplit("|", 1)[-1].rsplit(":", 1)[-1] + + class ValidateRigOutputIds(pyblish.api.InstancePlugin): """Validate rig output ids. @@ -30,43 +38,49 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin): @classmethod def get_invalid(cls, instance, compute=False): - invalid = cls.get_invalid_matches(instance, compute=compute) - return [x["node"].longName() for x in invalid] + invalid_matches = cls.get_invalid_matches(instance, compute=compute) + return list(invalid_matches.keys()) @classmethod def get_invalid_matches(cls, instance, compute=False): - invalid = [] + invalid = {} if compute: out_set = next(x for x in instance if x.endswith("out_SET")) - instance_nodes = pc.sets(out_set, query=True) - instance_nodes.extend( - [x.getShape() for x in instance_nodes if x.getShape()]) - scene_nodes = pc.ls(type="transform") + pc.ls(type="mesh") + instance_nodes = cmds.sets(out_set, query=True, nodesOnly=True) + instance_nodes = cmds.ls(instance_nodes, long=True) + for node in instance_nodes: + shapes = cmds.listRelatives(node, shapes=True, fullPath=True) + if shapes: + instance_nodes.extend(shapes) + + scene_nodes = cmds.ls(type="transform", long=True) + scene_nodes += cmds.ls(type="mesh", long=True) scene_nodes = set(scene_nodes) - set(instance_nodes) + scene_nodes_by_basename = defaultdict(list) + for node in scene_nodes: + basename = get_basename(node) + scene_nodes_by_basename[basename].append(node) + for instance_node in instance_nodes: - matches = [] - basename = instance_node.name(stripNamespace=True) - for scene_node in scene_nodes: - if scene_node.name(stripNamespace=True) == basename: - matches.append(scene_node) + basename = get_basename(instance_node) + if basename not in scene_nodes_by_basename: + continue - if matches: - ids = [instance_node.cbId.get()] - ids.extend([x.cbId.get() for x in matches]) - ids = set(ids) + matches = scene_nodes_by_basename[basename] - if len(ids) > 1: - cls.log.error( - "\"{}\" id mismatch to: {}".format( - instance_node.longName(), matches - ) - ) 
- invalid.append( - {"node": instance_node, "matches": matches} + ids = set(get_id(node) for node in matches) + ids.add(get_id(instance_node)) + + if len(ids) > 1: + cls.log.error( + "\"{}\" id mismatch to: {}".format( + instance_node, matches ) + ) + invalid[instance_node] = matches instance.data["mismatched_output_ids"] = invalid else: @@ -76,19 +90,21 @@ class ValidateRigOutputIds(pyblish.api.InstancePlugin): @classmethod def repair(cls, instance): - invalid = cls.get_invalid_matches(instance) + invalid_matches = cls.get_invalid_matches(instance) multiple_ids_match = [] - for data in invalid: - ids = [x.cbId.get() for x in data["matches"]] + for instance_node, matches in invalid_matches.items(): + ids = set(get_id(node) for node in matches) # If there are multiple scene ids matched, and error needs to be # raised for manual correction. if len(ids) > 1: - multiple_ids_match.append(data) + multiple_ids_match.append({"node": instance_node, + "matches": matches}) continue - data["node"].cbId.set(ids[0]) + id_to_set = next(iter(ids)) + set_id(instance_node, id_to_set, overwrite=True) if multiple_ids_match: raise RuntimeError( diff --git a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py index ec2bea220d..f1fa4d3c4c 100644 --- a/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py +++ b/openpype/hosts/maya/plugins/publish/validate_scene_set_workspace.py @@ -31,8 +31,6 @@ class ValidateSceneSetWorkspace(pyblish.api.ContextPlugin): order = ValidatePipelineOrder hosts = ['maya'] - category = 'scene' - version = (0, 1, 0) label = 'Maya Workspace Set' def process(self, context): diff --git a/openpype/hosts/maya/plugins/publish/validate_shader_name.py b/openpype/hosts/maya/plugins/publish/validate_shader_name.py index b3e51f011d..034db471da 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shader_name.py +++ b/openpype/hosts/maya/plugins/publish/validate_shader_name.py @@ -50,7 +50,8 @@ class ValidateShaderName(pyblish.api.InstancePlugin): asset_name = instance.data.get("asset", None) # Check the number of connected shadingEngines per shape - r = re.compile(cls.regex) + regex_compile = re.compile(cls.regex) + error_message = "object {0} has invalid shader name {1}" for shape in shapes: shading_engines = cmds.listConnections(shape, destination=True, @@ -60,19 +61,18 @@ class ValidateShaderName(pyblish.api.InstancePlugin): ) for shader in shaders: - m = r.match(cls.regex, shader) + m = regex_compile.match(shader) if m is None: invalid.append(shape) - cls.log.error( - "object {0} has invalid shader name {1}".format(shape, - shader) - ) + cls.log.error(error_message.format(shape, shader)) else: - if 'asset' in r.groupindex: + if 'asset' in regex_compile.groupindex: if m.group('asset') != asset_name: invalid.append(shape) - cls.log.error(("object {0} has invalid " - "shader name {1}").format(shape, - shader)) + message = error_message + message += " with missing asset name \"{2}\"" + cls.log.error( + message.format(shape, shader, asset_name) + ) return invalid diff --git a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py index 651c6bcec9..4ab669f46b 100644 --- a/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py +++ b/openpype/hosts/maya/plugins/publish/validate_shape_default_names.py @@ -38,9 +38,7 @@ class ValidateShapeDefaultNames(pyblish.api.InstancePlugin): order = 
ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' optional = True - version = (0, 1, 0) label = "Shape Default Naming" actions = [openpype.hosts.maya.api.action.SelectInvalidAction, RepairAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py b/openpype/hosts/maya/plugins/publish/validate_single_assembly.py index 8771ca58d1..b768c9c4e8 100644 --- a/openpype/hosts/maya/plugins/publish/validate_single_assembly.py +++ b/openpype/hosts/maya/plugins/publish/validate_single_assembly.py @@ -19,7 +19,7 @@ class ValidateSingleAssembly(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] - families = ['rig', 'animation'] + families = ['rig'] label = 'Single Assembly' def process(self, instance): diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py index 65551c8d5e..b2a83a80fb 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_naming_suffix.py @@ -21,7 +21,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): - nurbsSurface: _NRB - locator: _LOC - null/group: _GRP - Suffices can also be overriden by project settings. + Suffices can also be overridden by project settings. .. warning:: This grabs the first child shape as a reference and doesn't use the @@ -32,9 +32,7 @@ class ValidateTransformNamingSuffix(pyblish.api.InstancePlugin): order = ValidateContentsOrder hosts = ['maya'] families = ['model'] - category = 'cleanup' optional = True - version = (0, 1, 0) label = 'Suffix Naming Conventions' actions = [openpype.hosts.maya.api.action.SelectInvalidAction] SUFFIX_NAMING_TABLE = {"mesh": ["_GEO", "_GES", "_GEP", "_OSD"], diff --git a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py index da569195e8..abd9e00af1 100644 --- a/openpype/hosts/maya/plugins/publish/validate_transform_zero.py +++ b/openpype/hosts/maya/plugins/publish/validate_transform_zero.py @@ -18,8 +18,6 @@ class ValidateTransformZero(pyblish.api.Validator): order = ValidateContentsOrder hosts = ["maya"] families = ["model"] - category = "geometry" - version = (0, 1, 0) label = "Transform Zero (Freeze)" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] diff --git a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py index 4211e76a73..e78962bf97 100644 --- a/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py +++ b/openpype/hosts/maya/plugins/publish/validate_unreal_mesh_triangulated.py @@ -13,7 +13,6 @@ class ValidateUnrealMeshTriangulated(pyblish.api.InstancePlugin): order = ValidateMeshOrder hosts = ["maya"] families = ["staticMesh"] - category = "geometry" label = "Mesh is Triangulated" actions = [openpype.hosts.maya.api.action.SelectInvalidAction] active = False diff --git a/openpype/hosts/maya/plugins/publish/validate_vray.py b/openpype/hosts/maya/plugins/publish/validate_vray.py new file mode 100644 index 0000000000..045ac258a1 --- /dev/null +++ b/openpype/hosts/maya/plugins/publish/validate_vray.py @@ -0,0 +1,18 @@ +from maya import cmds + +import pyblish.api +from openpype.pipeline import PublishValidationError + + +class ValidateVray(pyblish.api.InstancePlugin): + """Validate general Vray setup.""" + + order = 
pyblish.api.ValidatorOrder
+    label = 'VRay'
+    hosts = ["maya"]
+    families = ["vrayproxy"]
+
+    def process(self, instance):
+        # Validate vray plugin is loaded.
+        if not cmds.pluginInfo("vrayformaya", query=True, loaded=True):
+            raise PublishValidationError("Vray plugin is not loaded.")
diff --git a/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py b/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py
index 3eceace76d..a106b970b4 100644
--- a/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py
+++ b/openpype/hosts/maya/plugins/publish/validate_vrayproxy.py
@@ -1,27 +1,31 @@
 import pyblish.api

+from openpype.pipeline import KnownPublishError
+

 class ValidateVrayProxy(pyblish.api.InstancePlugin):

     order = pyblish.api.ValidatorOrder
-    label = 'VRay Proxy Settings'
-    hosts = ['maya']
-    families = ['studio.vrayproxy']
+    label = "VRay Proxy Settings"
+    hosts = ["maya"]
+    families = ["vrayproxy"]

     def process(self, instance):
-
-        invalid = self.get_invalid(instance)
-        if invalid:
-            raise RuntimeError("'%s' has invalid settings for VRay Proxy "
-                               "export!" % instance.name)
-
-    @classmethod
-    def get_invalid(cls, instance):
         data = instance.data

         if not data["setMembers"]:
-            cls.log.error("'%s' is empty! This is a bug" % instance.name)
+            raise KnownPublishError(
+                "'%s' is empty! This is a bug" % instance.name
+            )

         if data["animation"]:
             if data["frameEnd"] < data["frameStart"]:
-                cls.log.error("End frame is smaller than start frame")
+                raise KnownPublishError(
+                    "End frame is smaller than start frame"
+                )
+
+        if not data["vrmesh"] and not data["alembic"]:
+            raise KnownPublishError(
+                "Both vrmesh and alembic are off. At least one is needed"
+                " to publish."
+            )
diff --git a/openpype/hosts/maya/plugins/publish/validate_xgen.py b/openpype/hosts/maya/plugins/publish/validate_xgen.py
new file mode 100644
index 0000000000..47b24e218c
--- /dev/null
+++ b/openpype/hosts/maya/plugins/publish/validate_xgen.py
@@ -0,0 +1,72 @@
+import json
+
+import maya.cmds as cmds
+import xgenm
+
+import pyblish.api
+from openpype.pipeline.publish import PublishValidationError
+
+
+class ValidateXgen(pyblish.api.InstancePlugin):
+    """Validate Xgen data."""
+
+    label = "Validate Xgen"
+    order = pyblish.api.ValidatorOrder
+    hosts = ["maya"]
+    families = ["xgen"]
+
+    def process(self, instance):
+        set_members = instance.data.get("setMembers")
+
+        # Only 1 collection/node per instance.
+        if len(set_members) != 1:
+            raise PublishValidationError(
+                "Only one collection per instance is allowed."
+                " Found:\n{}".format(set_members)
+            )
+
+        # Only xgen palette node is allowed.
+        node_type = cmds.nodeType(set_members[0])
+        if node_type != "xgmPalette":
+            raise PublishValidationError(
+                "Only nodes of type \"xgmPalette\" are allowed. Referred to"
+                " as \"collection\" in the Maya UI."
+                " Node type found: {}".format(node_type)
+            )
+
+        # Can't have inactive modifiers in the collection because Xgen will
+        # try to look for them when loading.
+        palette = instance.data["xgmPalette"].replace("|", "")
+        inactive_modifiers = {}
+        for description in instance.data["xgmDescriptions"]:
+            description = description.split("|")[-2]
+            modifier_names = xgenm.fxModules(palette, description)
+            for name in modifier_names:
+                attr = xgenm.getAttr("active", palette, description, name)
+                # Attribute values are lowercase strings of false/true.
+                if attr == "false":
+                    try:
+                        inactive_modifiers[description].append(name)
+                    except KeyError:
+                        inactive_modifiers[description] = [name]
+
+        if inactive_modifiers:
+            raise PublishValidationError(
+                "There are inactive modifiers on the collection. "
+                "Please delete these:\n{}".format(
+                    json.dumps(inactive_modifiers, indent=4, sort_keys=True)
+                )
+            )
+
+        # We need a namespace else there will be a naming conflict when
+        # extracting because of stripping namespaces and parenting to world.
+        node_names = [instance.data["xgmPalette"]]
+        for _, connections in instance.data["xgenConnections"].items():
+            node_names.append(connections["transform"].split(".")[0])
+
+        non_namespaced_nodes = [n for n in node_names if ":" not in n]
+        if non_namespaced_nodes:
+            raise PublishValidationError(
+                "Could not find namespace on {}. Namespace is required for"
+                " xgen publishing.".format(non_namespaced_nodes)
+            )
diff --git a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py b/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py
index a864a18cee..06250f5779 100644
--- a/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py
+++ b/openpype/hosts/maya/plugins/publish/validate_yeti_renderscript_callbacks.py
@@ -48,6 +48,18 @@ class ValidateYetiRenderScriptCallbacks(pyblish.api.InstancePlugin):

         yeti_loaded = cmds.pluginInfo("pgYetiMaya", query=True, loaded=True)

+        if not yeti_loaded and not cmds.ls(type="pgYetiMaya"):
+            # The yeti plug-in is not loaded and the scene contains no yeti
+            # nodes, so at this point we don't care whether the scene has
+            # any yeti callback set or not: without yeti nodes the callbacks
+            # would do nothing anyway.
+            cls.log.info(
+                "Yeti is not loaded and no yeti nodes were found. "
+                "Callback validation skipped."
+            )
+            return False
+
         renderer = instance.data["renderer"]
         if renderer == "redshift":
             cls.log.info("Redshift ignores any pre and post render callbacks")
diff --git a/openpype/hosts/maya/startup/userSetup.py b/openpype/hosts/maya/startup/userSetup.py
index 40cd51f2d8..ae6a999d98 100644
--- a/openpype/hosts/maya/startup/userSetup.py
+++ b/openpype/hosts/maya/startup/userSetup.py
@@ -1,18 +1,52 @@
 import os
+
 from openpype.settings import get_project_settings
 from openpype.pipeline import install_host
 from openpype.hosts.maya.api import MayaHost
+
 from maya import cmds

+
 host = MayaHost()
 install_host(host)

+print("Starting OpenPype usersetup...")

-print("starting OpenPype usersetup")
+project_settings = get_project_settings(os.environ['AVALON_PROJECT'])

-# build a shelf
-settings = get_project_settings(os.environ['AVALON_PROJECT'])
-shelf_preset = settings['maya'].get('project_shelf')
+# Loading plugins explicitly.
+explicit_plugins_loading = project_settings["maya"]["explicit_plugins_loading"]
+if explicit_plugins_loading["enabled"]:
+    def _explicit_load_plugins():
+        for plugin in explicit_plugins_loading["plugins_to_load"]:
+            if plugin["enabled"]:
+                print("Loading plug-in: " + plugin["name"])
+                try:
+                    cmds.loadPlugin(plugin["name"], quiet=True)
+                except RuntimeError as e:
+                    print(e)
+
+    # We need to load plugins deferred as loading them directly here does
+    # not work correctly due to Maya's initialization order.
+    cmds.evalDeferred(
+        _explicit_load_plugins,
+        lowestPriority=True
+    )
+
+# Open Workfile Post Initialization.
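+# Driven by an environment variable so the behavior can be toggled per
+# launch without changing any settings.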
+key = "OPENPYPE_OPEN_WORKFILE_POST_INITIALIZATION" +if bool(int(os.environ.get(key, "0"))): + def _log_and_open(): + path = os.environ["AVALON_LAST_WORKFILE"] + print("Opening \"{}\"".format(path)) + cmds.file(path, open=True, force=True) + cmds.evalDeferred( + _log_and_open, + lowestPriority=True + ) + +# Build a shelf. +shelf_preset = project_settings['maya'].get('project_shelf') if shelf_preset: project = os.environ["AVALON_PROJECT"] @@ -26,7 +60,10 @@ if shelf_preset: print(import_string) exec(import_string) - cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") + cmds.evalDeferred( + "mlib.shelf(name=shelf_preset['name'], iconPath=icon_path," + " preset=shelf_preset)" + ) -print("finished OpenPype usersetup") +print("Finished OpenPype usersetup.") diff --git a/openpype/hosts/maya/tools/__init__.py b/openpype/hosts/maya/tools/__init__.py new file mode 100644 index 0000000000..bd1e302cd2 --- /dev/null +++ b/openpype/hosts/maya/tools/__init__.py @@ -0,0 +1,27 @@ +from openpype.tools.utils.host_tools import qt_app_context + + +class MayaToolsSingleton: + _look_assigner = None + + +def get_look_assigner_tool(parent): + """Create, cache and return look assigner tool window.""" + if MayaToolsSingleton._look_assigner is None: + from .mayalookassigner import MayaLookAssignerWindow + mayalookassigner_window = MayaLookAssignerWindow(parent) + MayaToolsSingleton._look_assigner = mayalookassigner_window + return MayaToolsSingleton._look_assigner + + +def show_look_assigner(parent=None): + """Look manager is Maya specific tool for look management.""" + + with qt_app_context(): + look_assigner_tool = get_look_assigner_tool(parent) + look_assigner_tool.show() + + # Pull window to the front. + look_assigner_tool.raise_() + look_assigner_tool.activateWindow() + look_assigner_tool.showNormal() diff --git a/openpype/tools/mayalookassigner/LICENSE b/openpype/hosts/maya/tools/mayalookassigner/LICENSE similarity index 100% rename from openpype/tools/mayalookassigner/LICENSE rename to openpype/hosts/maya/tools/mayalookassigner/LICENSE diff --git a/openpype/tools/mayalookassigner/__init__.py b/openpype/hosts/maya/tools/mayalookassigner/__init__.py similarity index 100% rename from openpype/tools/mayalookassigner/__init__.py rename to openpype/hosts/maya/tools/mayalookassigner/__init__.py diff --git a/openpype/hosts/maya/tools/mayalookassigner/alembic.py b/openpype/hosts/maya/tools/mayalookassigner/alembic.py new file mode 100644 index 0000000000..6885e923d3 --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/alembic.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +"""Tools for loading looks to vray proxies.""" +import os +from collections import defaultdict +import logging + +import six + +import alembic.Abc + + +log = logging.getLogger(__name__) + + +def get_alembic_paths_by_property(filename, attr, verbose=False): + # type: (str, str, bool) -> dict + """Return attribute value per objects in the Alembic file. + + Reads an Alembic archive hierarchy and retrieves the + value from the `attr` properties on the objects. + + Args: + filename (str): Full path to Alembic archive to read. + attr (str): Id attribute. + verbose (bool): Whether to verbosely log missing attributes. 
+
+    Returns:
+        dict: Mapping of node full path to its id
+
+    """
+    # Normalize alembic path
+    filename = os.path.normpath(filename)
+    filename = filename.replace("\\", "/")
+    filename = str(filename)  # path must be string
+
+    try:
+        archive = alembic.Abc.IArchive(filename)
+    except RuntimeError:
+        # invalid alembic file - probably vrmesh
+        log.warning("{} is not an alembic file".format(filename))
+        return {}
+    root = archive.getTop()
+
+    iterator = list(root.children)
+    obj_ids = {}
+
+    for obj in iterator:
+        name = obj.getFullName()
+
+        # include children for coming iterations
+        iterator.extend(obj.children)
+
+        props = obj.getProperties()
+        if props.getNumProperties() == 0:
+            # Skip those without properties, e.g. '/materials' in a gpuCache
+            continue
+
+        # The custom attribute is under the properties' first container under
+        # the ".arbGeomParams"
+        prop = props.getProperty(0)  # get base property
+
+        _property = None
+        try:
+            geo_params = prop.getProperty('.arbGeomParams')
+            _property = geo_params.getProperty(attr)
+        except KeyError:
+            if verbose:
+                log.debug("Missing attr on: {0}".format(name))
+            continue
+
+        if not _property.isConstant():
+            log.warning("Id not constant on: {0}".format(name))
+
+        # Get first value sample
+        value = _property.getValue()[0]
+
+        obj_ids[name] = value
+
+    return obj_ids
+
+
+def get_alembic_ids_cache(path):
+    # type: (str) -> dict
+    """Build an id to node mapping from an Alembic file.
+
+    Nodes without IDs are ignored.
+
+    Returns:
+        dict: Mapping of id to nodes in the Alembic.
+
+    """
+    node_ids = get_alembic_paths_by_property(path, attr="cbId")
+    id_nodes = defaultdict(list)
+    for node, _id in six.iteritems(node_ids):
+        id_nodes[_id].append(node)
+
+    return dict(six.iteritems(id_nodes))
diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/hosts/maya/tools/mayalookassigner/app.py
similarity index 88%
rename from openpype/tools/mayalookassigner/app.py
rename to openpype/hosts/maya/tools/mayalookassigner/app.py
index 5665acea42..13da999c2d 100644
--- a/openpype/tools/mayalookassigner/app.py
+++ b/openpype/hosts/maya/tools/mayalookassigner/app.py
@@ -2,7 +2,7 @@ import sys
 import time
 import logging

-from Qt import QtWidgets, QtCore
+from qtpy import QtWidgets, QtCore

 from openpype.client import get_last_version_by_subset_id
 from openpype import style
@@ -24,6 +24,7 @@ from .commands import (
     remove_unused_looks
 )
 from .vray_proxies import vrayproxy_assign_look
+from . import arnold_standin

 module = sys.modules[__name__]
 module.window = None
@@ -43,7 +44,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
         filename = get_workfile()

         self.setObjectName("lookManager")
-        self.setWindowTitle("Look Manager 1.3.0 - [{}]".format(filename))
+        self.setWindowTitle("Look Manager 1.4.0 - [{}]".format(filename))
         self.setWindowFlags(QtCore.Qt.Window)
         self.setParent(parent)
@@ -240,18 +241,38 @@ class MayaLookAssignerWindow(QtWidgets.QWidget):
             ))

             nodes = item["nodes"]

+            # Assign Vray Proxy look.
             if cmds.pluginInfo('vrayformaya', query=True, loaded=True):
                 self.echo("Getting vray proxy nodes ...")
                 vray_proxies = set(cmds.ls(type="VRayProxy", long=True))

-                if vray_proxies:
-                    for vp in vray_proxies:
-                        if vp in nodes:
-                            vrayproxy_assign_look(vp, subset_name)
+                for vp in vray_proxies:
+                    if vp in nodes:
+                        vrayproxy_assign_look(vp, subset_name)

-                    nodes = list(set(item["nodes"]).difference(vray_proxies))
+                nodes = list(set(nodes).difference(vray_proxies))
+            else:
+                self.echo(
+                    "Could not assign to VRayProxy because vrayformaya plugin "
+                    "is not loaded."
+ ) - # Assign look + # Assign Arnold Standin look. + if cmds.pluginInfo("mtoa", query=True, loaded=True): + arnold_standins = set(cmds.ls(type="aiStandIn", long=True)) + + for standin in arnold_standins: + if standin in nodes: + arnold_standin.assign_look(standin, subset_name) + + nodes = list(set(nodes).difference(arnold_standins)) + else: + self.echo( + "Could not assign to aiStandIn because mtoa plugin is not " + "loaded." + ) + + # Assign look if nodes: assign_look_by_version(nodes, version_id=version["_id"]) diff --git a/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py new file mode 100644 index 0000000000..0ce2b21dcd --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/arnold_standin.py @@ -0,0 +1,253 @@ +import os +import json +from collections import defaultdict +import logging + +from maya import cmds + +from openpype.pipeline import legacy_io +from openpype.client import get_last_version_by_subset_name +from openpype.hosts.maya import api +from . import lib +from .alembic import get_alembic_ids_cache + + +log = logging.getLogger(__name__) + + +ATTRIBUTE_MAPPING = { + "primaryVisibility": "visibility", # Camera + "castsShadows": "visibility", # Shadow + "receiveShadows": "receive_shadows", + "aiSelfShadows": "self_shadows", + "aiOpaque": "opaque", + "aiMatte": "matte", + "aiVisibleInDiffuseTransmission": "visibility", + "aiVisibleInSpecularTransmission": "visibility", + "aiVisibleInVolume": "visibility", + "aiVisibleInDiffuseReflection": "visibility", + "aiVisibleInSpecularReflection": "visibility", + "aiSubdivUvSmoothing": "subdiv_uv_smoothing", + "aiDispHeight": "disp_height", + "aiDispPadding": "disp_padding", + "aiDispZeroValue": "disp_zero_value", + "aiStepSize": "step_size", + "aiVolumePadding": "volume_padding", + "aiSubdivType": "subdiv_type", + "aiSubdivIterations": "subdiv_iterations" +} + + +def calculate_visibility_mask(attributes): + # https://arnoldsupport.com/2018/11/21/backdoor-setting-visibility/ + mapping = { + "primaryVisibility": 1, # Camera + "castsShadows": 2, # Shadow + "aiVisibleInDiffuseTransmission": 4, + "aiVisibleInSpecularTransmission": 8, + "aiVisibleInVolume": 16, + "aiVisibleInDiffuseReflection": 32, + "aiVisibleInSpecularReflection": 64 + } + mask = 255 + for attr, value in mapping.items(): + if attributes.get(attr, True): + continue + + mask -= value + + return mask + + +def get_nodes_by_id(standin): + """Get node id from aiStandIn via json sidecar. + + Args: + standin (string): aiStandIn node. + + Returns: + (dict): Dictionary with node full name/path and id. + """ + path = cmds.getAttr(standin + ".dso") + + if path.endswith(".abc"): + # Support alembic files directly + return get_alembic_ids_cache(path) + + json_path = None + for f in os.listdir(os.path.dirname(path)): + if f.endswith(".json"): + json_path = os.path.join(os.path.dirname(path), f) + break + + if not json_path: + log.warning("Could not find json file for {}.".format(standin)) + return {} + + with open(json_path, "r") as f: + return json.load(f) + + +def shading_engine_assignments(shading_engine, attribute, nodes, assignments): + """Full assignments with shader or disp_map. + + Args: + shading_engine (string): Shading engine for material. + attribute (string): "surfaceShader" or "displacementShader" + nodes: (list): Nodes paths relative to aiStandIn. + assignments (dict): Assignments by nodes. + """ + shader_inputs = cmds.listConnections( + shading_engine + "." 
+ attribute, source=True + ) + if not shader_inputs: + log.info( + "Shading engine \"{}\" missing input \"{}\"".format( + shading_engine, attribute + ) + ) + return + + # Strip off component assignments + for i, node in enumerate(nodes): + if "." in node: + log.warning( + "Converting face assignment to full object assignment. This " + "conversion can be lossy: {}".format(node) + ) + nodes[i] = node.split(".")[0] + + shader_type = "shader" if attribute == "surfaceShader" else "disp_map" + assignment = "{}='{}'".format(shader_type, shader_inputs[0]) + for node in nodes: + assignments[node].append(assignment) + + +def assign_look(standin, subset): + log.info("Assigning {} to {}.".format(subset, standin)) + + nodes_by_id = get_nodes_by_id(standin) + + # Group by asset id so we run over the look per asset + node_ids_by_asset_id = defaultdict(set) + for node_id in nodes_by_id: + asset_id = node_id.split(":", 1)[0] + node_ids_by_asset_id[asset_id].add(node_id) + + project_name = legacy_io.active_project() + for asset_id, node_ids in node_ids_by_asset_id.items(): + + # Get latest look version + version = get_last_version_by_subset_name( + project_name, + subset_name=subset, + asset_id=asset_id, + fields=["_id"] + ) + if not version: + log.info("Didn't find last version for subset name {}".format( + subset + )) + continue + + relationships = lib.get_look_relationships(version["_id"]) + shader_nodes, container_node = lib.load_look(version["_id"]) + namespace = shader_nodes[0].split(":")[0] + + # Get only the node ids and paths related to this asset + # And get the shader edits the look supplies + asset_nodes_by_id = { + node_id: nodes_by_id[node_id] for node_id in node_ids + } + edits = list( + api.lib.iter_shader_edits( + relationships, shader_nodes, asset_nodes_by_id + ) + ) + + # Create assignments + node_assignments = {} + for edit in edits: + for node in edit["nodes"]: + if node not in node_assignments: + node_assignments[node] = [] + + if edit["action"] == "assign": + if not cmds.ls(edit["shader"], type="shadingEngine"): + log.info("Skipping non-shader: %s" % edit["shader"]) + continue + + shading_engine_assignments( + shading_engine=edit["shader"], + attribute="surfaceShader", + nodes=edit["nodes"], + assignments=node_assignments + ) + shading_engine_assignments( + shading_engine=edit["shader"], + attribute="displacementShader", + nodes=edit["nodes"], + assignments=node_assignments + ) + + if edit["action"] == "setattr": + visibility = False + for attr, value in edit["attributes"].items(): + if attr not in ATTRIBUTE_MAPPING: + log.warning( + "Skipping setting attribute {} on {} because it is" + " not recognized.".format(attr, edit["nodes"]) + ) + continue + + if isinstance(value, str): + value = "'{}'".format(value) + + if ATTRIBUTE_MAPPING[attr] == "visibility": + visibility = True + continue + + assignment = "{}={}".format(ATTRIBUTE_MAPPING[attr], value) + + for node in edit["nodes"]: + node_assignments[node].append(assignment) + + if visibility: + mask = calculate_visibility_mask(edit["attributes"]) + assignment = "visibility={}".format(mask) + + for node in edit["nodes"]: + node_assignments[node].append(assignment) + + # Assign shader + # Clear all current shader assignments + plug = standin + ".operators" + num = cmds.getAttr(plug, size=True) + for i in reversed(range(num)): + cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) + + # Create new assignment overrides + index = 0 + for node, assignments in node_assignments.items(): + if not assignments: + continue + + with 
api.lib.maintained_selection(): + operator = cmds.createNode("aiSetParameter") + operator = cmds.rename(operator, namespace + ":" + operator) + + cmds.setAttr(operator + ".selection", node, type="string") + for i, assignment in enumerate(assignments): + cmds.setAttr( + "{}.assignment[{}]".format(operator, i), + assignment, + type="string" + ) + + cmds.connectAttr( + operator + ".out", "{}[{}]".format(plug, index) + ) + + index += 1 + + cmds.sets(operator, edit=True, addElement=container_node) diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/hosts/maya/tools/mayalookassigner/commands.py similarity index 79% rename from openpype/tools/mayalookassigner/commands.py rename to openpype/hosts/maya/tools/mayalookassigner/commands.py index 2e7a51efde..c5e6c973cf 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/hosts/maya/tools/mayalookassigner/commands.py @@ -13,6 +13,7 @@ from openpype.pipeline import ( from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache +from . import arnold_standin log = logging.getLogger(__name__) @@ -44,33 +45,11 @@ def get_namespace_from_node(node): return parts[0] if len(parts) > 1 else u":" -def list_descendents(nodes): - """Include full descendant hierarchy of given nodes. - - This is a workaround to cmds.listRelatives(allDescendents=True) because - this way correctly keeps children instance paths (see Maya documentation) - - This fixes LKD-26: assignments not working as expected on instanced shapes. - - Return: - list: List of children descendents of nodes - - """ - result = [] - while True: - nodes = cmds.listRelatives(nodes, - fullPath=True) - if nodes: - result.extend(nodes) - else: - return result - - def get_selected_nodes(): """Get information from current selection""" selection = cmds.ls(selection=True, long=True) - hierarchy = list_descendents(selection) + hierarchy = lib.get_all_children(selection) return list(set(selection + hierarchy)) @@ -80,21 +59,7 @@ def get_all_asset_nodes(): Returns: list: list of dictionaries """ - - host = registered_host() - - nodes = [] - for container in host.ls(): - # We are not interested in looks but assets! 
- if container["loader"] == "LookLoader": - continue - - # Gather all information - container_name = container["objectName"] - nodes += lib.get_container_members(container_name) - - nodes = list(set(nodes)) - return nodes + return cmds.ls(dag=True, noIntermediate=True, long=True) def create_asset_id_hash(nodes): @@ -119,10 +84,12 @@ def create_asset_id_hash(nodes): path = cmds.getAttr("{}.fileName".format(node)) ids = get_alembic_ids_cache(path) for k, _ in ids.items(): - pid = k.split(":")[0] - if node not in node_id_hash[pid]: - node_id_hash[pid].append(node) - + id = k.split(":")[0] + node_id_hash[id].append(node) + elif cmds.nodeType(node) == "aiStandIn": + for id, _ in arnold_standin.get_nodes_by_id(node).items(): + id = id.split(":")[0] + node_id_hash[id].append(node) else: value = lib.get_id(node) if value is None: diff --git a/openpype/hosts/maya/tools/mayalookassigner/lib.py b/openpype/hosts/maya/tools/mayalookassigner/lib.py new file mode 100644 index 0000000000..fddaf6112d --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/lib.py @@ -0,0 +1,87 @@ +import json +import logging + +from openpype.pipeline import ( + legacy_io, + get_representation_path, + registered_host, + discover_loader_plugins, + loaders_from_representation, + load_container +) +from openpype.client import get_representation_by_name +from openpype.hosts.maya.api import lib + + +log = logging.getLogger(__name__) + + +def get_look_relationships(version_id): + # type: (str) -> dict + """Get relations for the look. + + Args: + version_id (str): Parent version Id. + + Returns: + dict: Dictionary of relations. + """ + + project_name = legacy_io.active_project() + json_representation = get_representation_by_name( + project_name, representation_name="json", version_id=version_id + ) + + # Load relationships + shader_relation = get_representation_path(json_representation) + with open(shader_relation, "r") as f: + relationships = json.load(f) + + return relationships + + +def load_look(version_id): + # type: (str) -> list + """Load look from version. + + Get look from version and invoke Loader for it. + + Args: + version_id (str): Version ID + + Returns: + list of shader nodes. + + """ + + project_name = legacy_io.active_project() + # Get representations of shader file and relationships + look_representation = get_representation_by_name( + project_name, representation_name="ma", version_id=version_id + ) + + # See if representation is already loaded, if so reuse it. 
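+    # Containers are matched by their loader name and representation id.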
+ host = registered_host() + representation_id = str(look_representation['_id']) + for container in host.ls(): + if (container['loader'] == "LookLoader" and + container['representation'] == representation_id): + log.info("Reusing loaded look ...") + container_node = container['objectName'] + break + else: + log.info("Using look for the first time ...") + + # Load file + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation(all_loaders, representation_id) + loader = next( + (i for i in loaders if i.__name__ == "LookLoader"), None) + if loader is None: + raise RuntimeError("Could not find LookLoader, this is a bug") + + # Reference the look file + with lib.maintained_selection(): + container_node = load_container(loader, look_representation)[0] + + return lib.get_container_members(container_node), container_node diff --git a/openpype/tools/mayalookassigner/models.py b/openpype/hosts/maya/tools/mayalookassigner/models.py similarity index 99% rename from openpype/tools/mayalookassigner/models.py rename to openpype/hosts/maya/tools/mayalookassigner/models.py index 77a3c8a590..ed6a68bee0 100644 --- a/openpype/tools/mayalookassigner/models.py +++ b/openpype/hosts/maya/tools/mayalookassigner/models.py @@ -1,6 +1,6 @@ from collections import defaultdict -from Qt import QtCore +from qtpy import QtCore import qtawesome from openpype.tools.utils import models diff --git a/openpype/tools/mayalookassigner/views.py b/openpype/hosts/maya/tools/mayalookassigner/views.py similarity index 92% rename from openpype/tools/mayalookassigner/views.py rename to openpype/hosts/maya/tools/mayalookassigner/views.py index 8e676ebc7f..489c194f60 100644 --- a/openpype/tools/mayalookassigner/views.py +++ b/openpype/hosts/maya/tools/mayalookassigner/views.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore class View(QtWidgets.QTreeView): @@ -10,7 +10,7 @@ class View(QtWidgets.QTreeView): # view settings self.setAlternatingRowColors(False) self.setSortingEnabled(True) - self.setSelectionMode(self.ExtendedSelection) + self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) def get_indices(self): diff --git a/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py new file mode 100644 index 0000000000..c875fec7f0 --- /dev/null +++ b/openpype/hosts/maya/tools/mayalookassigner/vray_proxies.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +"""Tools for loading looks to vray proxies.""" +from collections import defaultdict +import logging + +from maya import cmds + +from openpype.client import get_last_version_by_subset_name +from openpype.pipeline import legacy_io +import openpype.hosts.maya.lib as maya_lib +from . import lib +from .alembic import get_alembic_ids_cache + + +log = logging.getLogger(__name__) + + +def assign_vrayproxy_shaders(vrayproxy, assignments): + # type: (str, dict) -> None + """Assign shaders to content of Vray Proxy. + + This will create shader overrides on Vray Proxy to assign shaders to its + content. + + Todo: + Allow to optimize and assign a single shader to multiple shapes at + once or maybe even set it to the highest available path? + + Args: + vrayproxy (str): Name of Vray Proxy + assignments (dict): Mapping of shader assignments. 
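+            Maps material (shading engine) name to a list of node paths
+            inside the proxy.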
+ + Returns: + None + + """ + # Clear all current shader assignments + plug = vrayproxy + ".shaders" + num = cmds.getAttr(plug, size=True) + for i in reversed(range(num)): + cmds.removeMultiInstance("{}[{}]".format(plug, i), b=True) + + # Create new assignment overrides + index = 0 + for material, paths in assignments.items(): + for path in paths: + plug = "{}.shaders[{}]".format(vrayproxy, index) + cmds.setAttr(plug + ".shadersNames", path, type="string") + cmds.connectAttr(material + ".outColor", + plug + ".shadersConnections", force=True) + index += 1 + + +def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): + # type: (str, str) -> None + """Assign look to vray proxy. + + Args: + vrayproxy (str): Name of vrayproxy to apply look to. + subset (str): Name of look subset. + + Returns: + None + + """ + path = cmds.getAttr(vrayproxy + ".fileName") + + nodes_by_id = get_alembic_ids_cache(path) + if not nodes_by_id: + log.warning("Alembic file has no cbId attributes: %s" % path) + return + + # Group by asset id so we run over the look per asset + node_ids_by_asset_id = defaultdict(set) + for node_id in nodes_by_id: + asset_id = node_id.split(":", 1)[0] + node_ids_by_asset_id[asset_id].add(node_id) + + project_name = legacy_io.active_project() + for asset_id, node_ids in node_ids_by_asset_id.items(): + + # Get latest look version + version = get_last_version_by_subset_name( + project_name, + subset_name=subset, + asset_id=asset_id, + fields=["_id"] + ) + if not version: + print("Didn't find last version for subset name {}".format( + subset + )) + continue + + relationships = lib.get_look_relationships(version["_id"]) + shadernodes, _ = lib.load_look(version["_id"]) + + # Get only the node ids and paths related to this asset + # And get the shader edits the look supplies + asset_nodes_by_id = { + node_id: nodes_by_id[node_id] for node_id in node_ids + } + edits = list( + maya_lib.iter_shader_edits( + relationships, shadernodes, asset_nodes_by_id + ) + ) + + # Create assignments + assignments = {} + for edit in edits: + if edit["action"] == "assign": + nodes = edit["nodes"] + shader = edit["shader"] + if not cmds.ls(shader, type="shadingEngine"): + print("Skipping non-shader: %s" % shader) + continue + + inputs = cmds.listConnections( + shader + ".surfaceShader", source=True) + if not inputs: + print("Shading engine missing material: %s" % shader) + + # Strip off component assignments + for i, node in enumerate(nodes): + if "." in node: + log.warning( + ("Converting face assignment to full object " + "assignment. 
This conversion can be lossy: " + "{}").format(node)) + nodes[i] = node.split(".")[0] + + material = inputs[0] + assignments[material] = nodes + + assign_vrayproxy_shaders(vrayproxy, assignments) diff --git a/openpype/tools/mayalookassigner/widgets.py b/openpype/hosts/maya/tools/mayalookassigner/widgets.py similarity index 99% rename from openpype/tools/mayalookassigner/widgets.py rename to openpype/hosts/maya/tools/mayalookassigner/widgets.py index 10e573342a..f2df17e68c 100644 --- a/openpype/tools/mayalookassigner/widgets.py +++ b/openpype/hosts/maya/tools/mayalookassigner/widgets.py @@ -1,7 +1,7 @@ import logging from collections import defaultdict -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore from openpype.tools.utils.models import TreeModel from openpype.tools.utils.lib import ( diff --git a/openpype/hosts/nuke/addon.py b/openpype/hosts/nuke/addon.py index 9d25afe2b6..6a4b91a76d 100644 --- a/openpype/hosts/nuke/addon.py +++ b/openpype/hosts/nuke/addon.py @@ -63,5 +63,12 @@ class NukeAddon(OpenPypeModule, IHostAddon): path_paths.append(quick_time_path) env["PATH"] = os.pathsep.join(path_paths) + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(NUKE_ROOT_DIR, "hooks") + ] + def get_workfile_extensions(self): return [".nk"] diff --git a/openpype/hosts/nuke/api/__init__.py b/openpype/hosts/nuke/api/__init__.py index c65058874b..1af5ff365d 100644 --- a/openpype/hosts/nuke/api/__init__.py +++ b/openpype/hosts/nuke/api/__init__.py @@ -6,32 +6,45 @@ from .workio import ( current_file, work_root, ) - from .command import ( viewer_update_and_undo_stop ) - -from .plugin import OpenPypeCreator +from .plugin import ( + NukeCreator, + NukeWriteCreator, + NukeCreatorError, + OpenPypeCreator, + get_instance_group_node_childs, + get_colorspace_from_node +) from .pipeline import ( - install, - uninstall, + NukeHost, ls, + list_instances, + remove_instance, + select_instance, + containerise, parse_container, update_container, - get_workfile_build_placeholder_plugins, ) from .lib import ( + INSTANCE_DATA_KNOB, + ROOT_DATA_KNOB, maintained_selection, reset_selection, + select_nodes, get_view_process_node, duplicate_node, - convert_knob_value_to_correct_type + convert_knob_value_to_correct_type, + get_node_data, + set_node_data, + update_node_data, + create_write_node ) - from .utils import ( colorspace_exists_on_node, get_colorspace_list @@ -47,23 +60,36 @@ __all__ = ( "viewer_update_and_undo_stop", + "NukeCreator", + "NukeWriteCreator", + "NukeCreatorError", "OpenPypeCreator", - "install", - "uninstall", + "NukeHost", + "get_instance_group_node_childs", + "get_colorspace_from_node", "ls", + "list_instances", + "remove_instance", + "select_instance", + "containerise", "parse_container", "update_container", - "get_workfile_build_placeholder_plugins", - + "INSTANCE_DATA_KNOB", + "ROOT_DATA_KNOB", "maintained_selection", "reset_selection", + "select_nodes", "get_view_process_node", "duplicate_node", "convert_knob_value_to_correct_type", + "get_node_data", + "set_node_data", + "update_node_data", + "create_write_node", "colorspace_exists_on_node", "get_colorspace_list" diff --git a/openpype/hosts/nuke/api/constants.py b/openpype/hosts/nuke/api/constants.py new file mode 100644 index 0000000000..110199720f --- /dev/null +++ b/openpype/hosts/nuke/api/constants.py @@ -0,0 +1,4 @@ +import os + + +ASSIST = bool(os.getenv("NUKEASSIST")) diff --git a/openpype/hosts/nuke/api/gizmo_menu.py b/openpype/hosts/nuke/api/gizmo_menu.py 
index 9edfc62e3b..5838ee8a8a 100644 --- a/openpype/hosts/nuke/api/gizmo_menu.py +++ b/openpype/hosts/nuke/api/gizmo_menu.py @@ -53,12 +53,18 @@ class GizmoMenu(): item_type = item.get("sourcetype") - if item_type == ("python" or "file"): + if item_type == "python": parent.addCommand( item["title"], command=str(item["command"]), icon=item.get("icon"), - shortcut=item.get("hotkey") + shortcut=item.get("shortcut") + ) + elif item_type == "file": + parent.addCommand( + item['title'], + "nuke.createNode('{}')".format(item.get('file_name')), + shortcut=item.get('shortcut') ) # add separator diff --git a/openpype/hosts/nuke/api/lib.py b/openpype/hosts/nuke/api/lib.py index 2fdf446357..777f4454dc 100644 --- a/openpype/hosts/nuke/api/lib.py +++ b/openpype/hosts/nuke/api/lib.py @@ -1,14 +1,15 @@ import os from pprint import pformat import re +import json import six +import functools +import warnings import platform import tempfile import contextlib from collections import OrderedDict -import clique - import nuke from qtpy import QtCore, QtWidgets @@ -22,6 +23,9 @@ from openpype.client import ( from openpype.host import HostDirmap from openpype.tools.utils import host_tools +from openpype.pipeline.workfile.workfile_template_builder import ( + TemplateProfileNotFound +) from openpype.lib import ( env_value_to_bool, Logger, @@ -30,12 +34,12 @@ from openpype.lib import ( from openpype.settings import ( get_project_settings, - get_anatomy_settings, get_current_project_settings, ) from openpype.modules import ModulesManager from openpype.pipeline.template_data import get_template_data_with_names from openpype.pipeline import ( + get_current_project_name, discover_legacy_creator_plugins, legacy_io, Anatomy, @@ -44,9 +48,12 @@ from openpype.pipeline.context_tools import ( get_current_project_asset, get_custom_workfile_template_from_session ) +from openpype.pipeline.colorspace import ( + get_imageio_config +) from openpype.pipeline.workfile import BuildWorkfile - from . import gizmo_menu +from .constants import ASSIST from .workio import ( save_file, @@ -64,6 +71,54 @@ EXCLUDED_KNOB_TYPE_ON_READ = ( 26, # Text Knob (But for backward compatibility, still be read # if value is not an empty string.) ) +JSON_PREFIX = "JSON:::" +ROOT_DATA_KNOB = "publish_context" +INSTANCE_DATA_KNOB = "publish_instance" + + +class DeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+                )
+            else:
+                warning_message = " Please replace your usage with '{}'.".format(
+                    new_destination
+                )
+
+        @functools.wraps(decorated_func)
+        def wrapper(*args, **kwargs):
+            warnings.simplefilter("always", DeprecatedWarning)
+            warnings.warn(
+                (
+                    "Call to deprecated function '{}'"
+                    "\nFunction was moved or removed.{}"
+                ).format(decorated_func.__name__, warning_message),
+                category=DeprecatedWarning,
+                stacklevel=4
+            )
+            return decorated_func(*args, **kwargs)
+        return wrapper
+
+    if func is None:
+        return _decorator
+    return _decorator(func)
 
 
 class Context:
@@ -94,8 +149,78 @@ def get_main_window():
     return Context.main_window
 
 
+def set_node_data(node, knobname, data):
+    """Write data to a node's invisible knob.
+
+    Will create a new knob if it doesn't exist,
+    or update the one already created.
+
+    Args:
+        node (nuke.Node): node object
+        knobname (str): knob name
+        data (dict): data to be stored in knob
+    """
+    # if exists then update data
+    if knobname in node.knobs():
+        log.debug("Updating knobname `{}` on node `{}`".format(
+            knobname, node.name()
+        ))
+        update_node_data(node, knobname, data)
+        return
+
+    log.debug("Creating knobname `{}` on node `{}`".format(
+        knobname, node.name()
+    ))
+    # else create new
+    knob_value = JSON_PREFIX + json.dumps(data)
+    knob = nuke.String_Knob(knobname)
+    knob.setValue(knob_value)
+    knob.setFlag(nuke.INVISIBLE)
+    node.addKnob(knob)
+
+
+def get_node_data(node, knobname):
+    """Read data from node.
+
+    Args:
+        node (nuke.Node): node object
+        knobname (str): knob name
+
+    Returns:
+        dict: data stored in knob
+    """
+    if knobname not in node.knobs():
+        return
+
+    rawdata = node[knobname].getValue()
+    if (
+        isinstance(rawdata, six.string_types)
+        and rawdata.startswith(JSON_PREFIX)
+    ):
+        try:
+            return json.loads(rawdata[len(JSON_PREFIX):])
+        except json.JSONDecodeError:
+            return
+
+
+def update_node_data(node, knobname, data):
+    """Update already present data.
+
+    Args:
+        node (nuke.Node): node object
+        knobname (str): knob name
+        data (dict): data to update knob value
+    """
+    knob = node[knobname]
+    node_data = get_node_data(node, knobname) or {}
+    node_data.update(data)
+    knob_value = JSON_PREFIX + json.dumps(node_data)
+    knob.setValue(knob_value)
+
+
 class Knobby(object):
-    """For creating knob which it's type isn't mapped in `create_knobs`
+    """[DEPRECATED] For creating a knob whose type isn't
+    mapped in `create_knobs`
 
     Args:
         type (string): Nuke knob type name
@@ -120,6 +245,12 @@ class Knobby(object):
                 knob.setFlag(flag)
         return knob
 
+    @staticmethod
+    def nice_naming(key):
+        """Convert camelCase name into UI Display Name"""
+        words = re.findall('[A-Z][^A-Z]*', key[0].upper() + key[1:])
+        return " ".join(words)
+
 
 def create_knobs(data, tab=None):
     """Create knobs by data
@@ -271,8 +402,9 @@ def imprint(node, data, tab=None):
         node.addKnob(knob)
 
 
+@deprecated
 def add_publish_knob(node):
-    """Add Publish knob to node
+    """[DEPRECATED] Add Publish knob to node
 
     Arguments:
         node (nuke.Node): nuke node to be processed
@@ -289,8 +421,9 @@ def add_publish_knob(node):
     return node
 
 
+@deprecated
 def set_avalon_knob_data(node, data=None, prefix="avalon:"):
-    """ Sets data into nodes's avalon knob
+    """[DEPRECATED] Sets data into a node's avalon knob
 
     Arguments:
         node (nuke.Node): Nuke node to imprint with data,
@@ -351,8 +484,9 @@ def set_avalon_knob_data(node, data=None, prefix="avalon:"):
     return node
 
 
-def get_avalon_knob_data(node, prefix="avalon:"):
-    """ Gets a data from nodes's avalon knob
+@deprecated
+def get_avalon_knob_data(node, prefix="avalon:", create=True):
+    """[DEPRECATED] Gets data from a node's avalon knob
 
     Arguments:
         node (obj): Nuke node to search for data,
@@ -362,26 +496,28 @@ def get_avalon_knob_data(node, prefix="avalon:"):
         data (dict)
     """
+    data = {}
+    if AVALON_TAB not in node.knobs():
+        return data
+
     # check if lists
     if not isinstance(prefix, list):
-        prefix = list([prefix])
-
-    data = dict()
+        prefix = [prefix]
 
     # loop prefix
     for p in prefix:
         # check if the node is avalon tracked
-        if AVALON_TAB not in node.knobs():
-            continue
         try:
             # check if data available on the node
             test = node[AVALON_DATA_GROUP].value()
-            log.debug("Only testing if data avalable: `{}`".format(test))
+            log.debug("Only testing if data available: `{}`".format(test))
         except NameError as e:
             # if it doesn't then create it
             log.debug("Creating avalon knob: `{}`".format(e))
-            node = set_avalon_knob_data(node)
-            return get_avalon_knob_data(node)
+            if create:
+                node = set_avalon_knob_data(node)
+                return get_avalon_knob_data(node)
+            return {}
 
     # get data from filtered knobs
     data.update({k.replace(p, ''): node[k].value()
@@ -391,8 +527,9 @@ def get_avalon_knob_data(node, prefix="avalon:"):
     return data
 
 
+@deprecated
 def fix_data_for_node_create(data):
-    """Fixing data to be used for nuke knobs
+    """[DEPRECATED] Fixing data to be used for nuke knobs
     """
     for k, v in data.items():
         if isinstance(v, six.text_type):
@@ -402,8 +539,9 @@ def fix_data_for_node_create(data):
     return data
 
 
+@deprecated
 def add_write_node_legacy(name, **kwarg):
-    """Adding nuke write node
+    """[DEPRECATED] Adding nuke write node
 
     Arguments:
         name (str): nuke node name
         kwarg (attrs): data for nuke knobs
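The `set_node_data`/`get_node_data` pair above serializes a dict to JSON, marks it with the `JSON:::` sentinel, and parks it on an invisible `String_Knob`; the prefix is what lets `get_node_data` tell structured data apart from ordinary string knobs. A minimal stand-alone sketch of that encode/decode contract (pure Python, no Nuke session needed; `KnobStandIn` is a hypothetical substitute for `nuke.String_Knob`):

```python
import json

JSON_PREFIX = "JSON:::"  # same sentinel the module defines


class KnobStandIn:
    """Hypothetical stand-in for nuke.String_Knob, for this demo only."""
    def __init__(self):
        self._value = ""

    def setValue(self, value):
        self._value = value

    def getValue(self):
        return self._value


def encode(data):
    # mirrors set_node_data: JSON-dump and mark with the sentinel prefix
    return JSON_PREFIX + json.dumps(data)


def decode(raw):
    # mirrors get_node_data: only strings carrying the prefix are decoded
    if isinstance(raw, str) and raw.startswith(JSON_PREFIX):
        try:
            return json.loads(raw[len(JSON_PREFIX):])
        except json.JSONDecodeError:
            return None
    return None


knob = KnobStandIn()
knob.setValue(encode({"creator_identifier": "create_write_render"}))
assert decode(knob.getValue()) == {"creator_identifier": "create_write_render"}
```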
@@ -562,19 +700,12 @@ def get_node_path(path, padding=4):
 
 
 def get_nuke_imageio_settings():
-    project_imageio = get_project_settings(
-        Context.project_name)["nuke"]["imageio"]
-
-    # backward compatibility for project started before 3.10
-    # those are still having `__legacy__` knob types
-    if not project_imageio["enabled"]:
-        return get_anatomy_settings(Context.project_name)["imageio"]["nuke"]
     return get_project_settings(Context.project_name)["nuke"]["imageio"]
 
 
+@deprecated("openpype.hosts.nuke.api.lib.get_nuke_imageio_settings")
 def get_created_node_imageio_setting_legacy(nodeclass, creator, subset):
-    ''' Get preset data for dataflow (fileType, compression, bitDepth)
+    '''[DEPRECATED] Get preset data for dataflow
+    (fileType, compression, bitDepth)
     '''
 
     assert any([creator, nodeclass]), nuke.message(
@@ -764,15 +895,33 @@ def get_imageio_input_colorspace(filename):
 
 def get_view_process_node():
     reset_selection()
 
-    ipn_orig = None
-    for v in nuke.allNodes(filter="Viewer"):
-        ipn = v['input_process_node'].getValue()
-        if "VIEWER_INPUT" not in ipn:
-            ipn_orig = nuke.toNode(ipn)
-            ipn_orig.setSelected(True)
+    ipn_node = None
+    for v_ in nuke.allNodes(filter="Viewer"):
+        ipn = v_['input_process_node'].getValue()
+        ipn_node = nuke.toNode(ipn)
 
-    if ipn_orig:
-        return duplicate_node(ipn_orig)
+        # skip if no input node is set
+        if not ipn:
+            continue
+
+        if ipn == "VIEWER_INPUT" and not ipn_node:
+            # since it is set by default we can ignore it
+            # nobody usually uses this, but use it if
+            # it exists in nodes
+            continue
+
+        if not ipn_node:
+            # in case a Viewer node is transferred from
+            # a different workfile with old values
+            raise NameError((
+                "Input process node name '{}' set in "
+                "Viewer '{}' does not exist in nodes"
+            ).format(ipn, v_.name()))
+
+        ipn_node.setSelected(True)
+
+    if ipn_node:
+        return duplicate_node(ipn_node)
 
 
 def on_script_load():
@@ -971,27 +1120,14 @@ def format_anatomy(data):
     Return:
         path (str)
     '''
-    # TODO: perhaps should be nonPublic
-
     anatomy = Anatomy()
     log.debug("__ anatomy.templates: {}".format(anatomy.templates))
 
-    try:
-        # TODO: bck compatibility with old anatomy template
-        padding = int(
-            anatomy.templates["render"].get(
-                "frame_padding",
-                anatomy.templates["render"].get("padding")
-            )
+    padding = int(
+        anatomy.templates["render"].get(
+            "frame_padding"
         )
-    except KeyError as e:
-        msg = ("`padding` key is not in `render` "
-               "or `frame_padding` on is not available in "
-               "Anatomy template. 
Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`").format(e) - - log.error(msg) - nuke.message(msg) + ) version = data.get("version", None) if not version: @@ -999,16 +1135,16 @@ def format_anatomy(data): data["version"] = get_version_from_path(file) project_name = anatomy.project_name - asset_name = data["avalon"]["asset"] - task_name = os.environ["AVALON_TASK"] + asset_name = data["asset"] + task_name = data["task"] host_name = os.environ["AVALON_APP"] context_data = get_template_data_with_names( project_name, asset_name, task_name, host_name ) data.update(context_data) data.update({ - "subset": data["avalon"]["subset"], - "family": data["avalon"]["family"], + "subset": data["subset"], + "family": data["family"], "frame": "#" * padding, }) return anatomy.format(data) @@ -1100,8 +1236,6 @@ def create_write_node( data, input=None, prenodes=None, - review=True, - farm=True, linked_knobs=None, **kwargs ): @@ -1115,7 +1249,7 @@ def create_write_node( nodes to be created before write with dependency review (bool)[optional]: adding review knob farm (bool)[optional]: rendering workflow target - kwargs (dict)[optional]: additional key arguments for formating + kwargs (dict)[optional]: additional key arguments for formatting Example: prenodes = { @@ -1143,35 +1277,26 @@ def create_write_node( ''' prenodes = prenodes or {} - # group node knob overrides - knob_overrides = data.pop("knobs", []) - # filtering variables plugin_name = data["creator"] subset = data["subset"] # get knob settings for write node imageio_writes = get_imageio_node_setting( - node_class=data["nodeclass"], + node_class="Write", plugin_name=plugin_name, subset=subset ) for knob in imageio_writes["knobs"]: if knob["name"] == "file_type": - representation = knob["value"] + ext = knob["value"] - try: - data.update({ - "imageio_writes": imageio_writes, - "representation": representation, - }) - anatomy_filled = format_anatomy(data) - - except Exception as e: - msg = "problem with resolving anatomy template: {}".format(e) - log.error(msg) - nuke.message(msg) + data.update({ + "imageio_writes": imageio_writes, + "ext": ext + }) + anatomy_filled = format_anatomy(data) # build file path to workfiles fdir = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -1180,7 +1305,7 @@ def create_write_node( version=data["version"], subset=data["subset"], frame=data["frame"], - ext=representation + ext=ext ) # create directory @@ -1234,14 +1359,6 @@ def create_write_node( # connect to previous node now_node.setInput(0, prev_node) - # imprinting group node - set_avalon_knob_data(GN, data["avalon"]) - add_publish_knob(GN) - add_rendering_knobs(GN, farm) - - if review: - add_review_knob(GN) - # add divider GN.addKnob(nuke.Text_Knob('', 'Rendering')) @@ -1287,12 +1404,6 @@ def create_write_node( # adding write to read button add_button_clear_rendered(GN, os.path.dirname(fpath)) - # Deadline tab. 
- add_deadline_tab(GN) - - # open the our Tab as default - GN[_NODE_TAB_NAME].setFlag(0) - # set tile color tile_color = next( iter( @@ -1303,12 +1414,10 @@ def create_write_node( GN["tile_color"].setValue( color_gui_to_int(tile_color)) - # finally add knob overrides - set_node_knobs_from_settings(GN, knob_overrides, **kwargs) - return GN +@deprecated("openpype.hosts.nuke.api.lib.create_write_node") def create_write_node_legacy( name, data, input=None, prenodes=None, review=True, linked_knobs=None, farm=True @@ -1554,7 +1663,7 @@ def create_write_node_legacy( tile_color = _data.get("tile_color", "0xff0000ff") GN["tile_color"].setValue(tile_color) - # overrie knob values from settings + # override knob values from settings for knob in knob_overrides: knob_type = knob["type"] knob_name = knob["name"] @@ -1599,6 +1708,13 @@ def set_node_knobs_from_settings(node, knob_settings, **kwargs): if knob_name not in node.knobs(): continue + if knob_type == "expression": + knob_expression = knob["expression"] + node[knob_name].setExpression( + knob_expression + ) + continue + # first deal with formatable knob settings if knob_type == "formatable": template = knob["template"] @@ -1607,7 +1723,6 @@ def set_node_knobs_from_settings(node, knob_settings, **kwargs): _knob_value = template.format( **kwargs ) - log.debug("__ knob_value0: {}".format(_knob_value)) except KeyError as msg: log.warning("__ msg: {}".format(msg)) raise KeyError(msg) @@ -1661,6 +1776,7 @@ def color_gui_to_int(color_gui): return int(hex_value, 16) +@deprecated def add_rendering_knobs(node, farm=True): ''' Adds additional rendering knobs to given node @@ -1681,6 +1797,7 @@ def add_rendering_knobs(node, farm=True): return node +@deprecated def add_review_knob(node): ''' Adds additional review knob to given node @@ -1697,7 +1814,9 @@ def add_review_knob(node): return node +@deprecated def add_deadline_tab(node): + # TODO: remove this as it is only linked to legacy create node.addKnob(nuke.Tab_Knob("Deadline")) knob = nuke.Int_Knob("deadlinePriority", "Priority") @@ -1723,7 +1842,10 @@ def add_deadline_tab(node): node.addKnob(knob) +@deprecated def get_deadline_knob_names(): + # TODO: remove this as it is only linked to legacy + # validate_write_deadline_tab return [ "Deadline", "deadlineChunkSize", @@ -1880,67 +2002,72 @@ class WorkfileSettings(object): "Attention! Viewer nodes {} were erased." 
"It had wrong color profile".format(erased_viewers)) - def set_root_colorspace(self, root_dict): + def set_root_colorspace(self, imageio_host): ''' Adds correct colorspace to root Arguments: - root_dict (dict): adjustmensts from presets + imageio_host (dict): host colorspace configurations ''' - if not isinstance(root_dict, dict): - msg = "set_root_colorspace(): argument should be dictionary" - log.error(msg) - nuke.message(msg) + config_data = get_imageio_config( + project_name=get_current_project_name(), + host_name="nuke" + ) - log.debug(">> root_dict: {}".format(root_dict)) + workfile_settings = imageio_host["workfile"] - # first set OCIO - if self._root_node["colorManagement"].value() \ - not in str(root_dict["colorManagement"]): - self._root_node["colorManagement"].setValue( - str(root_dict["colorManagement"])) - log.debug("nuke.root()['{0}'] changed to: {1}".format( - "colorManagement", root_dict["colorManagement"])) - root_dict.pop("colorManagement") + if not config_data: + # TODO: backward compatibility for old projects - remove later + # perhaps old project overrides is having it set to older version + # with use of `customOCIOConfigPath` + if workfile_settings.get("customOCIOConfigPath"): + unresolved_path = workfile_settings["customOCIOConfigPath"] + ocio_paths = unresolved_path[platform.system().lower()] - # second set ocio version - if self._root_node["OCIO_config"].value() \ - not in str(root_dict["OCIO_config"]): - self._root_node["OCIO_config"].setValue( - str(root_dict["OCIO_config"])) - log.debug("nuke.root()['{0}'] changed to: {1}".format( - "OCIO_config", root_dict["OCIO_config"])) - root_dict.pop("OCIO_config") - - # third set ocio custom path - if root_dict.get("customOCIOConfigPath"): - unresolved_path = root_dict["customOCIOConfigPath"] - ocio_paths = unresolved_path[platform.system().lower()] - - resolved_path = None - for ocio_p in ocio_paths: - resolved_path = str(ocio_p).format(**os.environ) - if not os.path.exists(resolved_path): - continue + resolved_path = None + for ocio_p in ocio_paths: + resolved_path = str(ocio_p).format(**os.environ) + if not os.path.exists(resolved_path): + continue if resolved_path: + # set values to root + self._root_node["colorManagement"].setValue("OCIO") + self._root_node["OCIO_config"].setValue("custom") self._root_node["customOCIOConfigPath"].setValue( - str(resolved_path).replace("\\", "/") - ) - log.debug("nuke.root()['{}'] changed to: {}".format( - "customOCIOConfigPath", resolved_path)) - root_dict.pop("customOCIOConfigPath") + resolved_path) + else: + # no ocio config found and no custom path used + if self._root_node["colorManagement"].value() \ + not in str(workfile_settings["colorManagement"]): + self._root_node["colorManagement"].setValue( + str(workfile_settings["colorManagement"])) + + # second set ocio version + if self._root_node["OCIO_config"].value() \ + not in str(workfile_settings["OCIO_config"]): + self._root_node["OCIO_config"].setValue( + str(workfile_settings["OCIO_config"])) + + else: + # set values to root + self._root_node["colorManagement"].setValue("OCIO") + + # we dont need the key anymore + workfile_settings.pop("customOCIOConfigPath") + workfile_settings.pop("colorManagement") + workfile_settings.pop("OCIO_config") # then set the rest - for knob, value in root_dict.items(): + for knob, value_ in workfile_settings.items(): # skip unfilled ocio config path # it will be dict in value - if isinstance(value, dict): + if isinstance(value_, dict): continue - if self._root_node[knob].value() not in value: - 
self._root_node[knob].setValue(str(value))
+            if self._root_node[knob].value() not in value_:
+                self._root_node[knob].setValue(str(value_))
                 log.debug("nuke.root()['{}'] changed to: {}".format(
-                    knob, value))
+                    knob, value_))
 
     def set_writes_colorspace(self):
         ''' Adds correct colorspace to write node dict
@@ -2000,7 +2127,7 @@ class WorkfileSettings(object):
                     write_node[knob["name"]].setValue(value)
                 except TypeError:
                     log.warning(
-                        "Legacy workflow didnt work, switching to current")
+                        "Legacy workflow didn't work, switching to current")
 
                     set_node_knobs_from_settings(
                         write_node, nuke_imageio_writes["knobs"])
@@ -2062,7 +2189,7 @@ class WorkfileSettings(object):
 
         log.info("Setting colorspace to workfile...")
         try:
-            self.set_root_colorspace(nuke_colorspace["workfile"])
+            self.set_root_colorspace(nuke_colorspace)
         except AttributeError:
             msg = "set_colorspace(): missing `workfile` settings in template"
             nuke.message(msg)
@@ -2120,13 +2247,13 @@ class WorkfileSettings(object):
         handle_end = data["handleEnd"]
         fps = float(data["fps"])
 
-        frame_start = int(data["frameStart"]) - handle_start
-        frame_end = int(data["frameEnd"]) + handle_end
+        frame_start_handle = int(data["frameStart"]) - handle_start
+        frame_end_handle = int(data["frameEnd"]) + handle_end
 
         self._root_node["lock_range"].setValue(False)
         self._root_node["fps"].setValue(fps)
-        self._root_node["first_frame"].setValue(frame_start)
-        self._root_node["last_frame"].setValue(frame_end)
+        self._root_node["first_frame"].setValue(frame_start_handle)
+        self._root_node["last_frame"].setValue(frame_end_handle)
         self._root_node["lock_range"].setValue(True)
 
         # setting active viewers
@@ -2137,7 +2264,8 @@ class WorkfileSettings(object):
 
         range = '{0}-{1}'.format(
             int(data["frameStart"]),
-            int(data["frameEnd"]))
+            int(data["frameEnd"])
+        )
 
         for node in nuke.allNodes(filter="Viewer"):
             node['frame_range'].setValue(range)
@@ -2145,12 +2273,20 @@ class WorkfileSettings(object):
             node['frame_range'].setValue(range)
             node['frame_range_lock'].setValue(True)
 
-        # adding handle_start/end to root avalon knob
-        if not set_avalon_knob_data(self._root_node, {
-            "handleStart": int(handle_start),
-            "handleEnd": int(handle_end)
-        }):
-            log.warning("Cannot set Avalon knob to Root node!")
+        if not ASSIST:
+            set_node_data(
+                self._root_node,
+                INSTANCE_DATA_KNOB,
+                {
+                    "handleStart": int(handle_start),
+                    "handleEnd": int(handle_end)
+                }
+            )
+        else:
+            log.warning(
+                "NukeAssist mode does not allow "
+                "updating custom knobs..."
+            )
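The renamed variables above make the frame-range convention explicit: the Root node's `first_frame`/`last_frame` include handle padding, while Viewer nodes keep the bare `frameStart`-`frameEnd` shot range. A quick sketch of that arithmetic (plain Python; the sample values are made up):

```python
data = {"frameStart": 1001, "frameEnd": 1100, "handleStart": 10, "handleEnd": 10}

# Root node gets the padded range (what the render actually covers)
frame_start_handle = int(data["frameStart"]) - data["handleStart"]  # 991
frame_end_handle = int(data["frameEnd"]) + data["handleEnd"]        # 1110

# Viewer nodes get the bare shot range, without handles
viewer_range = "{0}-{1}".format(int(data["frameStart"]), int(data["frameEnd"]))

assert (frame_start_handle, frame_end_handle) == (991, 1110)
assert viewer_range == "1001-1100"
```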
 
     def reset_resolution(self):
         """Set resolution to project resolution."""
@@ -2264,29 +2400,25 @@ def get_write_node_template_attr(node):
     ''' Gets all defined data from presets
     '''
+
+    # TODO: add identifiers to settings and rename settings key
+    plugin_names_mapping = {
+        "create_write_image": "CreateWriteImage",
+        "create_write_prerender": "CreateWritePrerender",
+        "create_write_render": "CreateWriteRender"
+    }
     # get avalon data from node
-    avalon_knob_data = read_avalon_data(node)
-    # get template data
-    nuke_imageio_writes = get_imageio_node_setting(
-        node_class=avalon_knob_data["families"],
-        plugin_name=avalon_knob_data["creator"],
-        subset=avalon_knob_data["subset"]
+    node_data = get_node_data(node, INSTANCE_DATA_KNOB)
+    identifier = node_data["creator_identifier"]
+
+    # return template data
+    return get_imageio_node_setting(
+        node_class="Write",
+        plugin_name=plugin_names_mapping[identifier],
+        subset=node_data["subset"]
     )
 
-    # collecting correct data
-    correct_data = OrderedDict()
-
-    # adding imageio knob presets
-    for k, v in nuke_imageio_writes.items():
-        if k in ["_id", "_previous"]:
-            continue
-        correct_data[k] = v
-
-    # fix badly encoded data
-    return fix_data_for_node_create(correct_data)
-
 
 def get_dependent_nodes(nodes):
     """Get all dependent nodes connected to the list of nodes.
@@ -2325,10 +2457,11 @@ def get_dependent_nodes(nodes):
 
 
 def find_free_space_to_paste_nodes(
-        nodes,
-        group=nuke.root(),
-        direction="right",
-        offset=300):
+    nodes,
+    group=nuke.root(),
+    direction="right",
+    offset=300
+):
     """
     For getting coordinates in DAG (node graph) for placing new nodes
 
@@ -2420,7 +2553,7 @@ def reset_selection():
 
 
 def select_nodes(nodes):
-    """Selects all inputed nodes
+    """Selects all inputted nodes
 
     Arguments:
         nodes (list): nuke nodes to be selected
@@ -2437,7 +2570,7 @@ def launch_workfiles_app():
     Trigger to show workfiles tool on application launch.
     Can be executed only once all other calls are ignored.
 
-    Workfiles tool show is deffered after application initialization using
+    Workfiles tool show is deferred after application initialization using
     QTimer.
     """
 
@@ -2458,7 +2591,7 @@ def launch_workfiles_app():
     # Show workfiles tool using timer
     # - this will be probably triggered during initialization in that case
     #   the application is not be able to show uis so it must be
-    #   deffered using timer
+    #   deferred using timer
     # - timer should be processed when initialization ends
     #   When applications starts to process events.
     timer = QtCore.QTimer()
@@ -2554,6 +2687,22 @@ def process_workfile_builder():
             open_file(last_workfile_path)
 
 
+def start_workfile_template_builder():
+    from .workfile_template_builder import (
+        build_workfile_template
+    )
+
+    # to avoid looping of the callback, remove it!
+    log.info("Starting workfile template builder...")
+    try:
+        build_workfile_template(workfile_creation_enabled=True)
+    except TemplateProfileNotFound:
+        log.warning("Template profile not found. Skipping...")
+
+    # remove callback since it would be duplicating the workfile
+    nuke.removeOnCreate(start_workfile_template_builder, nodeClass="Root")
+
+
+@deprecated
 def recreate_instance(origin_node, avalon_data=None):
     """Recreate input instance to different data
 
@@ -2619,6 +2768,32 @@
     return new_node
 
 
+def add_scripts_menu():
+    try:
+        from scriptsmenu import launchfornuke
+    except ImportError:
+        log.warning(
+            "Skipping studio.menu install, because "
+            "'scriptsmenu' module seems unavailable."
+        )
+        return
+
+    # load configuration of custom menu
+    project_settings = get_project_settings(os.getenv("AVALON_PROJECT"))
+    config = project_settings["nuke"]["scriptsmenu"]["definition"]
+    _menu = project_settings["nuke"]["scriptsmenu"]["name"]
+
+    if not config:
+        log.warning("Skipping studio menu, no definition found.")
+        return
+
+    # run the launcher for the Nuke menu
+    studio_menu = launchfornuke.main(title=_menu.title())
+
+    # apply configuration
+    studio_menu.build_from_configuration(studio_menu, config)
+
+
 def add_scripts_gizmo():
 
     # load configuration of custom menu
@@ -2699,10 +2874,10 @@ class NukeDirmap(HostDirmap):
         pass
 
     def dirmap_routine(self, source_path, destination_path):
-        log.debug("{}: {}->{}".format(self.file_name,
-                                      source_path, destination_path))
         source_path = source_path.lower().replace(os.sep, '/')
         destination_path = destination_path.lower().replace(os.sep, '/')
+        log.debug("Map: {} with: {}->{}".format(self.file_name,
+                                                source_path, destination_path))
         if platform.system().lower() == "windows":
             self.file_name = self.file_name.lower().replace(
                 source_path, destination_path)
@@ -2716,6 +2891,7 @@ class DirmapCache:
     _project_name = None
     _project_settings = None
     _sync_module = None
+    _mapping = None
 
     @classmethod
     def project_name(cls):
@@ -2735,6 +2911,36 @@ class DirmapCache:
             cls._sync_module = ModulesManager().modules_by_name["sync_server"]
         return cls._sync_module
 
+    @classmethod
+    def mapping(cls):
+        return cls._mapping
+
+    @classmethod
+    def set_mapping(cls, mapping):
+        cls._mapping = mapping
+
+
+def dirmap_file_name_filter(file_name):
+    """Nuke callback function with single full path argument.
+
+    Checks project settings for potential mapping from source to dest.
+    """
+
+    dirmap_processor = NukeDirmap(
+        file_name,
+        "nuke",
+        DirmapCache.project_name(),
+        DirmapCache.project_settings(),
+        DirmapCache.sync_module(),
+    )
+    if not DirmapCache.mapping():
+        DirmapCache.set_mapping(dirmap_processor.get_mappings())
+
+    dirmap_processor.process_dirmap(DirmapCache.mapping())
+    if os.path.exists(dirmap_processor.file_name):
+        return dirmap_processor.file_name
+    return file_name
+
 
 @contextlib.contextmanager
 def node_tempfile():
@@ -2780,67 +2986,6 @@ def duplicate_node(node):
     return dupli_node
 
 
-def dirmap_file_name_filter(file_name):
-    """Nuke callback function with single full path argument.
-
-    Checks project settings for potential mapping from source to dest. 
- """ - - dirmap_processor = NukeDirmap( - file_name, - "nuke", - DirmapCache.project_name(), - DirmapCache.project_settings(), - DirmapCache.sync_module(), - ) - dirmap_processor.process_dirmap() - if os.path.exists(dirmap_processor.file_name): - return dirmap_processor.file_name - return file_name - - -# ------------------------------------ -# This function seems to be deprecated -# ------------------------------------ -def ls_img_sequence(path): - """Listing all available coherent image sequence from path - - Arguments: - path (str): A nuke's node object - - Returns: - data (dict): with nuke formated path and frameranges - """ - file = os.path.basename(path) - dirpath = os.path.dirname(path) - base, ext = os.path.splitext(file) - name, padding = os.path.splitext(base) - - # populate list of files - files = [ - f for f in os.listdir(dirpath) - if name in f - if ext in f - ] - - # create collection from list of files - collections, reminder = clique.assemble(files) - - if len(collections) > 0: - head = collections[0].format("{head}") - padding = collections[0].format("{padding}") % 1 - padding = "#" * len(padding) - tail = collections[0].format("{tail}") - file = head + padding + tail - - return { - "path": os.path.join(dirpath, file).replace("\\", "/"), - "frames": collections[0].format("[{ranges}]") - } - - return False - - def get_group_io_nodes(nodes): """Get the input and the output of a group of nodes.""" diff --git a/openpype/hosts/nuke/api/pipeline.py b/openpype/hosts/nuke/api/pipeline.py index bdf12b7dc4..8406a251e9 100644 --- a/openpype/hosts/nuke/api/pipeline.py +++ b/openpype/hosts/nuke/api/pipeline.py @@ -1,21 +1,24 @@ +import nuke + import os import importlib from collections import OrderedDict -import nuke - import pyblish.api import openpype +from openpype.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost +) from openpype.settings import get_current_project_settings from openpype.lib import register_event_callback, Logger from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, register_inventory_action_path, - deregister_loader_plugin_path, - deregister_creator_plugin_path, - deregister_inventory_action_path, AVALON_CONTAINER_ID, ) from openpype.pipeline.workfile import BuildWorkfile @@ -24,23 +27,40 @@ from openpype.tools.utils import host_tools from .command import viewer_update_and_undo_stop from .lib import ( Context, + ROOT_DATA_KNOB, + INSTANCE_DATA_KNOB, get_main_window, add_publish_knob, WorkfileSettings, process_workfile_builder, + start_workfile_template_builder, launch_workfiles_app, check_inventory_versions, set_avalon_knob_data, read_avalon_data, + on_script_load, + dirmap_file_name_filter, + add_scripts_menu, + add_scripts_gizmo, + get_node_data, + set_node_data ) from .workfile_template_builder import ( NukePlaceholderLoadPlugin, NukePlaceholderCreatePlugin, build_workfile_template, - update_workfile_template, create_placeholder, update_placeholder, ) +from .workio import ( + open_file, + save_file, + file_extensions, + has_unsaved_changes, + work_root, + current_file +) +from .constants import ASSIST log = Logger.get_logger(__name__) @@ -53,12 +73,111 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") MENU_LABEL = os.environ["AVALON_LABEL"] - # registering pyblish gui regarding settings in presets if os.getenv("PYBLISH_GUI", None): pyblish.api.register_gui(os.getenv("PYBLISH_GUI", None)) +class NukeHost( + HostBase, IWorkfileHost, ILoadHost, IPublishHost +): + name = "nuke" + + def 
open_workfile(self, filepath):
+        return open_file(filepath)
+
+    def save_workfile(self, filepath=None):
+        return save_file(filepath)
+
+    def work_root(self, session):
+        return work_root(session)
+
+    def get_current_workfile(self):
+        return current_file()
+
+    def workfile_has_unsaved_changes(self):
+        return has_unsaved_changes()
+
+    def get_workfile_extensions(self):
+        return file_extensions()
+
+    def get_workfile_build_placeholder_plugins(self):
+        return [
+            NukePlaceholderLoadPlugin,
+            NukePlaceholderCreatePlugin
+        ]
+
+    def get_containers(self):
+        return ls()
+
+    def install(self):
+        ''' Installing all requirements for Nuke host
+        '''
+
+        pyblish.api.register_host("nuke")
+
+        self.log.info("Registering Nuke plug-ins..")
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+        register_loader_plugin_path(LOAD_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+        register_inventory_action_path(INVENTORY_PATH)
+
+        # Register Avalon event for workfiles loading.
+        register_event_callback("workio.open_file", check_inventory_versions)
+        register_event_callback("taskChanged", change_context_label)
+
+        pyblish.api.register_callback(
+            "instanceToggled", on_pyblish_instance_toggled)
+
+        _install_menu()
+
+        # add script menu
+        add_scripts_menu()
+        add_scripts_gizmo()
+
+        add_nuke_callbacks()
+
+        launch_workfiles_app()
+
+    def get_context_data(self):
+        root_node = nuke.root()
+        return get_node_data(root_node, ROOT_DATA_KNOB)
+
+    def update_context_data(self, data, changes):
+        root_node = nuke.root()
+        set_node_data(root_node, ROOT_DATA_KNOB, data)
+
+
+def add_nuke_callbacks():
+    """ Adding all available nuke callbacks
+    """
+    nuke_settings = get_current_project_settings()["nuke"]
+    workfile_settings = WorkfileSettings()
+    # Set context settings.
+    nuke.addOnCreate(
+        workfile_settings.set_context_settings, nodeClass="Root")
+    nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root")
+    nuke.addOnCreate(start_workfile_template_builder, nodeClass="Root")
+    nuke.addOnCreate(process_workfile_builder, nodeClass="Root")
+
+    # fix ffmpeg settings on script
+    nuke.addOnScriptLoad(on_script_load)
+
+    # set checker for last versions on loaded containers
+    nuke.addOnScriptLoad(check_inventory_versions)
+    nuke.addOnScriptSave(check_inventory_versions)
+
+    # set apply all workfile settings on script load and save
+    nuke.addOnScriptLoad(WorkfileSettings().set_context_settings)
+
+    if nuke_settings["nuke-dirmap"]["enabled"]:
+        log.info("Added Nuke's dirmapping callback ...")
+        # Add dirmap for file paths.
+        nuke.addFilenameFilter(dirmap_file_name_filter)
+
+    log.info("Added Nuke callbacks ...")
+
+
 def reload_config():
     """Attempt to reload pipeline at run-time.
 
@@ -84,52 +203,6 @@ def reload_config():
         reload(module)
 
 
-def install():
-    ''' Installing all requarements for Nuke host
-    '''
-
-    pyblish.api.register_host("nuke")
-
-    log.info("Registering Nuke plug-ins..")
-    pyblish.api.register_plugin_path(PUBLISH_PATH)
-    register_loader_plugin_path(LOAD_PATH)
-    register_creator_plugin_path(CREATE_PATH)
-    register_inventory_action_path(INVENTORY_PATH)
-
-    # Register Avalon event for workfiles loading.
-    register_event_callback("workio.open_file", check_inventory_versions)
-    register_event_callback("taskChanged", change_context_label)
-
-    pyblish.api.register_callback(
-        "instanceToggled", on_pyblish_instance_toggled)
-    workfile_settings = WorkfileSettings()
-
-    # Set context settings.
- nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") - nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") - nuke.addOnCreate(process_workfile_builder, nodeClass="Root") - - _install_menu() - launch_workfiles_app() - - -def uninstall(): - '''Uninstalling host's integration - ''' - log.info("Deregistering Nuke plug-ins..") - pyblish.deregister_host("nuke") - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - deregister_inventory_action_path(INVENTORY_PATH) - - pyblish.api.deregister_callback( - "instanceToggled", on_pyblish_instance_toggled) - - reload_config() - _uninstall_menu() - - def _show_workfiles(): # Make sure parent is not set # - this makes Workfiles tool as separated window which @@ -138,37 +211,55 @@ def _show_workfiles(): host_tools.show_workfiles(parent=None, on_top=False) -def get_workfile_build_placeholder_plugins(): - return [ - NukePlaceholderLoadPlugin, - NukePlaceholderCreatePlugin - ] - - def _install_menu(): + """Install Avalon menu into Nuke's main menu bar.""" + # uninstall original avalon menu main_window = get_main_window() menubar = nuke.menu("Nuke") menu = menubar.addMenu(MENU_LABEL) - label = "{0}, {1}".format( - os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] - ) - Context.context_label = label - context_action = menu.addCommand(label) - context_action.setEnabled(False) + if not ASSIST: + label = "{0}, {1}".format( + os.environ["AVALON_ASSET"], os.environ["AVALON_TASK"] + ) + Context.context_label = label + context_action = menu.addCommand(label) + context_action.setEnabled(False) + + # add separator after context label + menu.addSeparator() - menu.addSeparator() menu.addCommand( "Work Files...", _show_workfiles ) menu.addSeparator() - menu.addCommand( - "Create...", - lambda: host_tools.show_creator(parent=main_window) - ) + if not ASSIST: + # only add parent if nuke version is 14 or higher + # known issue with no solution yet + menu.addCommand( + "Create...", + lambda: host_tools.show_publisher( + parent=( + main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None + ), + tab="create" + ) + ) + # only add parent if nuke version is 14 or higher + # known issue with no solution yet + menu.addCommand( + "Publish...", + lambda: host_tools.show_publisher( + parent=( + main_window if nuke.NUKE_VERSION_RELEASE >= 14 else None + ), + tab="publish" + ) + ) + menu.addCommand( "Load...", lambda: host_tools.show_loader( @@ -176,14 +267,11 @@ def _install_menu(): use_context=True ) ) - menu.addCommand( - "Publish...", - lambda: host_tools.show_publish(parent=main_window) - ) menu.addCommand( "Manage...", lambda: host_tools.show_scene_inventory(parent=main_window) ) + menu.addSeparator() menu.addCommand( "Library...", lambda: host_tools.show_library_loader( @@ -219,21 +307,24 @@ def _install_menu(): "Build Workfile from template", lambda: build_workfile_template() ) - menu_template.addSeparator() - menu_template.addCommand( - "Create Place Holder", - lambda: create_placeholder() - ) - menu_template.addCommand( - "Update Place Holder", - lambda: update_placeholder() - ) + + if not ASSIST: + menu_template.addSeparator() + menu_template.addCommand( + "Create Place Holder", + lambda: create_placeholder() + ) + menu_template.addCommand( + "Update Place Holder", + lambda: update_placeholder() + ) + menu.addSeparator() menu.addCommand( "Experimental tools...", lambda: host_tools.show_experimental_tools_dialog(parent=main_window) ) - + 
menu.addSeparator()
     # add reload pipeline only in debug mode
     if bool(os.getenv("NUKE_DEBUG")):
         menu.addSeparator()
@@ -243,15 +334,6 @@
     add_shortcuts_from_presets()
 
 
-def _uninstall_menu():
-    menubar = nuke.menu("Nuke")
-    menu = menubar.findItem(MENU_LABEL)
-
-    for item in menu.items():
-        log.info("Removing menu item: {}".format(item.name()))
-        menu.removeItem(item.name())
-
-
 def change_context_label():
     menubar = nuke.menu("Nuke")
     menu = menubar.findItem(MENU_LABEL)
@@ -283,8 +365,8 @@ def add_shortcuts_from_presets():
 
         if nuke_presets.get("menu"):
             menu_label_mapping = {
-                "manage": "Manage...",
                 "create": "Create...",
+                "manage": "Manage...",
                 "load": "Load...",
                 "build_workfile": "Build Workfile",
                 "publish": "Publish..."
@@ -302,7 +384,7 @@ def add_shortcuts_from_presets():
                     item_label = menu_label_mapping[command_name]
                     menuitem = menu.findItem(item_label)
                     menuitem.setShortcut(shortcut_str)
-                except AttributeError as e:
+                except (AttributeError, KeyError) as e:
                     log.error(e)
 
 
@@ -434,11 +516,73 @@ def ls():
     """
     all_nodes = nuke.allNodes(recurseGroups=False)
 
-    # TODO: add readgeo, readcamera, readimage
     nodes = [n for n in all_nodes]
 
     for n in nodes:
-        log.debug("name: `{}`".format(n.name()))
         container = parse_container(n)
         if container:
             yield container
+
+
+def list_instances(creator_id=None):
+    """List all created instances to publish from current workfile.
+
+    For SubsetManager
+
+    Returns:
+        (list) of (node, instance_data) tuples matching the instance format
+    """
+    listed_instances = []
+    for node in nuke.allNodes(recurseGroups=True):
+
+        if node.Class() in ["Viewer", "Dot"]:
+            continue
+
+        try:
+            if node["disable"].value():
+                continue
+        except NameError:
+            # pass if disable knob doesn't exist
+            pass
+
+        # get data from instance data knob
+        instance_data = get_node_data(
+            node, INSTANCE_DATA_KNOB)
+
+        if not instance_data:
+            continue
+
+        if instance_data["id"] != "pyblish.avalon.instance":
+            continue
+
+        if creator_id and instance_data["creator_identifier"] != creator_id:
+            continue
+
+        listed_instances.append((node, instance_data))
+
+    return listed_instances
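`list_instances()` above walks every node in the script on each call; the new creators in `plugin.py` therefore scan once per publish collection and share the result through `collection_shared_data` (see `_collect_and_cache_nodes` further below). A reduced sketch of that cache pattern (plain Python; `fake_scan` and its node names are made up):

```python
from collections import defaultdict


def collect_and_cache(shared_data, list_instances):
    """Scan the script once, then serve every creator from the same cache.

    `shared_data` stands in for create_context.collection_shared_data,
    which the publisher resets at the start of each collection pass.
    """
    key = "openpype.nuke.nodes"
    if key not in shared_data:
        by_identifier = defaultdict(list)
        for node, instance_data in list_instances():
            by_identifier[instance_data["creator_identifier"]].append(
                (node, instance_data))
        shared_data[key] = by_identifier
    return shared_data[key]


# hypothetical scan result: two instances from different creators
fake_scan = lambda: [
    ("Write1", {"creator_identifier": "create_write_render"}),
    ("Write2", {"creator_identifier": "create_write_prerender"}),
]
cache = {}
assert len(collect_and_cache(cache, fake_scan)["create_write_render"]) == 1
# the second call does not rescan; it reuses the cached mapping
assert collect_and_cache(cache, fake_scan) is cache["openpype.nuke.nodes"]
```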
+
+
+def remove_instance(instance):
+    """Remove instance from current workfile metadata.
+
+    For SubsetManager
+
+    Args:
+        instance (dict): instance representation from subsetmanager model
+    """
+    instance_node = instance.transient_data["node"]
+    instance_knob = instance_node.knobs()[INSTANCE_DATA_KNOB]
+    instance_node.removeKnob(instance_knob)
+    nuke.delete(instance_node)
+
+
+def select_instance(instance):
+    """
+    Select instance in Node View
+
+    Args:
+        instance (dict): instance representation from subsetmanager model
+    """
+    instance_node = instance.transient_data["node"]
+    instance_node["selected"].setValue(True)
diff --git a/openpype/hosts/nuke/api/plugin.py b/openpype/hosts/nuke/api/plugin.py
index 5981a8b386..7035da2bb5 100644
--- a/openpype/hosts/nuke/api/plugin.py
+++ b/openpype/hosts/nuke/api/plugin.py
@@ -1,27 +1,386 @@
+import nuke
+import re
 import os
+import sys
+import six
 import random
 import string
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
 from abc import abstractmethod
 
-import nuke
-
 from openpype.settings import get_current_project_settings
+from openpype.lib import (
+    BoolDef,
+    EnumDef
+)
 from openpype.pipeline import (
     LegacyCreator,
     LoaderPlugin,
+    CreatorError,
+    Creator as NewCreator,
+    CreatedInstance,
+    legacy_io
 )
 from .lib import (
+    INSTANCE_DATA_KNOB,
     Knobby,
     check_subsetname_exists,
     maintained_selection,
+    get_avalon_knob_data,
     set_avalon_knob_data,
     add_publish_knob,
     get_nuke_imageio_settings,
     set_node_knobs_from_settings,
+    set_node_data,
+    get_node_data,
     get_view_process_node,
-    get_viewer_config_from_string
+    get_viewer_config_from_string,
+    deprecated
 )
+from .pipeline import (
+    list_instances,
+    remove_instance
+)
+
+
+def _collect_and_cache_nodes(creator):
+    key = "openpype.nuke.nodes"
+    if key not in creator.collection_shared_data:
+        instances_by_identifier = defaultdict(list)
+        for item in list_instances():
+            _, instance_data = item
+            identifier = instance_data["creator_identifier"]
+            instances_by_identifier[identifier].append(item)
+        creator.collection_shared_data[key] = instances_by_identifier
+    return creator.collection_shared_data[key]
+
+
+class NukeCreatorError(CreatorError):
+    pass
+
+
+class NukeCreator(NewCreator):
+    selected_nodes = []
+
+    def pass_pre_attributes_to_instance(
+        self,
+        instance_data,
+        pre_create_data,
+        keys=None
+    ):
+        if not keys:
+            keys = pre_create_data.keys()
+
+        creator_attrs = instance_data["creator_attributes"] = {}
+        for pass_key in keys:
+            creator_attrs[pass_key] = pre_create_data[pass_key]
+
+    def check_existing_subset(self, subset_name):
+        """Make sure subset name is unique.
+
+        It searches within all nodes recursively
+        and checks if subset name is found in
+        any node having instance data knob.
+
+        Arguments:
+            subset_name (str): Subset name
+        """
+
+        for node in nuke.allNodes(recurseGroups=True):
+            # make sure testing node is having instance knob
+            if INSTANCE_DATA_KNOB not in node.knobs().keys():
+                continue
+            node_data = get_node_data(node, INSTANCE_DATA_KNOB)
+
+            if not node_data:
+                # a node has no instance data
+                continue
+
+            # test if subset name is matching
+            if node_data.get("subset") == subset_name:
+                raise NukeCreatorError(
+                    (
+                        "A publish instance for '{}' already exists "
+                        "in nodes! Please change the variant "
+                        "name to ensure unique output."
+                    ).format(subset_name)
+                )
+
+    def create_instance_node(
+        self,
+        node_name,
+        knobs=None,
+        parent=None,
+        node_type=None
+    ):
+        """Create node representing instance.
+
+        Arguments:
+            node_name (str): Name of the new node.
+            knobs (OrderedDict): node knobs name and values
+            parent (str): Name of the parent node.
+            node_type (str, optional): Nuke node Class.
+
+        Returns:
+            nuke.Node: Newly created instance node.
+
+        """
+        node_type = node_type or "NoOp"
+
+        node_knobs = knobs or {}
+
+        # set parent node
+        parent_node = nuke.root()
+        if parent:
+            parent_node = nuke.toNode(parent)
+
+        try:
+            with parent_node:
+                created_node = nuke.createNode(node_type)
+                created_node["name"].setValue(node_name)
+
+                for key, values in node_knobs.items():
+                    if key in created_node.knobs():
+                        created_node[key].setValue(values)
+        except Exception as _err:
+            raise NukeCreatorError("Creating has failed: {}".format(_err))
+
+        return created_node
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            self.selected_nodes = nuke.selectedNodes()
+            if self.selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+        else:
+            self.selected_nodes = []
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        # make sure selected nodes are added
+        self.set_selected_nodes(pre_create_data)
+
+        # make sure subset name is unique
+        self.check_existing_subset(subset_name)
+
+        try:
+            instance_node = self.create_instance_node(
+                subset_name,
+                node_type=instance_data.pop("node_type", None)
+            )
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self
+            )
+
+            instance.transient_data["node"] = instance_node
+
+            self._add_instance_to_context(instance)
+
+            set_node_data(
+                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
+
+            return instance
+
+        except Exception as er:
+            six.reraise(
+                NukeCreatorError,
+                NukeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2])
+
+    def collect_instances(self):
+        cached_instances = _collect_and_cache_nodes(self)
+        attr_def_keys = {
+            attr_def.key
+            for attr_def in self.get_instance_attr_defs()
+        }
+        attr_def_keys.discard(None)
+
+        for (node, data) in cached_instances[self.identifier]:
+            created_instance = CreatedInstance.from_existing(
+                data, self
+            )
+            created_instance.transient_data["node"] = node
+            self._add_instance_to_context(created_instance)
+
+            for key in (
+                set(created_instance["creator_attributes"].keys())
+                - attr_def_keys
+            ):
+                created_instance["creator_attributes"].pop(key)
+
+    def update_instances(self, update_list):
+        for created_inst, _changes in update_list:
+            instance_node = created_inst.transient_data["node"]
+
+            # in case node is not existing anymore (user erased it manually)
+            try:
+                instance_node.fullName()
+            except ValueError:
+                self.remove_instances([created_inst])
+                continue
+
+            set_node_data(
+                instance_node,
+                INSTANCE_DATA_KNOB,
+                created_inst.data_to_store()
+            )
+
+    def remove_instances(self, instances):
+        for instance in instances:
+            remove_instance(instance)
+            self._remove_instance_from_context(instance)
+
+    def get_pre_create_attr_defs(self):
+        return [
+            BoolDef(
+                "use_selection",
+                default=not self.create_context.headless,
+                label="Use selection"
+            )
+        ]
+
+    def get_creator_settings(self, project_settings, settings_key=None):
+        if not settings_key:
+            settings_key = self.__class__.__name__
+        return project_settings["nuke"]["create"][settings_key]
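Both `create()` implementations in this file funnel arbitrary failures into `NukeCreatorError` while keeping the original traceback, via `six.reraise` (the codebase still straddles Python 2 and 3). A self-contained illustration of that pattern (the `risky` function and its message are invented for the demo):

```python
import sys
import six


class NukeCreatorError(Exception):
    pass


def risky():
    # stand-in for nuke.createNode and friends failing mid-create
    raise ValueError("knob does not exist")


def create():
    try:
        risky()
    except Exception as err:
        # re-raise as the domain error, preserving the original traceback
        # (six.reraise is the py2/py3-compatible spelling used above)
        six.reraise(
            NukeCreatorError,
            NukeCreatorError("Creator error: {}".format(err)),
            sys.exc_info()[2]
        )


try:
    create()
except NukeCreatorError as err:
    assert "knob does not exist" in str(err)
```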
+
+
+class NukeWriteCreator(NukeCreator):
+    """Add Publishable Write node"""
+
+    identifier = "create_write"
+    label = "Create Write"
+    family = "write"
+    icon = "sign-out"
+
+    def integrate_links(self, node, outputs=True):
+        # skip if no selection
+        if not self.selected_node:
+            return
+
+        # collect dependencies
+        input_nodes = [self.selected_node]
+        dependent_nodes = self.selected_node.dependent() if outputs else []
+
+        # relinking to collected connections
+        for i, input in enumerate(input_nodes):
+            node.setInput(i, input)
+
+        # make it nicer in graph
+        node.autoplace()
+
+        # relink also dependent nodes
+        for dep_nodes in dependent_nodes:
+            dep_nodes.setInput(0, node)
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            selected_nodes = nuke.selectedNodes()
+            if selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+            elif len(selected_nodes) > 1:
+                raise NukeCreatorError(
+                    "Creator error: Select only one node")
+            self.selected_node = selected_nodes[0]
+        else:
+            self.selected_node = None
+
+    def get_pre_create_attr_defs(self):
+        attr_defs = [
+            BoolDef("use_selection", label="Use selection"),
+            self._get_render_target_enum()
+        ]
+        return attr_defs
+
+    def get_instance_attr_defs(self):
+        attr_defs = [
+            self._get_render_target_enum(),
+        ]
+        # add reviewable attribute
+        if "reviewable" in self.instance_attributes:
+            attr_defs.append(self._get_reviewable_bool())
+
+        return attr_defs
+
+    def _get_render_target_enum(self):
+        rendering_targets = {
+            "local": "Local machine rendering",
+            "frames": "Use existing frames"
+        }
+        if "farm_rendering" in self.instance_attributes:
+            rendering_targets["farm"] = "Farm rendering"
+
+        return EnumDef(
+            "render_target",
+            items=rendering_targets,
+            label="Render target"
+        )
+
+    def _get_reviewable_bool(self):
+        return BoolDef(
+            "review",
+            default=True,
+            label="Review"
+        )
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # make sure selected nodes are added
+        self.set_selected_nodes(pre_create_data)
+
+        # make sure subset name is unique
+        self.check_existing_subset(subset_name)
+
+        instance_node = self.create_instance_node(
+            subset_name,
+            instance_data
+        )
+
+        try:
+            instance = CreatedInstance(
+                self.family,
+                subset_name,
+                instance_data,
+                self
+            )
+
+            instance.transient_data["node"] = instance_node
+
+            self._add_instance_to_context(instance)
+
+            set_node_data(
+                instance_node, INSTANCE_DATA_KNOB, instance.data_to_store())
+
+            return instance
+
+        except Exception as er:
+            six.reraise(
+                NukeCreatorError,
+                NukeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2]
+            )
+
+    def apply_settings(
+        self,
+        project_settings,
+        system_settings
+    ):
+        """Method called on initialization of plugin to apply settings."""
+
+        # plugin settings
+        plugin_settings = self.get_creator_settings(project_settings)
+
+        # individual attributes
+        self.instance_attributes = plugin_settings.get(
+            "instance_attributes") or self.instance_attributes
+        self.prenodes = plugin_settings["prenodes"]
+        self.default_variants = plugin_settings.get(
+            "default_variants") or self.default_variants
+        self.temp_rendering_path_template = (
+            plugin_settings.get("temp_rendering_path_template")
+            or self.temp_rendering_path_template
+        )
 
 
 class OpenPypeCreator(LegacyCreator):
@@ -72,6 +431,41 @@ class OpenPypeCreator(LegacyCreator):
         return instance
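The write creator assembles its attribute definitions dynamically: the `render_target` enum only offers a "farm" choice when the project settings opted the plugin into farm rendering via `instance_attributes`. A reduced sketch of that decision (plain dicts instead of `openpype.lib.EnumDef`; the settings values are illustrative):

```python
def render_target_items(instance_attributes):
    # base choices always offered by the write creator
    items = {
        "local": "Local machine rendering",
        "frames": "Use existing frames",
    }
    # the farm option appears only when settings opt the plugin in
    if "farm_rendering" in instance_attributes:
        items["farm"] = "Farm rendering"
    return items


# settings-driven: e.g. a project enabling farm rendering and reviewables
assert "farm" in render_target_items(["farm_rendering", "reviewable"])
assert "farm" not in render_target_items(["reviewable"])
```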
 
 
+def get_instance_group_node_childs(instance):
+    """Return list of instance group node children
+
+    Args:
+        instance (pyblish.Instance): pyblish instance
+
+    Returns:
+        list: [nuke.Node]
+    """
+    node = instance.data["transientData"]["node"]
+
+    if node.Class() != "Group":
+        return
+
+    # collect child nodes
+    child_nodes = []
+    # iterate all nodes and add contained nodes to instance's node list
+    for node in nuke.allNodes(group=node):
+        child_nodes.append(node)
+
+    return child_nodes
+
+
+def get_colorspace_from_node(node):
+    # get colorspace value from the node's colorspace knob
+    colorspace = node["colorspace"].value()
+
+    # remove default part of the string
+    if "default (" in colorspace:
+        colorspace = re.sub(r"default.\(|\)", "", colorspace)
+
+    return colorspace
+
+
 def get_review_presets_config():
     settings = get_current_project_settings()
     review_profiles = (
@@ -167,13 +561,10 @@ class ExporterReview(object):
         self.path_in = self.instance.data.get("path", None)
         self.staging_dir = self.instance.data["stagingDir"]
         self.collection = self.instance.data.get("collection", None)
-        self.data = dict({
-            "representations": list()
-        })
+        self.data = {"representations": []}
 
     def get_file_info(self):
         if self.collection:
-            self.log.debug("Collection: `{}`".format(self.collection))
             # get path
             self.fname = os.path.basename(self.collection.format(
                 "{head}{padding}{tail}"))
@@ -202,7 +593,7 @@ class ExporterReview(object):
                 Defaults to None.
             range (bool, optional): flag for adding ranges.
                 Defaults to False.
-            custom_tags (list[str], optional): user inputed custom tags.
+            custom_tags (list[str], optional): user inputted custom tags.
                 Defaults to None.
         """
         add_tags = tags or []
@@ -236,7 +627,7 @@ class ExporterReview(object):
         nuke_imageio = opnlib.get_nuke_imageio_settings()
 
         # TODO: this is only securing backward compatibility lets remove
-        # this once all projects's anotomy are updated to newer config
+        # this once all projects' anatomy are updated to newer config
         if "baking" in nuke_imageio.keys():
             return nuke_imageio["baking"]["viewerProcess"]
         else:
@@ -308,7 +699,6 @@ class ExporterReviewLut(ExporterReview):
         # connect
         self._temp_nodes.append(cms_node)
         self.previous_node = cms_node
-        self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
 
         if bake_viewer_process:
             # Node View Process
@@ -341,8 +731,6 @@ class ExporterReviewLut(ExporterReview):
         # connect
         gen_lut_node.setInput(0, self.previous_node)
         self._temp_nodes.append(gen_lut_node)
-        self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
-
         # ---------- end nodes creation
 
         # Export lut file
@@ -356,8 +744,6 @@ class ExporterReviewLut(ExporterReview):
         # ---------- generate representation data
         self.get_representation_data()
 
-        self.log.debug("Representation... `{}`".format(self.data))
-
         # ---------- Clean up
         self.clean_nodes()
 
@@ -427,6 +813,8 @@ class ExporterReviewMov(ExporterReview):
         # create nk path
         path = os.path.splitext(self.path)[0] + ".nk"
         # save file to the path
+        if not os.path.exists(os.path.dirname(path)):
+            os.makedirs(os.path.dirname(path))
         shutil.copyfile(self.instance.context.data["currentFile"], path)
 
         self.log.info("Nodes exported...")
@@ -436,8 +824,41 @@ class ExporterReviewMov(ExporterReview):
         add_tags = []
         self.publish_on_farm = farm
         read_raw = kwargs["read_raw"]
+
+        # TODO: remove this when `reformat_nodes_config`
+        # is changed in settings
         reformat_node_add = kwargs["reformat_node_add"]
         reformat_node_config = kwargs["reformat_node_config"]
+
+        # TODO: make this required in future
+        reformat_nodes_config = kwargs.get("reformat_nodes_config", {})
+
+        # TODO: remove this once deprecated is removed
+        # make sure only reformat_nodes_config is used in future
+        if reformat_node_add and reformat_nodes_config.get("enabled"):
+            self.log.warning(
+                "`reformat_node_add` is deprecated. 
" + "Please use only `reformat_nodes_config` instead.") + reformat_nodes_config = None + + # TODO: reformat code when backward compatibility is not needed + # warning if reformat_nodes_config is not set + if not reformat_nodes_config: + self.log.warning( + "Please set `reformat_nodes_config` in settings. " + "Using `reformat_node_config` instead." + ) + reformat_nodes_config = { + "enabled": reformat_node_add, + "reposition_nodes": [ + { + "node_class": "Reformat", + "knobs": reformat_node_config + } + ] + } + + bake_viewer_process = kwargs["bake_viewer_process"] bake_viewer_input_process_node = kwargs[ "bake_viewer_input_process"] @@ -459,7 +880,6 @@ class ExporterReviewMov(ExporterReview): subset = self.instance.data["subset"] self._temp_nodes[subset] = [] - # ---------- start nodes creation # Read node r_node = nuke.createNode("Read") @@ -473,44 +893,39 @@ class ExporterReviewMov(ExporterReview): if read_raw: r_node["raw"].setValue(1) - # connect - self._temp_nodes[subset].append(r_node) - self.previous_node = r_node - self.log.debug("Read... `{}`".format(self._temp_nodes[subset])) + # connect to Read node + self._shift_to_previous_node_and_temp(subset, r_node, "Read... `{}`") # add reformat node - if reformat_node_add: + if reformat_nodes_config["enabled"]: + reposition_nodes = reformat_nodes_config["reposition_nodes"] + for reposition_node in reposition_nodes: + node_class = reposition_node["node_class"] + knobs = reposition_node["knobs"] + node = nuke.createNode(node_class) + set_node_knobs_from_settings(node, knobs) + + # connect in order + self._connect_to_above_nodes( + node, subset, "Reposition node... `{}`" + ) # append reformated tag add_tags.append("reformated") - rf_node = nuke.createNode("Reformat") - set_node_knobs_from_settings(rf_node, reformat_node_config) - - # connect - rf_node.setInput(0, self.previous_node) - self._temp_nodes[subset].append(rf_node) - self.previous_node = rf_node - self.log.debug( - "Reformat... `{}`".format(self._temp_nodes[subset])) - # only create colorspace baking if toggled on if bake_viewer_process: if bake_viewer_input_process_node: # View Process node ipn = get_view_process_node() if ipn is not None: - # connect - ipn.setInput(0, self.previous_node) - self._temp_nodes[subset].append(ipn) - self.previous_node = ipn - self.log.debug( - "ViewProcess... `{}`".format( - self._temp_nodes[subset])) + # connect to ViewProcess node + self._connect_to_above_nodes(ipn, subset, "ViewProcess... `{}`") if not self.viewer_lut_raw: # OCIODisplay dag_node = nuke.createNode("OCIODisplay") + # assign display display, viewer = get_viewer_config_from_string( str(baking_view_profile) ) @@ -520,13 +935,7 @@ class ExporterReviewMov(ExporterReview): # assign viewer dag_node["view"].setValue(viewer) - # connect - dag_node.setInput(0, self.previous_node) - self._temp_nodes[subset].append(dag_node) - self.previous_node = dag_node - self.log.debug("OCIODisplay... `{}`".format( - self._temp_nodes[subset])) - + self._connect_to_above_nodes(dag_node, subset, "OCIODisplay... 
`{}`")
         # Write node
         write_node = nuke.createNode("Write")
         self.log.debug("Path: {}".format(self.path))
@@ -580,7 +989,17 @@
 
         return self.data
 
+    def _shift_to_previous_node_and_temp(self, subset, node, message):
+        self._temp_nodes[subset].append(node)
+        self.previous_node = node
+        self.log.debug(message.format(self._temp_nodes[subset]))
 
+    def _connect_to_above_nodes(self, node, subset, message):
+        node.setInput(0, self.previous_node)
+        self._shift_to_previous_node_and_temp(subset, node, message)
+
+
+@deprecated("openpype.hosts.nuke.api.plugin.NukeWriteCreator")
 class AbstractWriteRender(OpenPypeCreator):
     """Abstract creator to gather similar implementation for Write creators"""
     name = ""
@@ -607,7 +1026,6 @@ class AbstractWriteRender(OpenPypeCreator):
 
         self.data = data
         self.nodes = nuke.selectedNodes()
-        self.log.debug("_ self.data: '{}'".format(self.data))
 
     def process(self):
 
@@ -691,7 +1109,7 @@ class AbstractWriteRender(OpenPypeCreator):
     def is_legacy(self):
         """Check if it needs to run legacy code
 
-        In case where `type` key is missing in singe
+        In case where `type` key is missing in single
         knob it is legacy project anatomy.
 
         Returns:
@@ -732,3 +1150,149 @@ class AbstractWriteRender(OpenPypeCreator):
             node (nuke.Node): group node with data as Knobs
         """
         pass
+
+
+def convert_to_valid_instaces():
+    """Check and convert to latest publisher instances.
+
+    Also save as new minor version of workfile.
+    """
+    def family_to_identifier(family):
+        mapping = {
+            "render": "create_write_render",
+            "prerender": "create_write_prerender",
+            "still": "create_write_image",
+            "model": "create_model",
+            "camera": "create_camera",
+            "nukenodes": "create_backdrop",
+            "gizmo": "create_gizmo",
+            "source": "create_source"
+
+        }
+        return mapping[family]
+
+    from openpype.hosts.nuke.api import workio
+
+    task_name = legacy_io.Session["AVALON_TASK"]
+
+    # save into new workfile
+    current_file = workio.current_file()
+
+    # add file suffix if missing
+    if "_publisherConvert" not in current_file:
+        new_workfile = (
+            current_file[:-3]
+            + "_publisherConvert"
+            + current_file[-3:]
+        )
+    else:
+        new_workfile = current_file
+
+    path = new_workfile.replace("\\", "/")
+    nuke.scriptSaveAs(new_workfile, overwrite=1)
+    nuke.Root()["name"].setValue(path)
+    nuke.Root()["project_directory"].setValue(os.path.dirname(path))
+    nuke.Root().setModified(False)
+
+    _remove_old_knobs(nuke.Root())
+
+    # loop all nodes and convert
+    for node in nuke.allNodes(recurseGroups=True):
+        transfer_data = {
+            "creator_attributes": {}
+        }
+        creator_attr = transfer_data["creator_attributes"]
+
+        if node.Class() in ["Viewer", "Dot"]:
+            continue
+
+        if get_node_data(node, INSTANCE_DATA_KNOB):
+            continue
+
+        # get data from avalon knob
+        avalon_knob_data = get_avalon_knob_data(
+            node, ["avalon:", "ak:"])
+
+        if not avalon_knob_data:
+            continue
+
+        if avalon_knob_data["id"] != "pyblish.avalon.instance":
+            continue
+
+        transfer_data.update({
+            k: v for k, v in avalon_knob_data.items()
+            if k not in ["families", "creator"]
+        })
+
+        transfer_data["task"] = task_name
+
+        family = avalon_knob_data["family"]
+        # establish families
+        families_ak = avalon_knob_data.get("families", [])
+
+        if "suspend_publish" in node.knobs():
+            creator_attr["suspended_publish"] = (
+                node["suspend_publish"].value())
+
+        # get review knob value
+        if "review" in node.knobs():
+            creator_attr["review"] = (
+                node["review"].value())
+
+        if "publish" in node.knobs():
+            transfer_data["active"] = (
+                node["publish"].value())
+
+        # add identifier
transfer_data["creator_identifier"] = family_to_identifier(family) + + # Add all nodes in group instances. + if node.Class() == "Group": + # only alter families for render family + if families_ak and "write" in families_ak.lower(): + target = node["render"].value() + if target == "Use existing frames": + creator_attr["render_target"] = "frames" + elif target == "Local": + # Local rendering + creator_attr["render_target"] = "local" + elif target == "On farm": + # Farm rendering + creator_attr["render_target"] = "farm" + + if "deadlinePriority" in node.knobs(): + transfer_data["farm_priority"] = ( + node["deadlinePriority"].value()) + if "deadlineChunkSize" in node.knobs(): + creator_attr["farm_chunk"] = ( + node["deadlineChunkSize"].value()) + if "deadlineConcurrentTasks" in node.knobs(): + creator_attr["farm_concurrency"] = ( + node["deadlineConcurrentTasks"].value()) + + _remove_old_knobs(node) + + # add new instance knob with transfer data + set_node_data( + node, INSTANCE_DATA_KNOB, transfer_data) + + nuke.scriptSave() + + +def _remove_old_knobs(node): + remove_knobs = [ + "review", "publish", "render", "suspend_publish", "warn", "divd", + "OpenpypeDataGroup", "OpenpypeDataGroup_End", "deadlinePriority", + "deadlineChunkSize", "deadlineConcurrentTasks", "Deadline" + ] + print(node.name()) + + # remove all old knobs + for knob in node.allKnobs(): + try: + if knob.name() in remove_knobs: + node.removeKnob(knob) + elif "avalon" in knob.name(): + node.removeKnob(knob) + except ValueError: + pass diff --git a/openpype/hosts/nuke/api/utils.py b/openpype/hosts/nuke/api/utils.py index 6bcb752dd1..2b3c35c23a 100644 --- a/openpype/hosts/nuke/api/utils.py +++ b/openpype/hosts/nuke/api/utils.py @@ -87,7 +87,7 @@ def bake_gizmos_recursively(in_group=None): def colorspace_exists_on_node(node, colorspace_name): """ Check if colorspace exists on node - Look through all options in the colorpsace knob, and see if we have an + Look through all options in the colorspace knob, and see if we have an exact match to one of the items. 
Args: diff --git a/openpype/hosts/nuke/api/workfile_template_builder.py b/openpype/hosts/nuke/api/workfile_template_builder.py index 1b81f24e86..72d4ffb476 100644 --- a/openpype/hosts/nuke/api/workfile_template_builder.py +++ b/openpype/hosts/nuke/api/workfile_template_builder.py @@ -1,7 +1,5 @@ import collections - import nuke - from openpype.pipeline import registered_host from openpype.pipeline.workfile.workfile_template_builder import ( AbstractTemplateBuilder, @@ -14,7 +12,6 @@ from openpype.pipeline.workfile.workfile_template_builder import ( from openpype.tools.workfile_template_build import ( WorkfileBuildPlaceholderDialog, ) - from .lib import ( find_free_space_to_paste_nodes, get_extreme_positions, @@ -45,7 +42,7 @@ class NukeTemplateBuilder(AbstractTemplateBuilder): get_template_preset implementation) Returns: - bool: Wether the template was succesfully imported or not + bool: Whether the template was successfully imported or not """ # TODO check if the template is already imported @@ -55,7 +52,6 @@ class NukeTemplateBuilder(AbstractTemplateBuilder): return True - class NukePlaceholderPlugin(PlaceholderPlugin): node_color = 4278190335 @@ -223,19 +219,22 @@ class NukePlaceholderLoadPlugin(NukePlaceholderPlugin, PlaceholderLoadMixin): # fix the problem of z_order for backdrops self._fix_z_order(placeholder) - self._imprint_siblings(placeholder) + + if placeholder.data.get("keep_placeholder"): + self._imprint_siblings(placeholder) if placeholder.data["nb_children"] == 0: - # save initial nodes postions and dimensions, update them + # save initial nodes positions and dimensions, update them # and set inputs and outputs of loaded nodes + if placeholder.data.get("keep_placeholder"): + self._imprint_inits() + self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_loaded) self._set_loaded_connections(placeholder) elif placeholder.data["siblings"]: # create copies of placeholder siblings for the new loaded nodes, - # set their inputs and outpus and update all nodes positions and + # set their inputs and outputs and update all nodes positions and # dimensions and siblings names siblings = get_nodes_by_names(placeholder.data["siblings"]) @@ -633,19 +632,23 @@ class NukePlaceholderCreatePlugin( # fix the problem of z_order for backdrops self._fix_z_order(placeholder) - self._imprint_siblings(placeholder) + + if placeholder.data.get("keep_placeholder"): + self._imprint_siblings(placeholder) if placeholder.data["nb_children"] == 0: - # save initial nodes postions and dimensions, update them + # save initial nodes positions and dimensions, update them # and set inputs and outputs of created nodes - self._imprint_inits() - self._update_nodes(placeholder, nuke.allNodes(), nodes_created) + if placeholder.data.get("keep_placeholder"): + self._imprint_inits() + self._update_nodes(placeholder, nuke.allNodes(), nodes_created) + self._set_created_connections(placeholder) elif placeholder.data["siblings"]: # create copies of placeholder siblings for the new created nodes, - # set their inputs and outpus and update all nodes positions and + # set their inputs and outputs and update all nodes positions and # dimensions and siblings names siblings = get_nodes_by_names(placeholder.data["siblings"]) @@ -947,9 +950,9 @@ class NukePlaceholderCreatePlugin( siblings_input.setInput(0, copy_output) -def build_workfile_template(*args): +def build_workfile_template(*args, **kwargs): builder = 
NukeTemplateBuilder(registered_host()) - builder.build_template() + builder.build_template(*args, **kwargs) def update_workfile_template(*args): diff --git a/openpype/hosts/nuke/api/workio.py b/openpype/hosts/nuke/api/workio.py index 65b86bf01b..5692f8e63c 100644 --- a/openpype/hosts/nuke/api/workio.py +++ b/openpype/hosts/nuke/api/workio.py @@ -13,7 +13,7 @@ def has_unsaved_changes(): def save_file(filepath): path = filepath.replace("\\", "/") - nuke.scriptSaveAs(path) + nuke.scriptSaveAs(path, overwrite=1) nuke.Root()["name"].setValue(path) nuke.Root()["project_directory"].setValue(os.path.dirname(path)) nuke.Root().setModified(False) diff --git a/openpype/hosts/nuke/hooks/pre_nukeassist_setup.py b/openpype/hosts/nuke/hooks/pre_nukeassist_setup.py new file mode 100644 index 0000000000..3948a665c6 --- /dev/null +++ b/openpype/hosts/nuke/hooks/pre_nukeassist_setup.py @@ -0,0 +1,11 @@ +from openpype.lib import PreLaunchHook + + +class PrelaunchNukeAssistHook(PreLaunchHook): + """ + Adding flag when nukeassist + """ + app_groups = ["nukeassist"] + + def execute(self): + self.launch_context.env["NUKEASSIST"] = "1" diff --git a/openpype/hosts/nuke/plugins/create/convert_legacy.py b/openpype/hosts/nuke/plugins/create/convert_legacy.py new file mode 100644 index 0000000000..377e9f78f6 --- /dev/null +++ b/openpype/hosts/nuke/plugins/create/convert_legacy.py @@ -0,0 +1,52 @@ +from openpype.pipeline.create.creator_plugins import SubsetConvertorPlugin +from openpype.hosts.nuke.api.lib import ( + INSTANCE_DATA_KNOB, + get_node_data, + get_avalon_knob_data, + AVALON_TAB, +) +from openpype.hosts.nuke.api.plugin import convert_to_valid_instaces + +import nuke + + +class LegacyConverted(SubsetConvertorPlugin): + identifier = "legacy.converter" + + def find_instances(self): + + legacy_found = False + # search for first available legacy item + for node in nuke.allNodes(recurseGroups=True): + if node.Class() in ["Viewer", "Dot"]: + continue + + if get_node_data(node, INSTANCE_DATA_KNOB): + continue + + if AVALON_TAB not in node.knobs(): + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data( + node, ["avalon:", "ak:"], create=False) + + if not avalon_knob_data: + continue + + if avalon_knob_data["id"] != "pyblish.avalon.instance": + continue + + # catch and break + legacy_found = True + break + + if legacy_found: + # if not item do not add legacy instance converter + self.add_convertor_item("Convert legacy instances") + + def convert(self): + # loop all instances and convert them + convert_to_valid_instaces() + # remove legacy item if all is fine + self.remove_convertor_item() diff --git a/openpype/hosts/nuke/plugins/create/create_backdrop.py b/openpype/hosts/nuke/plugins/create/create_backdrop.py index 0c11b3f274..52959bbef2 100644 --- a/openpype/hosts/nuke/plugins/create/create_backdrop.py +++ b/openpype/hosts/nuke/plugins/create/create_backdrop.py @@ -1,56 +1,51 @@ -import nuke -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - select_nodes, - set_avalon_knob_data +from nukescripts import autoBackdrop + +from openpype.hosts.nuke.api import ( + NukeCreator, + maintained_selection, + select_nodes ) -class CreateBackdrop(plugin.OpenPypeCreator): +class CreateBackdrop(NukeCreator): """Add Publishable Backdrop""" - name = "nukenodes" - label = "Create Backdrop" + identifier = "create_backdrop" + label = "Nukenodes (backdrop)" family = "nukenodes" icon = "file-archive-o" - defaults = ["Main"] + maintain_selection = True - def __init__(self, 
*args, **kwargs): - super(CreateBackdrop, self).__init__(*args, **kwargs) - self.nodes = nuke.selectedNodes() - self.node_color = "0xdfea5dff" - return + # plugin attributes + node_color = "0xdfea5dff" - def process(self): - from nukescripts import autoBackdrop - nodes = list() - if (self.options or {}).get("useSelection"): - nodes = self.nodes + def create_instance_node( + self, + node_name, + knobs=None, + parent=None, + node_type=None + ): + with maintained_selection(): + if len(self.selected_nodes) >= 1: + select_nodes(self.selected_nodes) - if len(nodes) >= 1: - select_nodes(nodes) - bckd_node = autoBackdrop() - bckd_node["name"].setValue("{}_BDN".format(self.name)) - bckd_node["tile_color"].setValue(int(self.node_color, 16)) - bckd_node["note_font_size"].setValue(24) - bckd_node["label"].setValue("[{}]".format(self.name)) - # add avalon knobs - instance = set_avalon_knob_data(bckd_node, self.data) + created_node = autoBackdrop() + created_node["name"].setValue(node_name) + created_node["tile_color"].setValue(int(self.node_color, 16)) + created_node["note_font_size"].setValue(24) + created_node["label"].setValue("[{}]".format(node_name)) - return instance - else: - msg = str("Please select nodes you " - "wish to add to a container") - self.log.error(msg) - nuke.message(msg) - return - else: - bckd_node = autoBackdrop() - bckd_node["name"].setValue("{}_BDN".format(self.name)) - bckd_node["tile_color"].setValue(int(self.node_color, 16)) - bckd_node["note_font_size"].setValue(24) - bckd_node["label"].setValue("[{}]".format(self.name)) - # add avalon knobs - instance = set_avalon_knob_data(bckd_node, self.data) + return created_node - return instance + def create(self, subset_name, instance_data, pre_create_data): + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance = super(CreateBackdrop, self).create( + subset_name, + instance_data, + pre_create_data + ) + + return instance diff --git a/openpype/hosts/nuke/plugins/create/create_camera.py b/openpype/hosts/nuke/plugins/create/create_camera.py index 3b13c80dc4..b84280b11b 100644 --- a/openpype/hosts/nuke/plugins/create/create_camera.py +++ b/openpype/hosts/nuke/plugins/create/create_camera.py @@ -1,55 +1,66 @@ import nuke -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - set_avalon_knob_data +from openpype.hosts.nuke.api import ( + NukeCreator, + NukeCreatorError, + maintained_selection ) -class CreateCamera(plugin.OpenPypeCreator): - """Add Publishable Backdrop""" +class CreateCamera(NukeCreator): + """Add Publishable Camera""" - name = "camera" - label = "Create 3d Camera" + identifier = "create_camera" + label = "Camera (3d)" family = "camera" icon = "camera" - defaults = ["Main"] - def __init__(self, *args, **kwargs): - super(CreateCamera, self).__init__(*args, **kwargs) - self.nodes = nuke.selectedNodes() - self.node_color = "0xff9100ff" - return + # plugin attributes + node_color = "0xff9100ff" - def process(self): - nodes = list() - if (self.options or {}).get("useSelection"): - nodes = self.nodes - - if len(nodes) >= 1: - # loop selected nodes - for n in nodes: - data = self.data.copy() - if len(nodes) > 1: - # rename subset name only if more - # then one node are selected - subset = self.family + n["name"].value().capitalize() - data["subset"] = subset - - # change node color - n["tile_color"].setValue(int(self.node_color, 16)) - # add avalon knobs - set_avalon_knob_data(n, data) - return True + def create_instance_node( + self, + node_name, + knobs=None, 
+ parent=None, + node_type=None + ): + with maintained_selection(): + if self.selected_nodes: + node = self.selected_nodes[0] + if node.Class() != "Camera3": + raise NukeCreatorError( + "Creator error: Select only camera node type") + created_node = self.selected_nodes[0] else: - msg = str("Please select nodes you " - "wish to add to a container") - self.log.error(msg) - nuke.message(msg) - return + created_node = nuke.createNode("Camera2") + + created_node["tile_color"].setValue( + int(self.node_color, 16)) + + created_node["name"].setValue(node_name) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance = super(CreateCamera, self).create( + subset_name, + instance_data, + pre_create_data + ) + + return instance + + def set_selected_nodes(self, pre_create_data): + if pre_create_data.get("use_selection"): + self.selected_nodes = nuke.selectedNodes() + if self.selected_nodes == []: + raise NukeCreatorError( + "Creator error: No active selection") + elif len(self.selected_nodes) > 1: + raise NukeCreatorError( + "Creator error: Select only one camera node") else: - # if selected is off then create one node - camera_node = nuke.createNode("Camera2") - camera_node["tile_color"].setValue(int(self.node_color, 16)) - # add avalon knobs - instance = set_avalon_knob_data(camera_node, self.data) - return instance + self.selected_nodes = [] diff --git a/openpype/hosts/nuke/plugins/create/create_gizmo.py b/openpype/hosts/nuke/plugins/create/create_gizmo.py index d616f6f7ad..cbe2f635c9 100644 --- a/openpype/hosts/nuke/plugins/create/create_gizmo.py +++ b/openpype/hosts/nuke/plugins/create/create_gizmo.py @@ -1,87 +1,65 @@ import nuke - -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - maintained_selection, - select_nodes, - set_avalon_knob_data +from openpype.hosts.nuke.api import ( + NukeCreator, + NukeCreatorError, + maintained_selection ) -class CreateGizmo(plugin.OpenPypeCreator): - """Add Publishable "gizmo" group +class CreateGizmo(NukeCreator): + """Add Publishable Group as gizmo""" - The name is symbolically gizmo as presumably - it is something familiar to nuke users as group of nodes - distributed downstream in workflow - """ - - name = "gizmo" - label = "Gizmo" + identifier = "create_gizmo" + label = "Gizmo (group)" family = "gizmo" icon = "file-archive-o" - defaults = ["ViewerInput", "Lut", "Effect"] + default_variants = ["ViewerInput", "Lut", "Effect"] - def __init__(self, *args, **kwargs): - super(CreateGizmo, self).__init__(*args, **kwargs) - self.nodes = nuke.selectedNodes() - self.node_color = "0x7533c1ff" - return - - def process(self): - if (self.options or {}).get("useSelection"): - nodes = self.nodes - self.log.info(len(nodes)) - if len(nodes) == 1: - select_nodes(nodes) - node = nodes[-1] - # check if Group node - if node.Class() in "Group": - node["name"].setValue("{}_GZM".format(self.name)) - node["tile_color"].setValue(int(self.node_color, 16)) - return set_avalon_knob_data(node, self.data) - else: - msg = ("Please select a group node " - "you wish to publish as the gizmo") - self.log.error(msg) - nuke.message(msg) - - if len(nodes) >= 2: - select_nodes(nodes) - nuke.makeGroup() - gizmo_node = nuke.selectedNode() - gizmo_node["name"].setValue("{}_GZM".format(self.name)) - gizmo_node["tile_color"].setValue(int(self.node_color, 16)) - - # add sticky node with guide - with gizmo_node: - sticky = 
nuke.createNode("StickyNote") - sticky["label"].setValue( - "Add following:\n- set Input" - " nodes\n- set one Output1\n" - "- create User knobs on the group") - - # add avalon knobs - return set_avalon_knob_data(gizmo_node, self.data) + # plugin attributes + node_color = "0x7533c1ff" + def create_instance_node( + self, + node_name, + knobs=None, + parent=None, + node_type=None + ): + with maintained_selection(): + if self.selected_nodes: + node = self.selected_nodes[0] + if node.Class() != "Group": + raise NukeCreatorError( + "Creator error: Select only 'Group' node type") + created_node = node else: - msg = "Please select nodes you wish to add to the gizmo" - self.log.error(msg) - nuke.message(msg) - return + created_node = nuke.collapseToGroup() + + created_node["tile_color"].setValue( + int(self.node_color, 16)) + + created_node["name"].setValue(node_name) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance = super(CreateGizmo, self).create( + subset_name, + instance_data, + pre_create_data + ) + + return instance + + def set_selected_nodes(self, pre_create_data): + if pre_create_data.get("use_selection"): + self.selected_nodes = nuke.selectedNodes() + if self.selected_nodes == []: + raise NukeCreatorError("Creator error: No active selection") + elif len(self.selected_nodes) > 1: + NukeCreatorError("Creator error: Select only one 'Group' node") else: - with maintained_selection(): - gizmo_node = nuke.createNode("Group") - gizmo_node["name"].setValue("{}_GZM".format(self.name)) - gizmo_node["tile_color"].setValue(int(self.node_color, 16)) - - # add sticky node with guide - with gizmo_node: - sticky = nuke.createNode("StickyNote") - sticky["label"].setValue( - "Add following:\n- add Input" - " nodes\n- add one Output1\n" - "- create User knobs on the group") - - # add avalon knobs - return set_avalon_knob_data(gizmo_node, self.data) + self.selected_nodes = [] diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py index 15a4e3ab8a..a94c9f0313 100644 --- a/openpype/hosts/nuke/plugins/create/create_model.py +++ b/openpype/hosts/nuke/plugins/create/create_model.py @@ -1,87 +1,65 @@ import nuke -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - set_avalon_knob_data +from openpype.hosts.nuke.api import ( + NukeCreator, + NukeCreatorError, + maintained_selection ) -class CreateModel(plugin.OpenPypeCreator): - """Add Publishable Model Geometry""" +class CreateModel(NukeCreator): + """Add Publishable Camera""" - name = "model" - label = "Create 3d Model" + identifier = "create_model" + label = "Model (3d)" family = "model" icon = "cube" - defaults = ["Main"] + default_variants = ["Main"] - def __init__(self, *args, **kwargs): - super(CreateModel, self).__init__(*args, **kwargs) - self.nodes = nuke.selectedNodes() - self.node_color = "0xff3200ff" - return + # plugin attributes + node_color = "0xff3200ff" - def process(self): - nodes = list() - if (self.options or {}).get("useSelection"): - nodes = self.nodes - for n in nodes: - n['selected'].setValue(0) - end_nodes = list() - - # get the latest nodes in tree for selecion - for n in nodes: - x = n - end = 0 - while end == 0: - try: - x = x.dependent()[0] - except: - end_node = x - end = 1 - end_nodes.append(end_node) - - # set end_nodes - end_nodes = list(set(end_nodes)) - - # check if nodes is 3d nodes - for n in 
diff --git a/openpype/hosts/nuke/plugins/create/create_model.py b/openpype/hosts/nuke/plugins/create/create_model.py
index 15a4e3ab8a..a94c9f0313 100644
--- a/openpype/hosts/nuke/plugins/create/create_model.py
+++ b/openpype/hosts/nuke/plugins/create/create_model.py
@@ -1,87 +1,65 @@
 import nuke
-from openpype.hosts.nuke.api import plugin
-from openpype.hosts.nuke.api.lib import (
-    set_avalon_knob_data
+from openpype.hosts.nuke.api import (
+    NukeCreator,
+    NukeCreatorError,
+    maintained_selection
 )


-class CreateModel(plugin.OpenPypeCreator):
-    """Add Publishable Model Geometry"""
+class CreateModel(NukeCreator):
+    """Add Publishable Model"""

-    name = "model"
-    label = "Create 3d Model"
+    identifier = "create_model"
+    label = "Model (3d)"
     family = "model"
     icon = "cube"
-    defaults = ["Main"]
+    default_variants = ["Main"]

-    def __init__(self, *args, **kwargs):
-        super(CreateModel, self).__init__(*args, **kwargs)
-        self.nodes = nuke.selectedNodes()
-        self.node_color = "0xff3200ff"
-        return
+    # plugin attributes
+    node_color = "0xff3200ff"

-    def process(self):
-        nodes = list()
-        if (self.options or {}).get("useSelection"):
-            nodes = self.nodes
-            for n in nodes:
-                n['selected'].setValue(0)
-            end_nodes = list()
-
-            # get the latest nodes in tree for selecion
-            for n in nodes:
-                x = n
-                end = 0
-                while end == 0:
-                    try:
-                        x = x.dependent()[0]
-                    except:
-                        end_node = x
-                        end = 1
-                end_nodes.append(end_node)
-
-            # set end_nodes
-            end_nodes = list(set(end_nodes))
-
-            # check if nodes is 3d nodes
-            for n in end_nodes:
-                n['selected'].setValue(1)
-                sn = nuke.createNode("Scene")
-                if not sn.input(0):
-                    end_nodes.remove(n)
-                nuke.delete(sn)
-
-            # loop over end nodes
-            for n in end_nodes:
-                n['selected'].setValue(1)
-
-            self.nodes = nuke.selectedNodes()
-            nodes = self.nodes
-            if len(nodes) >= 1:
-                # loop selected nodes
-                for n in nodes:
-                    data = self.data.copy()
-                    if len(nodes) > 1:
-                        # rename subset name only if more
-                        # then one node are selected
-                        subset = self.family + n["name"].value().capitalize()
-                        data["subset"] = subset
-
-                    # change node color
-                    n["tile_color"].setValue(int(self.node_color, 16))
-                    # add avalon knobs
-                    set_avalon_knob_data(n, data)
-                return True
+    def create_instance_node(
+        self,
+        node_name,
+        knobs=None,
+        parent=None,
+        node_type=None
+    ):
+        with maintained_selection():
+            if self.selected_nodes:
+                node = self.selected_nodes[0]
+                if node.Class() != "Scene":
+                    raise NukeCreatorError(
+                        "Creator error: Select only 'Scene' node type")
+                created_node = node
             else:
-                msg = str("Please select nodes you "
-                          "wish to add to a container")
-                self.log.error(msg)
-                nuke.message(msg)
-                return
+                created_node = nuke.createNode("Scene")
+
+            created_node["tile_color"].setValue(
+                int(self.node_color, 16))
+
+            created_node["name"].setValue(node_name)
+
+            return created_node
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # make sure subset name is unique
+        self.check_existing_subset(subset_name)
+
+        instance = super(CreateModel, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data
+        )
+
+        return instance
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            self.selected_nodes = nuke.selectedNodes()
+            if self.selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+            elif len(self.selected_nodes) > 1:
+                raise NukeCreatorError(
+                    "Creator error: Select only one 'Scene' node")
         else:
-            # if selected is off then create one node
-            model_node = nuke.createNode("WriteGeo")
-            model_node["tile_color"].setValue(int(self.node_color, 16))
-            # add avalon knobs
-            instance = set_avalon_knob_data(model_node, self.data)
-            return instance
+            self.selected_nodes = []
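Every rewritten creator finishes the same way: build a `CreatedInstance`, stash the live node in `transient_data`, register it with the create context, and serialize the payload onto the node, as the source creator below also does. A hedged outline of that shared tail (the `register_instance` free function is illustrative; in the plugins this logic lives inside the creator's `create()`):

```python
# Sketch of the common instance-registration tail of the new creators.
from openpype.pipeline import CreatedInstance
from openpype.hosts.nuke.api import INSTANCE_DATA_KNOB, set_node_data


def register_instance(creator, family, subset_name, instance_data, node):
    """Create, register and persist a publish instance for `node`."""
    instance = CreatedInstance(family, subset_name, instance_data, creator)

    # keep a live handle to the node for the collectors (transientData)
    instance.transient_data["node"] = node
    creator._add_instance_to_context(instance)

    # serialize the instance payload into the node's data knob
    set_node_data(node, INSTANCE_DATA_KNOB, instance.data_to_store())
    return instance
```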
diff --git a/openpype/hosts/nuke/plugins/create/create_read.py b/openpype/hosts/nuke/plugins/create/create_read.py
deleted file mode 100644
index 87a9dff0f8..0000000000
--- a/openpype/hosts/nuke/plugins/create/create_read.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from collections import OrderedDict
-
-import nuke
-
-from openpype.hosts.nuke.api import plugin
-from openpype.hosts.nuke.api.lib import (
-    set_avalon_knob_data
-)
-
-
-class CrateRead(plugin.OpenPypeCreator):
-    # change this to template preset
-    name = "ReadCopy"
-    label = "Create Read Copy"
-    hosts = ["nuke"]
-    family = "source"
-    families = family
-    icon = "film"
-    defaults = ["Effect", "Backplate", "Fire", "Smoke"]
-
-    def __init__(self, *args, **kwargs):
-        super(CrateRead, self).__init__(*args, **kwargs)
-        self.nodes = nuke.selectedNodes()
-        data = OrderedDict()
-        data['family'] = self.family
-        data['families'] = self.families
-
-        for k, v in self.data.items():
-            if k not in data.keys():
-                data.update({k: v})
-
-        self.data = data
-
-    def process(self):
-        self.name = self.data["subset"]
-        nodes = self.nodes
-
-        if not nodes or len(nodes) == 0:
-            msg = "Please select Read node"
-            self.log.error(msg)
-            nuke.message(msg)
-        else:
-            count_reads = 0
-            for node in nodes:
-                if node.Class() != 'Read':
-                    continue
-                avalon_data = self.data
-                avalon_data['subset'] = "{}".format(self.name)
-                set_avalon_knob_data(node, avalon_data)
-                node['tile_color'].setValue(16744935)
-                count_reads += 1
-
-            if count_reads < 1:
-                msg = "Please select Read node"
-                self.log.error(msg)
-                nuke.message(msg)
-                return
diff --git a/openpype/hosts/nuke/plugins/create/create_source.py b/openpype/hosts/nuke/plugins/create/create_source.py
new file mode 100644
index 0000000000..8419c3ef33
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/create/create_source.py
@@ -0,0 +1,88 @@
+import nuke
+import six
+import sys
+from openpype.hosts.nuke.api import (
+    INSTANCE_DATA_KNOB,
+    NukeCreator,
+    NukeCreatorError,
+    set_node_data
+)
+from openpype.pipeline import (
+    CreatedInstance
+)
+
+
+class CreateSource(NukeCreator):
+    """Add Publishable Read with source"""
+
+    identifier = "create_source"
+    label = "Source (read)"
+    family = "source"
+    icon = "film"
+    default_variants = ["Effect", "Backplate", "Fire", "Smoke"]
+
+    # plugin attributes
+    node_color = "0xff9100ff"
+
+    def create_instance_node(
+        self,
+        node_name,
+        read_node
+    ):
+        read_node["tile_color"].setValue(
+            int(self.node_color, 16))
+        read_node["name"].setValue(node_name)
+
+        return read_node
+
+    def create(self, subset_name, instance_data, pre_create_data):
+
+        # make sure selected nodes are added
+        self.set_selected_nodes(pre_create_data)
+
+        try:
+            for read_node in self.selected_nodes:
+                if read_node.Class() != 'Read':
+                    continue
+
+                node_name = read_node.name()
+                _subset_name = subset_name + node_name
+
+                # make sure subset name is unique
+                self.check_existing_subset(_subset_name)
+
+                instance_node = self.create_instance_node(
+                    _subset_name,
+                    read_node
+                )
+                instance = CreatedInstance(
+                    self.family,
+                    _subset_name,
+                    instance_data,
+                    self
+                )
+
+                instance.transient_data["node"] = instance_node
+
+                self._add_instance_to_context(instance)
+
+                set_node_data(
+                    instance_node,
+                    INSTANCE_DATA_KNOB,
+                    instance.data_to_store()
+                )
+
+        except Exception as er:
+            six.reraise(
+                NukeCreatorError,
+                NukeCreatorError("Creator error: {}".format(er)),
+                sys.exc_info()[2])
+
+    def set_selected_nodes(self, pre_create_data):
+        if pre_create_data.get("use_selection"):
+            self.selected_nodes = nuke.selectedNodes()
+            if self.selected_nodes == []:
+                raise NukeCreatorError("Creator error: No active selection")
+        else:
+            raise NukeCreatorError(
+                "Creator error: only supported with active selection")
diff --git a/openpype/hosts/nuke/plugins/create/create_write_image.py b/openpype/hosts/nuke/plugins/create/create_write_image.py
new file mode 100644
index 0000000000..0c8adfb75c
--- /dev/null
+++ b/openpype/hosts/nuke/plugins/create/create_write_image.py
@@ -0,0 +1,165 @@
+import nuke
+import sys
+import six
+
+from openpype.pipeline import (
+    CreatedInstance
+)
+from openpype.lib import (
+    BoolDef,
+    NumberDef,
+    UISeparatorDef,
+    EnumDef
+)
+from openpype.hosts.nuke import api as napi
+
+
+class CreateWriteImage(napi.NukeWriteCreator):
+    identifier = "create_write_image"
+    label = "Image (write)"
+    family = "image"
+    icon = "sign-out"
+
+    instance_attributes = [
+        "use_range_limit"
+    ]
+    default_variants = [
+        "StillFrame",
+        "MPFrame",
+        "LayoutFrame"
+    ]
+    temp_rendering_path_template = (
+        "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}")
+
+    def get_pre_create_attr_defs(self):
+        attr_defs = [
+            BoolDef(
+                "use_selection",
+                default=not self.create_context.headless,
+                label="Use selection"
+            ),
+            self._get_render_target_enum(),
+            UISeparatorDef(),
+            self._get_frame_source_number()
+        ]
+        return attr_defs
+
+    def _get_render_target_enum(self):
+        rendering_targets 
= { + "local": "Local machine rendering", + "frames": "Use existing frames" + } + + return EnumDef( + "render_target", + items=rendering_targets, + label="Render target" + ) + + def _get_frame_source_number(self): + return NumberDef( + "active_frame", + label="Active frame", + default=nuke.frame() + ) + + def create_instance_node(self, subset_name, instance_data): + linked_knobs_ = [] + if "use_range_limit" in self.instance_attributes: + linked_knobs_ = ["channels", "___", "first", "last", "use_limit"] + + # add fpath_template + write_data = { + "creator": self.__class__.__name__, + "subset": subset_name, + "fpath_template": self.temp_rendering_path_template + } + write_data.update(instance_data) + + created_node = napi.create_write_node( + subset_name, + write_data, + input=self.selected_node, + prenodes=self.prenodes, + linked_knobs=linked_knobs_, + **{ + "frame": nuke.frame() + } + ) + + self._add_frame_range_limit(created_node, instance_data) + + self.integrate_links(created_node, outputs=True) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + subset_name = subset_name.format(**pre_create_data) + + # pass values from precreate to instance + self.pass_pre_attributes_to_instance( + instance_data, + pre_create_data, + [ + "active_frame", + "render_target" + ] + ) + + # make sure selected nodes are added + self.set_selected_nodes(pre_create_data) + + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance_node = self.create_instance_node( + subset_name, + instance_data, + ) + + try: + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self + ) + + instance.transient_data["node"] = instance_node + + self._add_instance_to_context(instance) + + napi.set_node_data( + instance_node, + napi.INSTANCE_DATA_KNOB, + instance.data_to_store() + ) + + return instance + + except Exception as er: + six.reraise( + napi.NukeCreatorError, + napi.NukeCreatorError("Creator error: {}".format(er)), + sys.exc_info()[2] + ) + + def _add_frame_range_limit(self, write_node, instance_data): + if "use_range_limit" not in self.instance_attributes: + return + + active_frame = ( + instance_data["creator_attributes"].get("active_frame")) + + write_node.begin() + for n in nuke.allNodes(): + # get write node + if n.Class() in "Write": + w_node = n + write_node.end() + + w_node["use_limit"].setValue(True) + w_node["first"].setValue(active_frame or nuke.frame()) + w_node["last"].setExpression("first") + + return write_node diff --git a/openpype/hosts/nuke/plugins/create/create_write_prerender.py b/openpype/hosts/nuke/plugins/create/create_write_prerender.py index fec97167fb..f46dd2d6d5 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_prerender.py +++ b/openpype/hosts/nuke/plugins/create/create_write_prerender.py @@ -1,56 +1,138 @@ import nuke +import sys +import six -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - create_write_node, create_write_node_legacy) +from openpype.pipeline import ( + CreatedInstance +) +from openpype.lib import ( + BoolDef +) +from openpype.hosts.nuke import api as napi -class CreateWritePrerender(plugin.AbstractWriteRender): - # change this to template preset - name = "WritePrerender" - label = "Create Write Prerender" - hosts = ["nuke"] - n_class = "Write" +class CreateWritePrerender(napi.NukeWriteCreator): + identifier = "create_write_prerender" + label = "Prerender (write)" family = "prerender" icon = "sign-out" - # settings - fpath_template = 
"{work}/render/nuke/{subset}/{subset}.{frame}.{ext}" - defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"] - reviewable = False - use_range_limit = True + instance_attributes = [ + "use_range_limit" + ] + default_variants = [ + "Key01", + "Bg01", + "Fg01", + "Branch01", + "Part01" + ] + temp_rendering_path_template = ( + "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}") - def __init__(self, *args, **kwargs): - super(CreateWritePrerender, self).__init__(*args, **kwargs) + def get_pre_create_attr_defs(self): + attr_defs = [ + BoolDef( + "use_selection", + default=not self.create_context.headless, + label="Use selection" + ), + self._get_render_target_enum() + ] + return attr_defs + + def create_instance_node(self, subset_name, instance_data): + linked_knobs_ = [] + if "use_range_limit" in self.instance_attributes: + linked_knobs_ = ["channels", "___", "first", "last", "use_limit"] - def _create_write_node(self, selected_node, inputs, outputs, write_data): # add fpath_template - write_data["fpath_template"] = self.fpath_template - write_data["use_range_limit"] = self.use_range_limit - write_data["frame_range"] = ( - nuke.root()["first_frame"].value(), - nuke.root()["last_frame"].value() + write_data = { + "creator": self.__class__.__name__, + "subset": subset_name, + "fpath_template": self.temp_rendering_path_template + } + + write_data.update(instance_data) + + # get width and height + if self.selected_node: + width, height = ( + self.selected_node.width(), self.selected_node.height()) + else: + actual_format = nuke.root().knob('format').value() + width, height = (actual_format.width(), actual_format.height()) + + created_node = napi.create_write_node( + subset_name, + write_data, + input=self.selected_node, + prenodes=self.prenodes, + linked_knobs=linked_knobs_, + **{ + "width": width, + "height": height + } ) - if not self.is_legacy(): - return create_write_node( - self.data["subset"], - write_data, - input=selected_node, - review=self.reviewable, - linked_knobs=["channels", "___", "first", "last", "use_limit"] - ) - else: - return create_write_node_legacy( - self.data["subset"], - write_data, - input=selected_node, - review=self.reviewable, - linked_knobs=["channels", "___", "first", "last", "use_limit"] + self._add_frame_range_limit(created_node) + + self.integrate_links(created_node, outputs=True) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # pass values from precreate to instance + self.pass_pre_attributes_to_instance( + instance_data, + pre_create_data, + [ + "render_target" + ] + ) + + # make sure selected nodes are added + self.set_selected_nodes(pre_create_data) + + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance_node = self.create_instance_node( + subset_name, + instance_data + ) + + try: + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self ) - def _modify_write_node(self, write_node): - # open group node + instance.transient_data["node"] = instance_node + + self._add_instance_to_context(instance) + + napi.set_node_data( + instance_node, + napi.INSTANCE_DATA_KNOB, + instance.data_to_store() + ) + + return instance + + except Exception as er: + six.reraise( + napi.NukeCreatorError, + napi.NukeCreatorError("Creator error: {}".format(er)), + sys.exc_info()[2] + ) + + def _add_frame_range_limit(self, write_node): + if "use_range_limit" not in self.instance_attributes: + return + write_node.begin() for n in nuke.allNodes(): # get write node @@ -58,9 
+140,8 @@ class CreateWritePrerender(plugin.AbstractWriteRender): w_node = n write_node.end() - if self.use_range_limit: - w_node["use_limit"].setValue(True) - w_node["first"].setValue(nuke.root()["first_frame"].value()) - w_node["last"].setValue(nuke.root()["last_frame"].value()) + w_node["use_limit"].setValue(True) + w_node["first"].setValue(nuke.root()["first_frame"].value()) + w_node["last"].setValue(nuke.root()["last_frame"].value()) return write_node diff --git a/openpype/hosts/nuke/plugins/create/create_write_render.py b/openpype/hosts/nuke/plugins/create/create_write_render.py index 23846c0332..c24405873a 100644 --- a/openpype/hosts/nuke/plugins/create/create_write_render.py +++ b/openpype/hosts/nuke/plugins/create/create_write_render.py @@ -1,86 +1,119 @@ import nuke +import sys +import six -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - create_write_node, create_write_node_legacy) +from openpype.pipeline import ( + CreatedInstance +) +from openpype.lib import ( + BoolDef +) +from openpype.hosts.nuke import api as napi -class CreateWriteRender(plugin.AbstractWriteRender): - # change this to template preset - name = "WriteRender" - label = "Create Write Render" - hosts = ["nuke"] - n_class = "Write" +class CreateWriteRender(napi.NukeWriteCreator): + identifier = "create_write_render" + label = "Render (write)" family = "render" icon = "sign-out" - # settings - fpath_template = "{work}/render/nuke/{subset}/{subset}.{frame}.{ext}" - defaults = ["Main", "Mask"] - prenodes = { - "Reformat01": { - "nodeclass": "Reformat", - "dependent": None, - "knobs": [ - { - "type": "text", - "name": "resize", - "value": "none" - }, - { - "type": "bool", - "name": "black_outside", - "value": True - } - ] - } - } + instance_attributes = [ + "reviewable" + ] + default_variants = [ + "Main", + "Mask" + ] + temp_rendering_path_template = ( + "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}") - def __init__(self, *args, **kwargs): - super(CreateWriteRender, self).__init__(*args, **kwargs) + def get_pre_create_attr_defs(self): + attr_defs = [ + BoolDef( + "use_selection", + default=not self.create_context.headless, + label="Use selection" + ), + self._get_render_target_enum() + ] + return attr_defs - def _create_write_node(self, selected_node, inputs, outputs, write_data): + def create_instance_node(self, subset_name, instance_data): # add fpath_template - write_data["fpath_template"] = self.fpath_template + write_data = { + "creator": self.__class__.__name__, + "subset": subset_name, + "fpath_template": self.temp_rendering_path_template + } + + write_data.update(instance_data) - # add reformat node to cut off all outside of format bounding box # get width and height - try: - width, height = (selected_node.width(), selected_node.height()) - except AttributeError: + if self.selected_node: + width, height = ( + self.selected_node.width(), self.selected_node.height()) + else: actual_format = nuke.root().knob('format').value() width, height = (actual_format.width(), actual_format.height()) - if not self.is_legacy(): - return create_write_node( - self.data["subset"], - write_data, - input=selected_node, - prenodes=self.prenodes, - **{ - "width": width, - "height": height - } - ) - else: - _prenodes = [ - { - "name": "Reformat01", - "class": "Reformat", - "knobs": [ - ("resize", 0), - ("black_outside", 1), - ], - "dependent": None - } + created_node = napi.create_write_node( + subset_name, + write_data, + input=self.selected_node, + prenodes=self.prenodes, + **{ + 
"width": width, + "height": height + } + ) + + self.integrate_links(created_node, outputs=False) + + return created_node + + def create(self, subset_name, instance_data, pre_create_data): + # pass values from precreate to instance + self.pass_pre_attributes_to_instance( + instance_data, + pre_create_data, + [ + "render_target" ] + ) + # make sure selected nodes are added + self.set_selected_nodes(pre_create_data) - return create_write_node_legacy( - self.data["subset"], - write_data, - input=selected_node, - prenodes=_prenodes + # make sure subset name is unique + self.check_existing_subset(subset_name) + + instance_node = self.create_instance_node( + subset_name, + instance_data + ) + + try: + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self ) - def _modify_write_node(self, write_node): - return write_node + instance.transient_data["node"] = instance_node + + self._add_instance_to_context(instance) + + napi.set_node_data( + instance_node, + napi.INSTANCE_DATA_KNOB, + instance.data_to_store() + ) + + return instance + + except Exception as er: + six.reraise( + napi.NukeCreatorError, + napi.NukeCreatorError("Creator error: {}".format(er)), + sys.exc_info()[2] + ) diff --git a/openpype/hosts/nuke/plugins/create/create_write_still.py b/openpype/hosts/nuke/plugins/create/create_write_still.py deleted file mode 100644 index bb08e8c2c6..0000000000 --- a/openpype/hosts/nuke/plugins/create/create_write_still.py +++ /dev/null @@ -1,105 +0,0 @@ -import nuke - -from openpype.hosts.nuke.api import plugin -from openpype.hosts.nuke.api.lib import ( - create_write_node, - create_write_node_legacy, - get_created_node_imageio_setting_legacy -) - -# HACK: just to disable still image on projects which -# are not having anatomy imageio preset for CreateWriteStill -# TODO: remove this code as soon as it will be obsolete -imageio_writes = get_created_node_imageio_setting_legacy( - "Write", - "CreateWriteStill", - "stillMain" -) -print(imageio_writes["knobs"]) - - -class CreateWriteStill(plugin.AbstractWriteRender): - # change this to template preset - name = "WriteStillFrame" - label = "Create Write Still Image" - hosts = ["nuke"] - n_class = "Write" - family = "still" - icon = "image" - - # settings - fpath_template = "{work}/render/nuke/{subset}/{subset}.{ext}" - defaults = [ - "ImageFrame", - "MPFrame", - "LayoutFrame" - ] - prenodes = { - "FrameHold01": { - "nodeclass": "FrameHold", - "dependent": None, - "knobs": [ - { - "type": "formatable", - "name": "first_frame", - "template": "{frame}", - "to_type": "number" - } - ] - } - } - - def __init__(self, *args, **kwargs): - super(CreateWriteStill, self).__init__(*args, **kwargs) - - def _create_write_node(self, selected_node, inputs, outputs, write_data): - # add fpath_template - write_data["fpath_template"] = self.fpath_template - - if not self.is_legacy(): - return create_write_node( - self.name, - write_data, - input=selected_node, - review=False, - prenodes=self.prenodes, - farm=False, - linked_knobs=["channels", "___", "first", "last", "use_limit"], - **{ - "frame": nuke.frame() - } - ) - else: - _prenodes = [ - { - "name": "FrameHold01", - "class": "FrameHold", - "knobs": [ - ("first_frame", nuke.frame()) - ], - "dependent": None - } - ] - return create_write_node_legacy( - self.name, - write_data, - input=selected_node, - review=False, - prenodes=_prenodes, - farm=False, - linked_knobs=["channels", "___", "first", "last", "use_limit"] - ) - - def _modify_write_node(self, write_node): - write_node.begin() - for n in 
nuke.allNodes(): - # get write node - if n.Class() in "Write": - w_node = n - write_node.end() - - w_node["use_limit"].setValue(True) - w_node["first"].setValue(nuke.frame()) - w_node["last"].setValue(nuke.frame()) - - return write_node diff --git a/openpype/hosts/nuke/plugins/create/workfile_creator.py b/openpype/hosts/nuke/plugins/create/workfile_creator.py new file mode 100644 index 0000000000..72ef61e63f --- /dev/null +++ b/openpype/hosts/nuke/plugins/create/workfile_creator.py @@ -0,0 +1,69 @@ +import openpype.hosts.nuke.api as api +from openpype.client import get_asset_by_name +from openpype.pipeline import ( + AutoCreator, + CreatedInstance, + legacy_io, +) +from openpype.hosts.nuke.api import ( + INSTANCE_DATA_KNOB, + set_node_data +) +import nuke + + +class WorkfileCreator(AutoCreator): + identifier = "workfile" + family = "workfile" + + default_variant = "Main" + + def get_instance_attr_defs(self): + return [] + + def collect_instances(self): + root_node = nuke.root() + instance_data = api.get_node_data( + root_node, api.INSTANCE_DATA_KNOB + ) + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, task_name, asset_doc, + project_name, host_name + ) + instance_data.update({ + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + }) + instance_data.update(self.get_dynamic_data( + self.default_variant, task_name, asset_doc, + project_name, host_name, instance_data + )) + + instance = CreatedInstance( + self.family, subset_name, instance_data, self + ) + instance.transient_data["node"] = root_node + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + for created_inst, _changes in update_list: + instance_node = created_inst.transient_data["node"] + + set_node_data( + instance_node, + INSTANCE_DATA_KNOB, + created_inst.data_to_store() + ) + + def create(self, options=None): + # no need to create if it is created + # in `collect_instances` + pass diff --git a/openpype/hosts/nuke/plugins/load/actions.py b/openpype/hosts/nuke/plugins/load/actions.py index 69f56c7305..3227a7ed98 100644 --- a/openpype/hosts/nuke/plugins/load/actions.py +++ b/openpype/hosts/nuke/plugins/load/actions.py @@ -17,6 +17,7 @@ class SetFrameRangeLoader(load.LoaderPlugin): "yeticache", "pointcache"] representations = ["*"] + extension = {"*"} label = "Set frame range" order = 11 @@ -73,8 +74,7 @@ class SetFrameRangeWithHandlesLoader(load.LoaderPlugin): return # Include handles - handles = version_data.get("handles", 0) - start -= handles - end += handles + start -= version_data.get("handleStart", 0) + end += version_data.get("handleEnd", 0) lib.update_frame_range(start, end) diff --git a/openpype/hosts/nuke/plugins/load/load_backdrop.py b/openpype/hosts/nuke/plugins/load/load_backdrop.py index d1fb763500..67c7877e60 100644 --- a/openpype/hosts/nuke/plugins/load/load_backdrop.py +++ b/openpype/hosts/nuke/plugins/load/load_backdrop.py @@ -25,8 +25,9 @@ from openpype.hosts.nuke.api import containerise, update_container class LoadBackdropNodes(load.LoaderPlugin): """Loading Published Backdrop nodes (workfile, nukenodes)""" - representations = ["nk"] families = ["workfile", "nukenodes"] + representations = ["*"] + extension = {"nk"} label = "Import Nuke Nodes" order = 0 @@ -53,22 +54,19 @@ class 
LoadBackdropNodes(load.LoaderPlugin): version = context['version'] version_data = version.get("data", {}) vname = version.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = namespace or context['asset']['name'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) # prepare data for imprinting # add additional metadata from the version to imprint to Avalon knob - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] - data_imprint = {"frameStart": first, - "frameEnd": last, - "version": vname, - "colorspaceInput": colorspace, - "objectName": object_name} + data_imprint = { + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name + } for k in add_keys: data_imprint.update({k: version_data[k]}) @@ -203,18 +201,13 @@ class LoadBackdropNodes(load.LoaderPlugin): name = container['name'] version_data = version_doc.get("data", {}) vname = version_doc.get("name", None) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) namespace = container['namespace'] colorspace = version_data.get("colorspace", None) object_name = "{}_{}".format(name, namespace) - add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", - "source", "author", "fps"] + add_keys = ["source", "author", "fps"] data_imprint = {"representation": str(representation["_id"]), - "frameStart": first, - "frameEnd": last, "version": vname, "colorspaceInput": colorspace, "objectName": object_name} diff --git a/openpype/hosts/nuke/plugins/load/load_camera_abc.py b/openpype/hosts/nuke/plugins/load/load_camera_abc.py index 9fef7424c8..11cc63d25c 100644 --- a/openpype/hosts/nuke/plugins/load/load_camera_abc.py +++ b/openpype/hosts/nuke/plugins/load/load_camera_abc.py @@ -25,7 +25,8 @@ class AlembicCameraLoader(load.LoaderPlugin): """ families = ["camera"] - representations = ["abc"] + representations = ["*"] + extension = {"abc"} label = "Load Alembic Camera" icon = "camera" diff --git a/openpype/hosts/nuke/plugins/load/load_clip.py b/openpype/hosts/nuke/plugins/load/load_clip.py index 565d777811..cb3da79ef5 100644 --- a/openpype/hosts/nuke/plugins/load/load_clip.py +++ b/openpype/hosts/nuke/plugins/load/load_clip.py @@ -21,6 +21,10 @@ from openpype.hosts.nuke.api import ( viewer_update_and_undo_stop, colorspace_exists_on_node ) +from openpype.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) from openpype.hosts.nuke.api import plugin @@ -38,13 +42,10 @@ class LoadClip(plugin.NukeLoader): "prerender", "review" ] - representations = [ - "exr", - "dpx", - "mov", - "review", - "mp4" - ] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) label = "Load Clip" order = -20 @@ -81,17 +82,17 @@ class LoadClip(plugin.NukeLoader): @classmethod def get_representations(cls): - return ( - cls.representations - + cls._representations - + plugin.get_review_presets_config() - ) + return cls._representations or cls.representations def load(self, context, name, namespace, options): + """Load asset via database + """ representation = context["representation"] - # reste container id so it is always unique for each instance + # reset container id so it is always unique for each instance self.reset_container_id() + self.log.warning(self.extensions) + is_sequence = len(representation["files"]) > 1 if is_sequence: @@ -220,8 
+221,23 @@ class LoadClip(plugin.NukeLoader): dict: altered representation data """ representation = deepcopy(representation) - frame = representation["context"]["frame"] - representation["context"]["frame"] = "#" * len(str(frame)) + context = representation["context"] + + # Get the frame from the context and hash it + frame = context["frame"] + hashed_frame = "#" * len(str(frame)) + + # Replace the frame with the hash in the originalBasename + if ( + "{originalBasename}" in representation["data"]["template"] + ): + origin_basename = context["originalBasename"] + context["originalBasename"] = origin_basename.replace( + frame, hashed_frame + ) + + # Replace the frame with the hash in the frame + representation["context"]["frame"] = hashed_frame return representation def update(self, container, representation): diff --git a/openpype/hosts/nuke/plugins/load/load_effects.py b/openpype/hosts/nuke/plugins/load/load_effects.py index cef4b0a5fc..d49f87a094 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects.py +++ b/openpype/hosts/nuke/plugins/load/load_effects.py @@ -22,8 +22,9 @@ from openpype.hosts.nuke.api import ( class LoadEffects(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" - representations = ["effectJson"] families = ["effect"] + representations = ["*"] + extension = {"json"} label = "Load Effects - nodes" order = 0 diff --git a/openpype/hosts/nuke/plugins/load/load_effects_ip.py b/openpype/hosts/nuke/plugins/load/load_effects_ip.py index 9bd40be816..bfe32c1ed9 100644 --- a/openpype/hosts/nuke/plugins/load/load_effects_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_effects_ip.py @@ -23,8 +23,9 @@ from openpype.hosts.nuke.api import ( class LoadEffectsInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" - representations = ["effectJson"] families = ["effect"] + representations = ["*"] + extension = {"json"} label = "Load Effects - Input Process" order = 0 diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo.py b/openpype/hosts/nuke/plugins/load/load_gizmo.py index 9a18eeef5c..2aa7c49723 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo.py @@ -24,8 +24,9 @@ from openpype.hosts.nuke.api import ( class LoadGizmo(load.LoaderPlugin): """Loading nuke Gizmo""" - representations = ["gizmo"] families = ["gizmo"] + representations = ["*"] + extension = {"gizmo"} label = "Load Gizmo" order = 0 diff --git a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py index 2890dbfd2c..2514a28299 100644 --- a/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py +++ b/openpype/hosts/nuke/plugins/load/load_gizmo_ip.py @@ -26,8 +26,9 @@ from openpype.hosts.nuke.api import ( class LoadGizmoInputProcess(load.LoaderPlugin): """Loading colorspace soft effect exported from nukestudio""" - representations = ["gizmo"] families = ["gizmo"] + representations = ["*"] + extension = {"gizmo"} label = "Load Gizmo - Input Process" order = 0 diff --git a/openpype/hosts/nuke/plugins/load/load_image.py b/openpype/hosts/nuke/plugins/load/load_image.py index 49dc12f588..f82ee4db88 100644 --- a/openpype/hosts/nuke/plugins/load/load_image.py +++ b/openpype/hosts/nuke/plugins/load/load_image.py @@ -19,6 +19,9 @@ from openpype.hosts.nuke.api import ( update_container, viewer_update_and_undo_stop ) +from openpype.lib.transcoding import ( + IMAGE_EXTENSIONS +) class LoadImage(load.LoaderPlugin): @@ -33,7 +36,10 @@ class 
LoadImage(load.LoaderPlugin): "review", "image" ] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "psd", "tiff"] + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS + ) label = "Load Image" order = -10 @@ -58,7 +64,7 @@ class LoadImage(load.LoaderPlugin): @classmethod def get_representations(cls): - return cls.representations + cls._representations + return cls._representations or cls.representations def load(self, context, name, namespace, options): self.log.info("__ options: `{}`".format(options)) diff --git a/openpype/hosts/nuke/plugins/load/load_matchmove.py b/openpype/hosts/nuke/plugins/load/load_matchmove.py index f5a90706c7..a7d124d472 100644 --- a/openpype/hosts/nuke/plugins/load/load_matchmove.py +++ b/openpype/hosts/nuke/plugins/load/load_matchmove.py @@ -8,7 +8,9 @@ class MatchmoveLoader(load.LoaderPlugin): """ families = ["matchmove"] - representations = ["py"] + representations = ["*"] + extension = {"py"} + defaults = ["Camera", "Object"] label = "Run matchmove script" diff --git a/openpype/hosts/nuke/plugins/load/load_model.py b/openpype/hosts/nuke/plugins/load/load_model.py index ad985e83c6..f968da8475 100644 --- a/openpype/hosts/nuke/plugins/load/load_model.py +++ b/openpype/hosts/nuke/plugins/load/load_model.py @@ -23,7 +23,8 @@ class AlembicModelLoader(load.LoaderPlugin): """ families = ["model", "pointcache", "animation"] - representations = ["abc"] + representations = ["*"] + extension = {"abc"} label = "Load Alembic" icon = "cube" diff --git a/openpype/hosts/nuke/plugins/load/load_script_precomp.py b/openpype/hosts/nuke/plugins/load/load_script_precomp.py index f0972f85d2..53e9a76003 100644 --- a/openpype/hosts/nuke/plugins/load/load_script_precomp.py +++ b/openpype/hosts/nuke/plugins/load/load_script_precomp.py @@ -20,8 +20,9 @@ from openpype.hosts.nuke.api import ( class LinkAsGroup(load.LoaderPlugin): """Copy the published file to be pasted at the desired location""" - representations = ["nk"] families = ["workfile", "nukenodes"] + representations = ["*"] + extension = {"nk"} label = "Load Precomp" order = 0 @@ -137,7 +138,6 @@ class LinkAsGroup(load.LoaderPlugin): "version": version_doc.get("name"), "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handles": version_data.get("handles"), "fps": version_data.get("fps"), "author": version_data.get("author") }) diff --git a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py index 4efbb88b8c..7d51af7e9e 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/collect_backdrop.py @@ -1,9 +1,9 @@ +from pprint import pformat import pyblish.api from openpype.hosts.nuke.api import lib as pnlib import nuke -@pyblish.api.log class CollectBackdrops(pyblish.api.InstancePlugin): """Collect Backdrop node instance and its content """ @@ -14,8 +14,9 @@ class CollectBackdrops(pyblish.api.InstancePlugin): families = ["nukenodes"] def process(self, instance): + self.log.debug(pformat(instance.data)) - bckn = instance[0] + bckn = instance.data["transientData"]["node"] # define size of the backdrop left = bckn.xpos() @@ -23,6 +24,7 @@ class CollectBackdrops(pyblish.api.InstancePlugin): right = left + bckn['bdwidth'].value() bottom = top + bckn['bdheight'].value() + instance.data["transientData"]["childNodes"] = [] # iterate all nodes for node in nuke.allNodes(): @@ -37,51 +39,22 @@ class CollectBackdrops(pyblish.api.InstancePlugin): and 
(node.ypos() + node.screenHeight() < bottom):

                 # add contained nodes to instance's node list
-                instance.append(node)
+                instance.data["transientData"]["childNodes"].append(node)

         # get all connections from outside of backdrop
-        nodes = instance[1:]
+        nodes = instance.data["transientData"]["childNodes"]
         connections_in, connections_out = pnlib.get_dependent_nodes(nodes)
-        instance.data["nodeConnectionsIn"] = connections_in
-        instance.data["nodeConnectionsOut"] = connections_out
+        instance.data["transientData"]["nodeConnectionsIn"] = connections_in
+        instance.data["transientData"]["nodeConnectionsOut"] = connections_out

         # make label nicer
         instance.data["label"] = "{0} ({1} nodes)".format(
-            bckn.name(), len(instance) - 1)
-
-        instance.data["families"].append(instance.data["family"])
-
-        # Get frame range
-        handle_start = instance.context.data["handleStart"]
-        handle_end = instance.context.data["handleEnd"]
-        first_frame = int(nuke.root()["first_frame"].getValue())
-        last_frame = int(nuke.root()["last_frame"].getValue())
+            bckn.name(), len(instance.data["transientData"]["childNodes"]))

         # get version
         version = instance.context.data.get('version')
-        if not version:
-            raise RuntimeError("Script name has no version in the name.")
+        if version:
+            instance.data['version'] = version

-        instance.data['version'] = version
-
-        # Add version data to instance
-        version_data = {
-            "handles": handle_start,
-            "handleStart": handle_start,
-            "handleEnd": handle_end,
-            "frameStart": first_frame + handle_start,
-            "frameEnd": last_frame - handle_end,
-            "version": int(version),
-            "families": [instance.data["family"]] + instance.data["families"],
-            "subset": instance.data["subset"],
-            "fps": instance.context.data["fps"]
-        }
-
-        instance.data.update({
-            "versionData": version_data,
-            "frameStart": first_frame,
-            "frameEnd": last_frame
-        })
-        self.log.info("Backdrop content collected: `{}`".format(instance[:]))
         self.log.info("Backdrop instance collected: `{}`".format(instance))
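From here on the collectors stop indexing the instance (`instance[0]`) and instead read the node handle the creator stashed in `transientData`, as `CollectBackdrops` above now does. A minimal collector skeleton following that pattern (family and label values are illustrative):

```python
# Hedged sketch of the new collector access pattern used in this PR.
import pyblish.api


class CollectFromTransientData(pyblish.api.InstancePlugin):
    """Example collector reading the creator-stashed node."""

    order = pyblish.api.CollectorOrder
    label = "Collect From Transient Data"  # illustrative
    hosts = ["nuke"]
    families = ["nukenodes"]  # illustrative

    def process(self, instance):
        # the creator stored the live nuke.Node under transientData
        node = instance.data["transientData"]["node"]
        self.log.debug("Collected node: {}".format(node.name()))
```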
+ ) + + # Get frame range + first_frame = int(root_node["first_frame"].getValue()) + last_frame = int(root_node["last_frame"].getValue()) + + # get instance data from root + root_instance_context = napi.get_node_data( + root_node, napi.INSTANCE_DATA_KNOB + ) + + handle_start = root_instance_context["handleStart"] + handle_end = root_instance_context["handleEnd"] + + # Get format + format = root_node['format'].value() + resolution_width = format.width() + resolution_height = format.height() + pixel_aspect = format.pixelAspect() + + script_data = { + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "pixelAspect": pixel_aspect, + + "handleStart": handle_start, + "handleEnd": handle_end, + "step": 1, + "fps": root_node['fps'].value(), + + "currentFile": current_file, + "version": int(get_version_from_path(current_file)), + + "host": pyblish.api.current_host(), + "hostVersion": nuke.NUKE_VERSION_STRING + } + + context.data["scriptData"] = script_data + context.data.update(script_data) + + self.log.info('Context from Nuke script collected') diff --git a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py index 3db26096ae..e3c40a7a90 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_gizmo.py +++ b/openpype/hosts/nuke/plugins/publish/collect_gizmo.py @@ -2,25 +2,23 @@ import pyblish.api import nuke -@pyblish.api.log class CollectGizmo(pyblish.api.InstancePlugin): """Collect Gizmo (group) node instance and its content """ order = pyblish.api.CollectorOrder + 0.22 - label = "Collect Gizmo (Group)" + label = "Collect Gizmo (group)" hosts = ["nuke"] families = ["gizmo"] def process(self, instance): - grpn = instance[0] + gizmo_node = instance.data["transientData"]["node"] # add family to familiess instance.data["families"].insert(0, instance.data["family"]) # make label nicer - instance.data["label"] = "{0} ({1} nodes)".format( - grpn.name(), len(instance) - 1) + instance.data["label"] = gizmo_node.name() # Get frame range handle_start = instance.context.data["handleStart"] @@ -30,7 +28,6 @@ class CollectGizmo(pyblish.api.InstancePlugin): # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, @@ -46,5 +43,4 @@ class CollectGizmo(pyblish.api.InstancePlugin): "frameStart": first_frame, "frameEnd": last_frame }) - self.log.info("Gizmo content collected: `{}`".format(instance[:])) self.log.info("Gizmo instance collected: `{}`".format(instance)) diff --git a/openpype/hosts/nuke/plugins/publish/collect_instance_data.py b/openpype/hosts/nuke/plugins/publish/collect_instance_data.py new file mode 100644 index 0000000000..3908aef4bc --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/collect_instance_data.py @@ -0,0 +1,44 @@ +import nuke +import pyblish.api + + +class CollectInstanceData(pyblish.api.InstancePlugin): + """Collect all nodes with Avalon knob.""" + + order = pyblish.api.CollectorOrder - 0.49 + label = "Collect Instance Data" + hosts = ["nuke", "nukeassist"] + + # presets + sync_workfile_version_on_families = [] + + def process(self, instance): + family = instance.data["family"] + + # Get format + root = nuke.root() + format_ = root['format'].value() + resolution_width = format_.width() + resolution_height = format_.height() + pixel_aspect = format_.pixelAspect() + + # sync workfile version + if family in 
self.sync_workfile_version_on_families: + self.log.debug( + "Syncing version with workfile for '{}'".format( + family + ) + ) + # get version to instance for integration + instance.data['version'] = instance.context.data['version'] + + instance.data.update({ + "step": 1, + "fps": root['fps'].value(), + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "pixelAspect": pixel_aspect + + }) + self.log.debug("Collected instance: {}".format( + instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/collect_model.py b/openpype/hosts/nuke/plugins/publish/collect_model.py index 5fca240553..3fdf376d0c 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_model.py +++ b/openpype/hosts/nuke/plugins/publish/collect_model.py @@ -2,7 +2,6 @@ import pyblish.api import nuke -@pyblish.api.log class CollectModel(pyblish.api.InstancePlugin): """Collect Model node instance and its content """ @@ -14,12 +13,12 @@ class CollectModel(pyblish.api.InstancePlugin): def process(self, instance): - grpn = instance[0] + geo_node = instance.data["transientData"]["node"] # add family to familiess instance.data["families"].insert(0, instance.data["family"]) # make label nicer - instance.data["label"] = grpn.name() + instance.data["label"] = geo_node.name() # Get frame range handle_start = instance.context.data["handleStart"] @@ -29,7 +28,6 @@ class CollectModel(pyblish.api.InstancePlugin): # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, @@ -45,5 +43,4 @@ class CollectModel(pyblish.api.InstancePlugin): "frameStart": first_frame, "frameEnd": last_frame }) - self.log.info("Model content collected: `{}`".format(instance[:])) self.log.info("Model instance collected: `{}`".format(instance)) diff --git a/openpype/hosts/nuke/plugins/publish/collect_reads.py b/openpype/hosts/nuke/plugins/publish/collect_reads.py index b79d9646d5..831ae29a27 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_reads.py +++ b/openpype/hosts/nuke/plugins/publish/collect_reads.py @@ -2,12 +2,10 @@ import os import re import nuke import pyblish.api - from openpype.client import get_asset_by_name from openpype.pipeline import legacy_io -@pyblish.api.log class CollectNukeReads(pyblish.api.InstancePlugin): """Collect all read nodes.""" @@ -17,6 +15,8 @@ class CollectNukeReads(pyblish.api.InstancePlugin): families = ["source"] def process(self, instance): + node = instance.data["transientData"]["node"] + project_name = legacy_io.active_project() asset_name = legacy_io.Session["AVALON_ASSET"] asset_doc = get_asset_by_name(project_name, asset_name) @@ -25,7 +25,6 @@ class CollectNukeReads(pyblish.api.InstancePlugin): self.log.debug("checking instance: {}".format(instance)) - node = instance[0] if node.Class() != "Read": return @@ -99,15 +98,11 @@ class CollectNukeReads(pyblish.api.InstancePlugin): } instance.data["representations"].append(representation) - transfer = False - if "publish" in node.knobs(): - transfer = node["publish"] - + transfer = node["publish"] if "publish" in node.knobs() else False instance.data['transfer'] = transfer # Add version data to instance version_data = { - "handles": handle_start, "handleStart": handle_start, "handleEnd": handle_end, "frameStart": first_frame + handle_start, @@ -127,7 +122,8 @@ class CollectNukeReads(pyblish.api.InstancePlugin): "frameStart": first_frame, "frameEnd": last_frame, "colorspace": colorspace, - "handles": 
int(asset_doc["data"].get("handles", 0)), + "handleStart": handle_start, + "handleEnd": handle_end, "step": 1, "fps": int(nuke.root()['fps'].value()) }) diff --git a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py index bfe32d8fd1..5701087697 100644 --- a/openpype/hosts/nuke/plugins/publish/collect_slate_node.py +++ b/openpype/hosts/nuke/plugins/publish/collect_slate_node.py @@ -8,10 +8,10 @@ class CollectSlate(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.09 label = "Collect Slate Node" hosts = ["nuke"] - families = ["render", "render.local", "render.farm"] + families = ["render"] def process(self, instance): - node = instance[0] + node = instance.data["transientData"]["node"] slate = next((n for n in nuke.allNodes() if "slate" in n.name().lower() @@ -35,7 +35,6 @@ class CollectSlate(pyblish.api.InstancePlugin): instance.data["slateNode"] = slate_node instance.data["slate"] = True instance.data["families"].append("slate") - instance.data["versionData"]["families"].append("slate") self.log.info( "Slate node is in node graph: `{}`".format(slate.name())) self.log.debug( diff --git a/openpype/hosts/nuke/plugins/publish/collect_workfile.py b/openpype/hosts/nuke/plugins/publish/collect_workfile.py new file mode 100644 index 0000000000..852042e6e9 --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/collect_workfile.py @@ -0,0 +1,40 @@ +import os +import nuke +import pyblish.api + + +class CollectWorkfile(pyblish.api.InstancePlugin): + """Collect current script for publish.""" + + order = pyblish.api.CollectorOrder + label = "Collect Workfile" + hosts = ['nuke'] + families = ["workfile"] + + def process(self, instance): # sourcery skip: avoid-builtin-shadow + + script_data = instance.context.data["scriptData"] + current_file = os.path.normpath(nuke.root().name()) + + # creating instances per write node + staging_dir = os.path.dirname(current_file) + base_name = os.path.basename(current_file) + + # creating representation + representation = { + 'name': 'nk', + 'ext': 'nk', + 'files': base_name, + "stagingDir": staging_dir, + } + + # creating instance data + instance.data.update({ + "name": base_name, + "representations": [representation] + }) + + # adding basic script data + instance.data.update(script_data) + + self.log.info("Collect script version") diff --git a/openpype/hosts/nuke/plugins/publish/collect_writes.py b/openpype/hosts/nuke/plugins/publish/collect_writes.py new file mode 100644 index 0000000000..2d1caacdc3 --- /dev/null +++ b/openpype/hosts/nuke/plugins/publish/collect_writes.py @@ -0,0 +1,196 @@ +import os +from pprint import pformat +import nuke +import pyblish.api +from openpype.hosts.nuke import api as napi +from openpype.pipeline import publish + + +class CollectNukeWrites(pyblish.api.InstancePlugin, + publish.ColormanagedPyblishPluginMixin): + """Collect all write nodes.""" + + order = pyblish.api.CollectorOrder + 0.0021 + label = "Collect Writes" + hosts = ["nuke", "nukeassist"] + families = ["render", "prerender", "image"] + + def process(self, instance): + self.log.debug(pformat(instance.data)) + creator_attributes = instance.data["creator_attributes"] + instance.data.update(creator_attributes) + + group_node = instance.data["transientData"]["node"] + render_target = instance.data["render_target"] + family = instance.data["family"] + families = instance.data["families"] + + # add targeted family to families + instance.data["families"].append( + "{}.{}".format(family, 
render_target)
+        )
+        if instance.data.get("review"):
+            instance.data["families"].append("review")
+
+        child_nodes = napi.get_instance_group_node_childs(instance)
+        instance.data["transientData"]["childNodes"] = child_nodes
+
+        write_node = None
+        for x in child_nodes:
+            if x.Class() == "Write":
+                write_node = x
+
+        if write_node is None:
+            self.log.warning(
+                "Created node '{}' is missing write node!".format(
+                    group_node.name()
+                )
+            )
+            return
+
+        instance.data["writeNode"] = write_node
+        self.log.debug("checking instance: {}".format(instance))
+
+        # Determine defined file type
+        ext = write_node["file_type"].value()
+
+        # Get frame range
+        handle_start = instance.context.data["handleStart"]
+        handle_end = instance.context.data["handleEnd"]
+        first_frame = int(nuke.root()["first_frame"].getValue())
+        last_frame = int(nuke.root()["last_frame"].getValue())
+        frame_length = int(last_frame - first_frame + 1)
+
+        if write_node["use_limit"].getValue():
+            first_frame = int(write_node["first"].getValue())
+            last_frame = int(write_node["last"].getValue())
+
+        write_file_path = nuke.filename(write_node)
+        output_dir = os.path.dirname(write_file_path)
+
+        # get colorspace and add to version data
+        colorspace = napi.get_colorspace_from_node(write_node)
+
+        self.log.debug('output dir: {}'.format(output_dir))
+
+        if render_target == "frames":
+            representation = {
+                'name': ext,
+                'ext': ext,
+                "stagingDir": output_dir,
+                "tags": []
+            }
+
+            # get file path knob
+            node_file_knob = write_node["file"]
+            # list file paths based on input frames
+            expected_paths = list(sorted({
+                node_file_knob.evaluate(frame)
+                for frame in range(first_frame, last_frame + 1)
+            }))
+
+            # convert only to base names
+            expected_filenames = [
+                os.path.basename(filepath)
+                for filepath in expected_paths
+            ]
+
+            # make sure the files exist in the output folder
+            collected_frames = [
+                filename
+                for filename in os.listdir(output_dir)
+                if filename in expected_filenames
+            ]
+
+            if collected_frames:
+                collected_frames_len = len(collected_frames)
+                frame_start_str = "%0{}d".format(
+                    len(str(last_frame))) % first_frame
+                representation['frameStart'] = frame_start_str
+
+                # in case slate is expected and not yet rendered
+                self.log.debug("_ frame_length: {}".format(frame_length))
+                self.log.debug("_ collected_frames_len: {}".format(
+                    collected_frames_len))
+
+                # this will only run if slate frame is not already
+                # rendered from previous publishes
+                if (
+                    "slate" in families
+                    and frame_length == collected_frames_len
+                    and family == "render"
+                ):
+                    frame_slate_str = (
+                        "{{:0{}d}}".format(len(str(last_frame)))
+                    ).format(first_frame - 1)
+
+                    slate_frame = collected_frames[0].replace(
+                        frame_start_str, frame_slate_str)
+                    collected_frames.insert(0, slate_frame)
+
+                if collected_frames_len == 1:
+                    representation['files'] = collected_frames.pop()
+                else:
+                    representation['files'] = collected_frames
+
+            # inject colorspace data
+            self.set_representation_colorspace(
+                representation, instance.context,
+                colorspace=colorspace
+            )
+
+            instance.data["representations"].append(representation)
+            self.log.info("Publishing rendered frames ...")
+
+        elif render_target == "farm":
+            farm_keys = ["farm_chunk", "farm_priority", "farm_concurrency"]
+            for key in farm_keys:
+                # Skip if key is not in creator attributes
+                if key not in creator_attributes:
+                    continue
+                # Add farm attributes to instance
+                instance.data[key] = creator_attributes[key]
+
+            # Farm rendering
+            instance.data["transfer"] = False
+            instance.data["farm"] = True
+            self.log.info("Farm 
rendering ON ...") + + # TODO: remove this when we have proper colorspace support + version_data = { + "colorspace": colorspace + } + + instance.data.update({ + "versionData": version_data, + "path": write_file_path, + "outputDir": output_dir, + "ext": ext, + "colorspace": colorspace + }) + + if family == "render": + instance.data.update({ + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, + }) + else: + instance.data.update({ + "handleStart": 0, + "handleEnd": 0, + "frameStart": first_frame, + "frameEnd": last_frame, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, + }) + + # make sure rendered sequence on farm will + # be used for extract review + if not instance.data.get("review"): + instance.data["useSequenceForReview"] = False + + self.log.debug("instance.data: {}".format(pformat(instance.data))) diff --git a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py index d1e5c4cc5a..5166fa4b2c 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_backdrop.py +++ b/openpype/hosts/nuke/plugins/publish/extract_backdrop.py @@ -26,8 +26,14 @@ class ExtractBackdropNode(publish.Extractor): families = ["nukenodes"] def process(self, instance): - tmp_nodes = list() - nodes = instance[1:] + tmp_nodes = [] + child_nodes = instance.data["transientData"]["childNodes"] + # all connections outside of backdrop + connections_in = instance.data["transientData"]["nodeConnectionsIn"] + connections_out = instance.data["transientData"]["nodeConnectionsOut"] + self.log.debug("_ connections_in: `{}`".format(connections_in)) + self.log.debug("_ connections_out: `{}`".format(connections_out)) + # Define extract output file path stagingdir = self.staging_dir(instance) filename = "{0}.nk".format(instance.name) @@ -35,20 +41,14 @@ class ExtractBackdropNode(publish.Extractor): # maintain selection with maintained_selection(): - # all connections outside of backdrop - connections_in = instance.data["nodeConnectionsIn"] - connections_out = instance.data["nodeConnectionsOut"] - self.log.debug("_ connections_in: `{}`".format(connections_in)) - self.log.debug("_ connections_out: `{}`".format(connections_out)) - - # create input nodes and name them as passing node (*_INP) + # create input child_nodes and name them as passing node (*_INP) for n, inputs in connections_in.items(): for i, input in inputs: inpn = nuke.createNode("Input") inpn["name"].setValue("{}_{}_INP".format(n.name(), i)) n.setInput(i, inpn) inpn.setXYpos(input.xpos(), input.ypos()) - nodes.append(inpn) + child_nodes.append(inpn) tmp_nodes.append(inpn) reset_selection() @@ -63,13 +63,13 @@ class ExtractBackdropNode(publish.Extractor): if d.name() in n.name()), 0), opn) opn.setInput(0, n) opn.autoplace() - nodes.append(opn) + child_nodes.append(opn) tmp_nodes.append(opn) reset_selection() - # select nodes to copy + # select child_nodes to copy reset_selection() - select_nodes(nodes) + select_nodes(child_nodes) # create tmp nk file # save file to the path nuke.nodeCopy(path) @@ -104,6 +104,3 @@ class ExtractBackdropNode(publish.Extractor): self.log.info("Extracted instance '{}' to: {}".format( instance.name, path)) - - self.log.info("Data {}".format( - instance.data)) diff --git a/openpype/hosts/nuke/plugins/publish/extract_camera.py b/openpype/hosts/nuke/plugins/publish/extract_camera.py index b751bfab03..4286f71e83 100644 
--- a/openpype/hosts/nuke/plugins/publish/extract_camera.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_camera.py
@@ -28,6 +28,7 @@ class ExtractCamera(publish.Extractor):
     ]
 
     def process(self, instance):
+        camera_node = instance.data["transientData"]["node"]
         handle_start = instance.context.data["handleStart"]
         handle_end = instance.context.data["handleEnd"]
         first_frame = int(nuke.root()["first_frame"].getValue())
@@ -38,7 +39,7 @@ class ExtractCamera(publish.Extractor):
         self.log.info("instance.data: `{}`".format(
             pformat(instance.data)))
 
-        rm_nodes = list()
+        rm_nodes = []
         self.log.info("Creating additional nodes")
         subset = instance.data["subset"]
         staging_dir = self.staging_dir(instance)
@@ -58,7 +59,7 @@ class ExtractCamera(publish.Extractor):
         with maintained_selection():
             # bake camera with axes onto world coordinate XYZ
             rm_n = bakeCameraWithAxeses(
-                nuke.toNode(instance.data["name"]), output_range)
+                camera_node, output_range)
             rm_nodes.append(rm_n)
 
             # create scene node
diff --git a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py
index 3047ad6724..b0b1a9f7b7 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_gizmo.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_gizmo.py
@@ -19,13 +19,14 @@ class ExtractGizmo(publish.Extractor):
     """
 
     order = pyblish.api.ExtractorOrder
-    label = "Extract Gizmo (Group)"
+    label = "Extract Gizmo (group)"
     hosts = ["nuke"]
     families = ["gizmo"]
 
     def process(self, instance):
-        tmp_nodes = list()
-        orig_grpn = instance[0]
+        tmp_nodes = []
+        orig_grpn = instance.data["transientData"]["node"]
+
         # Define extract output file path
         stagingdir = self.staging_dir(instance)
         filename = "{0}.nk".format(instance.name)
@@ -54,15 +55,6 @@ class ExtractGizmo(publish.Extractor):
             # convert gizmos to groups
             pnutils.bake_gizmos_recursively(copy_grpn)
 
-            # remove avalonknobs
-            knobs = copy_grpn.knobs()
-            avalon_knobs = [k for k in knobs.keys()
-                            for ak in ["avalon:", "ak:"]
-                            if ak in k]
-            avalon_knobs.append("publish")
-            for ak in avalon_knobs:
-                copy_grpn.removeKnob(knobs[ak])
-
             # add to temporary nodes
             tmp_nodes.append(copy_grpn)
diff --git a/openpype/hosts/nuke/plugins/publish/extract_model.py b/openpype/hosts/nuke/plugins/publish/extract_model.py
index d82cb3110b..814d404137 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_model.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_model.py
@@ -36,8 +36,9 @@ class ExtractModel(publish.Extractor):
         self.log.info("instance.data: `{}`".format(
             pformat(instance.data)))
 
-        rm_nodes = list()
-        model_node = instance[0]
+        rm_nodes = []
+        model_node = instance.data["transientData"]["node"]
+
         self.log.info("Creating additional nodes")
         subset = instance.data["subset"]
         staging_dir = self.staging_dir(instance)
diff --git a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py b/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py
index eb9bc0b429..e66cfd9018 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_ouput_node.py
@@ -16,13 +16,17 @@ class CreateOutputNode(pyblish.api.ContextPlugin):
     def process(self, context):
         # capture selection state
         with maintained_selection():
-            active_node = [node for inst in context
-                           for node in inst
-                           if "ak:family" in node.knobs()]
+
+            active_node = [
+                inst.data.get("transientData", {}).get("node")
+                for inst in context
+                if inst.data.get("transientData", {}).get("node")
+                if inst.data.get(
+                    "transientData", {}).get("node").Class() != "Root"
+            ]
 
             if active_node:
-                self.log.info(active_node)
-                active_node = active_node[0]
+                active_node = active_node.pop()
                 self.log.info(active_node)
                 active_node['selected'].setValue(True)
diff --git a/openpype/hosts/nuke/plugins/publish/extract_render_local.py b/openpype/hosts/nuke/plugins/publish/extract_render_local.py
index 843d588786..e2cf2addc5 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_render_local.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_render_local.py
@@ -1,47 +1,51 @@
 import os
+import shutil
 import pyblish.api
 import clique
 import nuke
-
+from openpype.hosts.nuke import api as napi
 from openpype.pipeline import publish
+from openpype.lib import collect_frames
 
 
-class NukeRenderLocal(publish.Extractor):
-    # TODO: rewrite docstring to nuke
+class NukeRenderLocal(publish.Extractor,
+                      publish.ColormanagedPyblishPluginMixin):
     """Render the current Nuke composition locally.
 
     Extracts the result of Write nodes by starting a local
     render with the current Nuke instance.
 
+    Allows using the last published frames and re-rendering only specific
+    ones (set in instance.data.get("frames_to_fix")).
     """
 
     order = pyblish.api.ExtractorOrder
     label = "Render Local"
     hosts = ["nuke"]
-    families = ["render.local", "prerender.local", "still.local"]
+    families = ["render.local", "prerender.local", "image.local"]
 
     def process(self, instance):
-        families = instance.data["families"]
+        child_nodes = (
+            instance.data.get("transientData", {}).get("childNodes")
+            or instance
+        )
 
         node = None
-        for x in instance:
+        for x in child_nodes:
             if x.Class() == "Write":
                 node = x
 
         self.log.debug("instance collected: {}".format(instance.data))
 
-        first_frame = instance.data.get("frameStartHandle", None)
-
-        last_frame = instance.data.get("frameEndHandle", None)
         node_subset_name = instance.data.get("name", None)
 
-        self.log.info("Starting render")
-        self.log.info("Start frame: {}".format(first_frame))
-        self.log.info("End frame: {}".format(last_frame))
+        first_frame = instance.data.get("frameStartHandle", None)
+        last_frame = instance.data.get("frameEndHandle", None)
+
+        filenames = []
         node_file = node["file"]
-        # Collecte expected filepaths for each frame
+        # Collect expected filepaths for each frame
         # - for still-image outputs a set of paths is created first,
         # which is then sorted and converted to a list
         expected_paths = list(sorted({
@@ -49,24 +53,40 @@ class NukeRenderLocal(publish.Extractor):
             for frame in range(first_frame, last_frame + 1)
         }))
         # Extract only filenames for representation
-        filenames = [
+        filenames.extend([
            os.path.basename(filepath)
            for filepath in expected_paths
-        ]
+        ])
 
         # Ensure output directory exists.
         
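# (descriptive note: all expected frames share one output directory,
         # so it is derived from the first expected path)
         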
out_dir = os.path.dirname(expected_paths[0])
         if not os.path.exists(out_dir):
             os.makedirs(out_dir)
 
-        # Render frames
-        nuke.execute(
-            node_subset_name,
-            int(first_frame),
-            int(last_frame)
-        )
+        frames_to_render = [(first_frame, last_frame)]
+
+        frames_to_fix = instance.data.get("frames_to_fix")
+        if instance.data.get("last_version_published_files") and frames_to_fix:
+            frames_to_render = self._get_frames_to_render(frames_to_fix)
+            anatomy = instance.context.data["anatomy"]
+            self._copy_last_published(anatomy, instance, out_dir,
+                                      filenames)
+
+        for render_first_frame, render_last_frame in frames_to_render:
+
+            self.log.info("Starting render")
+            self.log.info("Start frame: {}".format(render_first_frame))
+            self.log.info("End frame: {}".format(render_last_frame))
+
+            # Render frames
+            nuke.execute(
+                str(node_subset_name),
+                int(render_first_frame),
+                int(render_last_frame)
+            )
 
         ext = node["file_type"].value()
+        colorspace = napi.get_colorspace_from_node(node)
 
         if "representations" not in instance.data:
             instance.data["representations"] = []
@@ -90,6 +110,13 @@ class NukeRenderLocal(publish.Extractor):
             'files': filenames,
             "stagingDir": out_dir
         }
+
+        # inject colorspace data
+        self.set_representation_colorspace(
+            repre, instance.context,
+            colorspace=colorspace
+        )
+
         instance.data["representations"].append(repre)
 
         self.log.info("Extracted instance '{0}' to: {1}".format(
@@ -97,6 +124,7 @@ class NukeRenderLocal(publish.Extractor):
             out_dir
         ))
 
+        families = instance.data["families"]
         # redefinition of families
         if "render.local" in families:
             instance.data['family'] = 'render'
@@ -108,9 +136,9 @@ class NukeRenderLocal(publish.Extractor):
             families.remove('prerender.local')
             families.insert(0, "prerender")
             instance.data["anatomyData"]["family"] = "prerender"
-        elif "still.local" in families:
+        elif "image.local" in families:
             instance.data['family'] = 'image'
-            families.remove('still.local')
+            families.remove('image.local')
             instance.data["anatomyData"]["family"] = "image"
 
         instance.data["families"] = families
@@ -124,3 +152,58 @@ class NukeRenderLocal(publish.Extractor):
 
         self.log.info('Finished render')
         self.log.debug("_ instance.data: {}".format(instance.data))
+
+    def _copy_last_published(self, anatomy, instance, out_dir,
+                             expected_filenames):
+        """Copies last published files to temporary out_dir.
+
+        These are the base files which will be extended/fixed for specific
+        frames.
+        Renames each published file to the expected file name based on its
+        frame, e.g.
+        test_project_test_asset_subset_v005.1001.exr > new_render.1001.exr
+        """
+        last_published = instance.data["last_version_published_files"]
+        last_published_and_frames = collect_frames(last_published)
+
+        expected_and_frames = collect_frames(expected_filenames)
+        frames_and_expected = {v: k for k, v in expected_and_frames.items()}
+        for file_path, frame in last_published_and_frames.items():
+            file_path = anatomy.fill_root(file_path)
+            if not os.path.exists(file_path):
+                continue
+            target_file_name = frames_and_expected.get(frame)
+            if not target_file_name:
+                continue
+
+            out_path = os.path.join(out_dir, target_file_name)
+            self.log.debug("Copying '{}' -> '{}'".format(file_path, out_path))
+            shutil.copy(file_path, out_path)
+
+            # TODO shouldn't this be uncommented
+            # instance.context.data["cleanupFullPaths"].append(out_path)
+
+    def _get_frames_to_render(self, frames_to_fix):
+        """Return list of frame range tuples to render
+
+        Args:
+            frames_to_fix (str): specific frames or frame ranges to be
+                re-rendered, e.g. "1005,1009-1010"
+        Returns:
+            (list): [(1005, 1005), (1009, 1010)]
+        """
+        frames_to_render = []
+
+        for frame_range in frames_to_fix.split(","):
+            if frame_range.isdigit():
+                render_first_frame = int(frame_range)
+                render_last_frame = int(frame_range)
+            elif '-' in frame_range:
+                frames = frame_range.split('-')
+                render_first_frame = int(frames[0])
+                render_last_frame = int(frames[1])
+            else:
+                raise ValueError("Wrong format of frames to fix: {}"
+                                 .format(frames_to_fix))
+            frames_to_render.append((render_first_frame,
+                                     render_last_frame))
+        return frames_to_render
diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data.py b/openpype/hosts/nuke/plugins/publish/extract_review_data.py
index 3c85b21b08..c221af40fb 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_review_data.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_review_data.py
@@ -23,9 +23,9 @@ class ExtractReviewData(publish.Extractor):
         representations = instance.data.get("representations", [])
 
         # review can be removed since `ProcessSubmittedJobOnFarm` will create
-        # reviable representation if needed
+        # reviewable representation if needed
         if (
-            "render.farm" in instance.data["families"]
+            instance.data.get("farm")
             and "review" in instance.data["families"]
         ):
             instance.data["families"].remove("review")
diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py
index 67779e9599..e4b7b155cd 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_lut.py
@@ -49,7 +49,12 @@ class ExtractReviewDataLut(publish.Extractor):
                 exporter.stagingDir, exporter.file).replace("\\", "/")
             instance.data["representations"] += data["representations"]
 
-        if "render.farm" in families:
+        # review can be removed since `ProcessSubmittedJobOnFarm` will create
+        # reviewable representation if needed
+        if (
+            instance.data.get("farm")
+            and "review" in instance.data["families"]
+        ):
             instance.data["families"].remove("review")
 
         self.log.debug(
diff --git a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
index 3fcfc2a4b5..956d1a54a3 100644
--- a/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
+++ b/openpype/hosts/nuke/plugins/publish/extract_review_data_mov.py
@@ -105,10 +105,7 @@ class ExtractReviewDataMov(publish.Extractor):
                     self, instance, o_name, o_data["extension"],
                     multiple_presets)
 
-        if (
-            
"render.farm" in families or - "prerender.farm" in families - ): + if instance.data.get("farm"): if "review" in instance.data["families"]: instance.data["families"].remove("review") diff --git a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py index 19eae9638b..21eefda249 100644 --- a/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/nuke/plugins/publish/extract_thumbnail.py @@ -4,10 +4,8 @@ import nuke import pyblish.api from openpype.pipeline import publish -from openpype.hosts.nuke.api import ( - maintained_selection, - get_view_process_node -) +from openpype.hosts.nuke import api as napi +from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings if sys.version_info[0] >= 3: @@ -32,13 +30,13 @@ class ExtractThumbnail(publish.Extractor): bake_viewer_process = True bake_viewer_input_process = True nodes = {} - + reposition_nodes = None def process(self, instance): - if "render.farm" in instance.data["families"]: + if instance.data.get("farm"): return - with maintained_selection(): + with napi.maintained_selection(): self.log.debug("instance: {}".format(instance)) self.log.debug("instance.data[families]: {}".format( instance.data["families"])) @@ -69,7 +67,7 @@ class ExtractThumbnail(publish.Extractor): bake_viewer_input_process_node = kwargs[ "bake_viewer_input_process"] - node = instance[0] # group node + node = instance.data["transientData"]["node"] # group node self.log.info("Creating staging dir...") if "representations" not in instance.data: @@ -127,24 +125,38 @@ class ExtractThumbnail(publish.Extractor): temporary_nodes.append(rnode) previous_node = rnode - reformat_node = nuke.createNode("Reformat") - ref_node = self.nodes.get("Reformat", None) - if ref_node: - for k, v in ref_node: - self.log.debug("k, v: {0}:{1}".format(k, v)) - if isinstance(v, unicode): - v = str(v) - reformat_node[k].setValue(v) + if self.reposition_nodes is None: + # [deprecated] create reformat node old way + reformat_node = nuke.createNode("Reformat") + ref_node = self.nodes.get("Reformat", None) + if ref_node: + for k, v in ref_node: + self.log.debug("k, v: {0}:{1}".format(k, v)) + if isinstance(v, unicode): + v = str(v) + reformat_node[k].setValue(v) - reformat_node.setInput(0, previous_node) - previous_node = reformat_node - temporary_nodes.append(reformat_node) + reformat_node.setInput(0, previous_node) + previous_node = reformat_node + temporary_nodes.append(reformat_node) + else: + # create reformat node new way + for repo_node in self.reposition_nodes: + node_class = repo_node["node_class"] + knobs = repo_node["knobs"] + node = nuke.createNode(node_class) + set_node_knobs_from_settings(node, knobs) + + # connect in order + node.setInput(0, previous_node) + previous_node = node + temporary_nodes.append(node) # only create colorspace baking if toggled on if bake_viewer_process: if bake_viewer_input_process_node: # get input process and connect it to baking - ipn = get_view_process_node() + ipn = napi.get_view_process_node() if ipn is not None: ipn.setInput(0, previous_node) previous_node = ipn diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml b/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml index 1097909a5f..0422917e9c 100644 --- a/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml +++ b/openpype/hosts/nuke/plugins/publish/help/validate_asset_name.xml @@ -1,7 +1,7 @@ - Shot/Asset mame + Shot/Asset name ## Invalid Shot/Asset name in subset 
diff --git a/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml b/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml index cdf85102bc..1717622a45 100644 --- a/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml +++ b/openpype/hosts/nuke/plugins/publish/help/validate_write_nodes.xml @@ -3,16 +3,30 @@ Knobs values -## Invalid node's knobs values + ## Invalid node's knobs values -Following write node knobs needs to be repaired: + Following write node knobs needs to be repaired: -{xml_msg} + {xml_msg} -### How to repair? + ### How to repair? -1. Use Repair button. -2. Hit Reload button on the publisher. + 1. Use Repair button. + 2. Hit Reload button on the publisher. + + + + Legacy knob types + + ## Knobs are in obsolete configuration + + Settings needs to be fixed. + + ### How to repair? + + Contact your supervisor or fix it in project settings at + 'project_settings/nuke/imageio/nodes/requiredNodes' at knobs. + Each '__legacy__' type has to be defined accordingly to its type. \ No newline at end of file diff --git a/openpype/hosts/nuke/plugins/publish/precollect_instances.py b/openpype/hosts/nuke/plugins/publish/precollect_instances.py deleted file mode 100644 index b396056eb9..0000000000 --- a/openpype/hosts/nuke/plugins/publish/precollect_instances.py +++ /dev/null @@ -1,158 +0,0 @@ -import nuke -import pyblish.api - -from openpype.hosts.nuke.api.lib import ( - add_publish_knob, - get_avalon_knob_data -) - - -@pyblish.api.log -class PreCollectNukeInstances(pyblish.api.ContextPlugin): - """Collect all nodes with Avalon knob.""" - - order = pyblish.api.CollectorOrder - 0.49 - label = "Pre-collect Instances" - hosts = ["nuke", "nukeassist"] - - # presets - sync_workfile_version_on_families = [] - - def process(self, context): - instances = [] - - root = nuke.root() - - self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes())) - for node in nuke.allNodes(): - - if node.Class() in ["Viewer", "Dot"]: - continue - - try: - if node["disable"].value(): - continue - except Exception as E: - self.log.warning(E) - - # get data from avalon knob - avalon_knob_data = get_avalon_knob_data( - node, ["avalon:", "ak:"]) - - self.log.debug("avalon_knob_data: {}".format(avalon_knob_data)) - - if not avalon_knob_data: - continue - - if avalon_knob_data["id"] != "pyblish.avalon.instance": - continue - - # establish families - family = avalon_knob_data["family"] - families_ak = avalon_knob_data.get("families", []) - families = [] - - # except disabled nodes but exclude backdrops in test - if ("nukenodes" not in family) and (node["disable"].value()): - continue - - subset = avalon_knob_data.get( - "subset", None) or node["name"].value() - - # Create instance - instance = context.create_instance(subset) - instance.append(node) - - suspend_publish = False - if "suspend_publish" in node.knobs(): - suspend_publish = node["suspend_publish"].value() - instance.data["suspend_publish"] = suspend_publish - - # get review knob value - review = False - if "review" in node.knobs(): - review = node["review"].value() - - if review: - families.append("review") - - # Add all nodes in group instances. 
- if node.Class() == "Group": - # only alter families for render family - if families_ak and "write" in families_ak.lower(): - target = node["render"].value() - if target == "Use existing frames": - # Local rendering - self.log.info("flagged for no render") - families.append(families_ak.lower()) - elif target == "Local": - # Local rendering - self.log.info("flagged for local render") - families.append("{}.local".format(family)) - family = families_ak.lower() - elif target == "On farm": - # Farm rendering - self.log.info("flagged for farm render") - instance.data["transfer"] = False - instance.data["farm"] = True - families.append("{}.farm".format(family)) - family = families_ak.lower() - - node.begin() - for i in nuke.allNodes(): - instance.append(i) - node.end() - - if not families and families_ak and family not in [ - "render", "prerender"]: - families.append(families_ak.lower()) - - self.log.debug("__ family: `{}`".format(family)) - self.log.debug("__ families: `{}`".format(families)) - - # Get format - format_ = root['format'].value() - resolution_width = format_.width() - resolution_height = format_.height() - pixel_aspect = format_.pixelAspect() - - # get publish knob value - if "publish" not in node.knobs(): - add_publish_knob(node) - - # sync workfile version - _families_test = [family] + families - self.log.debug("__ _families_test: `{}`".format(_families_test)) - for family_test in _families_test: - if family_test in self.sync_workfile_version_on_families: - self.log.debug( - "Syncing version with workfile for '{}'".format( - family_test - ) - ) - # get version to instance for integration - instance.data['version'] = instance.context.data['version'] - - instance.data.update({ - "subset": subset, - "asset": avalon_knob_data["asset"], - "label": node.name(), - "name": node.name(), - "subset": subset, - "family": family, - "families": families, - "avalonKnob": avalon_knob_data, - "step": 1, - "publish": node.knob('publish').value(), - "fps": nuke.root()['fps'].value(), - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "pixelAspect": pixel_aspect, - "review": review, - "representations": [] - - }) - self.log.info("collected instance: {}".format(instance.data)) - instances.append(instance) - - self.log.debug("context: {}".format(context)) diff --git a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py b/openpype/hosts/nuke/plugins/publish/precollect_workfile.py deleted file mode 100644 index 316c651b66..0000000000 --- a/openpype/hosts/nuke/plugins/publish/precollect_workfile.py +++ /dev/null @@ -1,107 +0,0 @@ -import os - -import nuke - -import pyblish.api - -from openpype.lib import get_version_from_path -from openpype.hosts.nuke.api.lib import ( - add_publish_knob, - get_avalon_knob_data -) -from openpype.pipeline import KnownPublishError - - -class CollectWorkfile(pyblish.api.ContextPlugin): - """Collect current script for publish.""" - - order = pyblish.api.CollectorOrder - 0.50 - label = "Pre-collect Workfile" - hosts = ['nuke'] - - def process(self, context): # sourcery skip: avoid-builtin-shadow - root = nuke.root() - - current_file = os.path.normpath(nuke.root().name()) - - if current_file.lower() == "root": - raise KnownPublishError( - "Workfile is not correct file name. \n" - "Use workfile tool to manage the name correctly." 
- ) - - knob_data = get_avalon_knob_data(root) - - add_publish_knob(root) - - family = "workfile" - task = os.getenv("AVALON_TASK", None) - # creating instances per write node - staging_dir = os.path.dirname(current_file) - base_name = os.path.basename(current_file) - subset = family + task.capitalize() - - # Get frame range - first_frame = int(root["first_frame"].getValue()) - last_frame = int(root["last_frame"].getValue()) - - handle_start = int(knob_data.get("handleStart", 0)) - handle_end = int(knob_data.get("handleEnd", 0)) - - # Get format - format = root['format'].value() - resolution_width = format.width() - resolution_height = format.height() - pixel_aspect = format.pixelAspect() - - # Create instance - instance = context.create_instance(subset) - instance.add(root) - - script_data = { - "asset": os.getenv("AVALON_ASSET", None), - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "pixelAspect": pixel_aspect, - - # backward compatibility - "handles": handle_start, - - "handleStart": handle_start, - "handleEnd": handle_end, - "step": 1, - "fps": root['fps'].value(), - - "currentFile": current_file, - "version": int(get_version_from_path(current_file)), - - "host": pyblish.api.current_host(), - "hostVersion": nuke.NUKE_VERSION_STRING - } - context.data.update(script_data) - - # creating representation - representation = { - 'name': 'nk', - 'ext': 'nk', - 'files': base_name, - "stagingDir": staging_dir, - } - - # creating instance data - instance.data.update({ - "subset": subset, - "label": base_name, - "name": base_name, - "publish": root.knob('publish').value(), - "family": family, - "families": [family], - "representations": [representation] - }) - - # adding basic script data - instance.data.update(script_data) - - self.log.info('Publishing script version') diff --git a/openpype/hosts/nuke/plugins/publish/precollect_writes.py b/openpype/hosts/nuke/plugins/publish/precollect_writes.py deleted file mode 100644 index 17c4bc30cf..0000000000 --- a/openpype/hosts/nuke/plugins/publish/precollect_writes.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -import re -from pprint import pformat -import nuke -import pyblish.api - -from openpype.client import ( - get_last_version_by_subset_name, - get_representations, -) -from openpype.pipeline import ( - legacy_io, - get_representation_path, -) - - -@pyblish.api.log -class CollectNukeWrites(pyblish.api.InstancePlugin): - """Collect all write nodes.""" - - order = pyblish.api.CollectorOrder - 0.48 - label = "Pre-collect Writes" - hosts = ["nuke", "nukeassist"] - families = ["write"] - - def process(self, instance): - _families_test = [instance.data["family"]] + instance.data["families"] - self.log.debug("_families_test: {}".format(_families_test)) - - node = None - for x in instance: - if x.Class() == "Write": - node = x - - if node is None: - return - - instance.data["writeNode"] = node - self.log.debug("checking instance: {}".format(instance)) - - # Determine defined file type - ext = node["file_type"].value() - - # Determine output type - output_type = "img" - if ext == "mov": - output_type = "mov" - - # Get frame range - handle_start = instance.context.data["handleStart"] - handle_end = instance.context.data["handleEnd"] - first_frame = int(nuke.root()["first_frame"].getValue()) - last_frame = int(nuke.root()["last_frame"].getValue()) - frame_length = int(last_frame - first_frame + 1) - - if node["use_limit"].getValue(): - 
first_frame = int(node["first"].getValue()) - last_frame = int(node["last"].getValue()) - - # Prepare expected output paths by evaluating each frame of write node - # - paths are first collected to set to avoid duplicated paths, then - # sorted and converted to list - node_file = node["file"] - expected_paths = list(sorted({ - node_file.evaluate(frame) - for frame in range(first_frame, last_frame + 1) - })) - expected_filenames = [ - os.path.basename(filepath) - for filepath in expected_paths - ] - path = nuke.filename(node) - output_dir = os.path.dirname(path) - - self.log.debug('output dir: {}'.format(output_dir)) - - # create label - name = node.name() - # Include start and end render frame in label - label = "{0} ({1}-{2})".format( - name, - int(first_frame), - int(last_frame) - ) - - if [fm for fm in _families_test - if fm in ["render", "prerender", "still"]]: - if "representations" not in instance.data: - instance.data["representations"] = list() - - representation = { - 'name': ext, - 'ext': ext, - "stagingDir": output_dir, - "tags": list() - } - - try: - collected_frames = [ - filename - for filename in os.listdir(output_dir) - if filename in expected_filenames - ] - if collected_frames: - collected_frames_len = len(collected_frames) - frame_start_str = "%0{}d".format( - len(str(last_frame))) % first_frame - representation['frameStart'] = frame_start_str - - # in case slate is expected and not yet rendered - self.log.debug("_ frame_length: {}".format(frame_length)) - self.log.debug( - "_ collected_frames_len: {}".format( - collected_frames_len)) - # this will only run if slate frame is not already - # rendered from previews publishes - if "slate" in _families_test \ - and (frame_length == collected_frames_len) \ - and ("prerender" not in _families_test): - frame_slate_str = "%0{}d".format( - len(str(last_frame))) % (first_frame - 1) - slate_frame = collected_frames[0].replace( - frame_start_str, frame_slate_str) - collected_frames.insert(0, slate_frame) - - if collected_frames_len == 1: - representation['files'] = collected_frames.pop() - if "still" in _families_test: - instance.data['family'] = 'image' - instance.data["families"].remove('still') - else: - representation['files'] = collected_frames - instance.data["representations"].append(representation) - except Exception: - instance.data["representations"].append(representation) - self.log.debug("couldn't collect frames: {}".format(label)) - - # Add version data to instance - colorspace = node["colorspace"].value() - - # remove default part of the string - if "default (" in colorspace: - colorspace = re.sub(r"default.\(|\)", "", colorspace) - self.log.debug("colorspace: `{}`".format(colorspace)) - - version_data = { - "families": [ - _f.replace(".local", "").replace(".farm", "") - for _f in _families_test if "write" != _f - ], - "colorspace": colorspace - } - - group_node = [x for x in instance if x.Class() == "Group"][0] - dl_chunk_size = 1 - if "deadlineChunkSize" in group_node.knobs(): - dl_chunk_size = group_node["deadlineChunkSize"].value() - - dl_priority = 50 - if "deadlinePriority" in group_node.knobs(): - dl_priority = group_node["deadlinePriority"].value() - - dl_concurrent_tasks = 0 - if "deadlineConcurrentTasks" in group_node.knobs(): - dl_concurrent_tasks = group_node["deadlineConcurrentTasks"].value() - - instance.data.update({ - "versionData": version_data, - "path": path, - "outputDir": output_dir, - "ext": ext, - "label": label, - "outputType": output_type, - "colorspace": colorspace, - "deadlineChunkSize": 
dl_chunk_size, - "deadlinePriority": dl_priority, - "deadlineConcurrentTasks": dl_concurrent_tasks - }) - - if self.is_prerender(_families_test): - instance.data.update({ - "handleStart": 0, - "handleEnd": 0, - "frameStart": first_frame, - "frameEnd": last_frame, - "frameStartHandle": first_frame, - "frameEndHandle": last_frame, - }) - else: - instance.data.update({ - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "frameStartHandle": first_frame, - "frameEndHandle": last_frame, - }) - - # make sure rendered sequence on farm will - # be used for exctract review - if not instance.data["review"]: - instance.data["useSequenceForReview"] = False - - self.log.debug("instance.data: {}".format(pformat(instance.data))) - - def is_prerender(self, families): - return next((f for f in families if "prerender" in f), None) diff --git a/openpype/hosts/nuke/plugins/publish/validate_asset_name.py b/openpype/hosts/nuke/plugins/publish/validate_asset_name.py index 52731140ff..df05f76a5b 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/nuke/plugins/publish/validate_asset_name.py @@ -2,17 +2,16 @@ """Validate if instance asset is the same as context asset.""" from __future__ import absolute_import -import nuke import pyblish.api import openpype.hosts.nuke.api.lib as nlib -import openpype.hosts.nuke.api as nuke_api + from openpype.pipeline.publish import ( ValidateContentsOrder, PublishXmlValidationError, + OptionalPyblishPluginMixin ) - class SelectInvalidInstances(pyblish.api.Action): """Select invalid instances in Outliner.""" @@ -51,9 +50,10 @@ class SelectInvalidInstances(pyblish.api.Action): self.deselect() def select(self, instances): - nlib.select_nodes( - [nuke.toNode(str(x)) for x in instances] - ) + for inst in instances: + if inst.data.get("transientData", {}).get("node"): + select_node = inst.data["transientData"]["node"] + select_node["selected"].setValue(True) def deselect(self): nlib.reset_selection() @@ -82,16 +82,20 @@ class RepairSelectInvalidInstances(pyblish.api.Action): # Apply pyblish.logic to get the instances for the plug-in instances = pyblish.api.instances_by_plugin(failed, plugin) + self.log.debug(instances) context_asset = context.data["assetEntity"]["name"] for instance in instances: - origin_node = instance[0] - nuke_api.lib.recreate_instance( - origin_node, avalon_data={"asset": context_asset} - ) + node = instance.data["transientData"]["node"] + node_data = nlib.get_node_data(node, nlib.INSTANCE_DATA_KNOB) + node_data["asset"] = context_asset + nlib.set_node_data(node, nlib.INSTANCE_DATA_KNOB, node_data) -class ValidateCorrectAssetName(pyblish.api.InstancePlugin): +class ValidateCorrectAssetName( + pyblish.api.InstancePlugin, + OptionalPyblishPluginMixin +): """Validator to check if instance asset match context asset. 
 
     When working in per-shot style you always publish data in context of
@@ -110,8 +114,12 @@ class ValidateCorrectAssetName(
     optional = True
 
     def process(self, instance):
+        if not self.is_active(instance.data):
+            return
+
         asset = instance.data.get("asset")
         context_asset = instance.context.data["assetEntity"]["name"]
+        node = instance.data["transientData"]["node"]
 
         msg = (
             "Instance `{}` has wrong shot/asset name:\n"
@@ -123,7 +131,7 @@ class ValidateCorrectAssetName(
         if asset != context_asset:
             raise PublishXmlValidationError(
                 self, msg, formatting_data={
-                    "node_name": instance[0]["name"].value(),
+                    "node_name": node.name(),
                     "wrong_name": asset,
                     "correct_name": context_asset
                 }
diff --git a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py
index 17dc79dc56..ad60089952 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_backdrop.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_backdrop.py
@@ -1,8 +1,12 @@
 import nuke
 import pyblish
-from openpype.hosts.nuke.api.lib import maintained_selection
-from openpype.pipeline import PublishXmlValidationError
+from openpype.hosts.nuke import api as napi
+from openpype.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin
+)
 
 
 class SelectCenterInNodeGraph(pyblish.api.Action):
     """
@@ -25,14 +29,14 @@ class SelectCenterInNodeGraph(pyblish.api.Action):
         # Apply pyblish.logic to get the instances for the plug-in
         instances = pyblish.api.instances_by_plugin(failed, plugin)
 
-        all_xC = list()
-        all_yC = list()
+        all_xC = []
+        all_yC = []
 
         # maintain selection
-        with maintained_selection():
+        with napi.maintained_selection():
             # collect all failed nodes xpos and ypos
             for instance in instances:
-                bdn = instance[0]
+                bdn = instance.data["transientData"]["node"]
 
                 xC = bdn.xpos() + bdn.screenWidth() / 2
                 yC = bdn.ypos() + bdn.screenHeight() / 2
@@ -46,13 +50,15 @@ class SelectCenterInNodeGraph(pyblish.api.Action):
         nuke.zoom(2, [min(all_xC), min(all_yC)])
 
 
-@pyblish.api.log
-class ValidateBackdrop(pyblish.api.InstancePlugin):
+class ValidateBackdrop(
+    pyblish.api.InstancePlugin,
+    OptionalPyblishPluginMixin
+):
     """
     Validate number of nodes on the backdrop in case the user
-    forgoten to add nodes above the publishing backdrop node.
+    forgot to add nodes above the publishing backdrop node.
     """
-    order = pyblish.api.ValidatorOrder
+    order = ValidateContentsOrder
     optional = True
     families = ["nukenodes"]
     label = "Validate Backdrop"
@@ -60,7 +66,11 @@ class ValidateBackdrop(pyblish.api.InstancePlugin):
     actions = [SelectCenterInNodeGraph]
 
     def process(self, instance):
-        connections_out = instance.data["nodeConnectionsOut"]
+        if not self.is_active(instance.data):
+            return
+
+        child_nodes = instance.data["transientData"]["childNodes"]
+        connections_out = instance.data["transientData"]["nodeConnectionsOut"]
 
         msg_multiple_outputs = (
             "Only one outgoing connection from "
@@ -78,10 +88,10 @@ class ValidateBackdrop(pyblish.api.InstancePlugin):
 
         self.log.debug(
             "Amount of nodes on instance: {}".format(
-                len(instance))
+                len(child_nodes))
         )
 
-        if len(instance) == 1:
+        if not child_nodes:
             raise PublishXmlValidationError(
                 self,
                 msg_no_nodes,
diff --git a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py
index 2321bd1fd4..878d938bea 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_gizmo.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_gizmo.py
@@ -1,6 +1,6 @@
 import pyblish
 from openpype.pipeline import PublishXmlValidationError
-from openpype.hosts.nuke.api import maintained_selection
+from openpype.hosts.nuke import api as napi
 import nuke
 
 
@@ -26,45 +26,44 @@ class OpenFailedGroupNode(pyblish.api.Action):
         instances = pyblish.api.instances_by_plugin(failed, plugin)
 
         # maintain selection
-        with maintained_selection():
+        with napi.maintained_selection():
             # collect all failed nodes xpos and ypos
             for instance in instances:
-                grpn = instance[0]
+                grpn = instance.data["transientData"]["node"]
 
                 nuke.showDag(grpn)
 
 
-@pyblish.api.log
 class ValidateGizmo(pyblish.api.InstancePlugin):
     """Validate number of output nodes in gizmo (group) node"""
 
     order = pyblish.api.ValidatorOrder
     optional = True
     families = ["gizmo"]
-    label = "Validate Gizmo (Group)"
+    label = "Validate Gizmo (group)"
     hosts = ["nuke"]
     actions = [OpenFailedGroupNode]
 
     def process(self, instance):
-        grpn = instance[0]
+        grpn = instance.data["transientData"]["node"]
 
         with grpn:
             connections_out = nuke.allNodes('Output')
-            msg_multiple_outputs = (
-                "Only one outcoming connection from "
-                "\"{}\" is allowed").format(instance.data["name"])
-
             if len(connections_out) > 1:
+                msg_multiple_outputs = (
+                    "Only one outgoing connection from "
+                    "\"{}\" is allowed").format(instance.data["name"])
+
                 raise PublishXmlValidationError(
                     self, msg_multiple_outputs, "multiple_outputs",
                     {"node_name": grpn["name"].value()}
                 )
 
             connections_in = nuke.allNodes('Input')
-            msg_missing_inputs = (
-                "At least one Input node has to be inside Group: "
-                "\"{}\"").format(instance.data["name"])
-
             if len(connections_in) == 0:
+                msg_missing_inputs = (
+                    "At least one Input node has to be inside Group: "
+                    "\"{}\"").format(instance.data["name"])
+
                 raise PublishXmlValidationError(
                     self, msg_missing_inputs, "no_inputs",
                     {"node_name": grpn["name"].value()}
diff --git a/openpype/hosts/nuke/plugins/publish/validate_knobs.py b/openpype/hosts/nuke/plugins/publish/validate_knobs.py
index d44f27791a..db21cdc7c5 100644
--- a/openpype/hosts/nuke/plugins/publish/validate_knobs.py
+++ b/openpype/hosts/nuke/plugins/publish/validate_knobs.py
@@ -61,17 +61,11 @@ class ValidateKnobs(pyblish.api.ContextPlugin):
         invalid_knobs = []
 
         for instance in context:
-            # Filter publisable instances.
-            if not instance.data["publish"]:
-                continue
 
             # Filter families.
             
families = [instance.data["family"]] families += instance.data.get("families", []) - if not families: - continue - # Get all knobs to validate. knobs = {} for family in families: diff --git a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py b/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py index 1e59880f90..dbcd216a84 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py +++ b/openpype/hosts/nuke/plugins/publish/validate_output_resolution.py @@ -1,12 +1,19 @@ import pyblish.api -from openpype.hosts.nuke.api import maintained_selection -from openpype.pipeline import PublishXmlValidationError +from openpype.hosts.nuke import api as napi from openpype.pipeline.publish import RepairAction +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) + import nuke -class ValidateOutputResolution(pyblish.api.InstancePlugin): +class ValidateOutputResolution( + OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin +): """Validates Output Resolution. It is making sure the resolution of write's input is the same as @@ -15,8 +22,8 @@ class ValidateOutputResolution(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder optional = True - families = ["render", "render.local", "render.farm"] - label = "Write Resolution" + families = ["render"] + label = "Write resolution" hosts = ["nuke"] actions = [RepairAction] @@ -24,14 +31,22 @@ class ValidateOutputResolution(pyblish.api.InstancePlugin): resolution_msg = "Reformat is set to wrong format" def process(self, instance): + if not self.is_active(instance.data): + return + invalid = self.get_invalid(instance) if invalid: raise PublishXmlValidationError(self, invalid) @classmethod def get_reformat(cls, instance): + child_nodes = ( + instance.data.get("transientData", {}).get("childNodes") + or instance + ) + reformat = None - for inode in instance: + for inode in child_nodes: if inode.Class() != "Reformat": continue reformat = inode @@ -64,21 +79,26 @@ class ValidateOutputResolution(pyblish.api.InstancePlugin): @classmethod def repair(cls, instance): + child_nodes = ( + instance.data.get("transientData", {}).get("childNodes") + or instance + ) + invalid = cls.get_invalid(instance) - grp_node = instance[0] + grp_node = instance.data["transientData"]["node"] if cls.missing_msg == invalid: # make sure we are inside of the group node with grp_node: # find input node and select it _input = None - for inode in instance: + for inode in child_nodes: if inode.Class() != "Input": continue _input = inode # add reformat node under it - with maintained_selection(): + with napi.maintained_selection(): _input['selected'].setValue(True) _rfn = nuke.createNode("Reformat", "name Reformat01") _rfn["resize"].setValue(0) diff --git a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py b/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py index dac240ad19..c26a03f31a 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py +++ b/openpype/hosts/nuke/plugins/publish/validate_proxy_mode.py @@ -17,7 +17,6 @@ class FixProxyMode(pyblish.api.Action): rootNode["proxy"].setValue(False) -@pyblish.api.log class ValidateProxyMode(pyblish.api.ContextPlugin): """Validate active proxy mode""" diff --git a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py deleted file mode 100644 index 2bf1ff81f8..0000000000 --- a/openpype/hosts/nuke/plugins/publish/validate_read_legacy.py +++ /dev/null @@ 
-1,87 +0,0 @@ -import os - -import nuke - -import toml -import pyblish.api -from bson.objectid import ObjectId - -from openpype.pipeline import ( - discover_loader_plugins, - load_container, -) - - -class RepairReadLegacyAction(pyblish.api.Action): - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if (result["error"] is not None and result["instance"] is not None - and result["instance"] not in failed): - failed.append(result["instance"]) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - - for instance in instances: - - data = toml.loads(instance[0]["avalon"].value()) - data["name"] = instance[0].name() - data["xpos"] = instance[0].xpos() - data["ypos"] = instance[0].ypos() - data["extension"] = os.path.splitext( - instance[0]["file"].value() - )[1][1:] - - data["connections"] = [] - for d in instance[0].dependent(): - for i in range(d.inputs()): - if d.input(i) == instance[0]: - data["connections"].append([i, d]) - - nuke.delete(instance[0]) - - loader_name = "LoadSequence" - if data["extension"] == "mov": - loader_name = "LoadMov" - - loader_plugin = None - for Loader in discover_loader_plugins(): - if Loader.__name__ != loader_name: - continue - - loader_plugin = Loader - - load_container( - Loader=loader_plugin, - representation=ObjectId(data["representation"]) - ) - - node = nuke.toNode(data["name"]) - for connection in data["connections"]: - connection[1].setInput(connection[0], node) - - node.setXYpos(data["xpos"], data["ypos"]) - - -class ValidateReadLegacy(pyblish.api.InstancePlugin): - """Validate legacy read instance[0]s.""" - - order = pyblish.api.ValidatorOrder - optional = True - families = ["read.legacy"] - label = "Read Legacy" - hosts = ["nuke"] - actions = [RepairReadLegacyAction] - - def process(self, instance): - - msg = "Clean up legacy read node \"{}\"".format(instance) - assert False, msg diff --git a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py index 237ff423e5..1c22c5b9d0 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py +++ b/openpype/hosts/nuke/plugins/publish/validate_rendered_frames.py @@ -4,7 +4,6 @@ import clique from openpype.pipeline import PublishXmlValidationError -@pyblish.api.log class RepairActionBase(pyblish.api.Action): on = "failed" icon = "wrench" @@ -23,6 +22,7 @@ class RepairActionBase(pyblish.api.Action): def repair_knob(self, instances, state): for instance in instances: + node = instance.data["transientData"]["node"] files_remove = [os.path.join(instance.data["outputDir"], f) for r in instance.data.get("representations", []) for f in r.get("files", []) @@ -31,7 +31,7 @@ class RepairActionBase(pyblish.api.Action): for f in files_remove: os.remove(f) self.log.debug("removing file: {}".format(f)) - instance[0]["render"].setValue(state) + node["render"].setValue(state) self.log.info("Rendering toggled to `{}`".format(state)) @@ -62,9 +62,10 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): actions = [RepairCollectionActionToLocal, RepairCollectionActionToFarm] def process(self, instance): + node = instance.data["transientData"]["node"] f_data = { - "node_name": instance[0]["name"].value() + "node_name": node.name() } for repre in instance.data["representations"]: diff --git 
a/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py b/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py index f0632f8080..57bfce7993 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py +++ b/openpype/hosts/nuke/plugins/publish/validate_script_attributes.py @@ -1,35 +1,35 @@ -from pprint import pformat +from copy import deepcopy import pyblish.api - -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) from openpype.pipeline.publish import RepairAction from openpype.hosts.nuke.api.lib import ( - get_avalon_knob_data, WorkfileSettings ) -import nuke -@pyblish.api.log -class ValidateScriptAttributes(pyblish.api.InstancePlugin): +class ValidateScriptAttributes( + OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin +): """ Validates file output. """ order = pyblish.api.ValidatorOrder + 0.1 families = ["workfile"] - label = "Validatte script attributes" + label = "Validate script attributes" hosts = ["nuke"] optional = True actions = [RepairAction] def process(self, instance): - root = nuke.root() - knob_data = get_avalon_knob_data(root) + if not self.is_active(instance.data): + return + + script_data = deepcopy(instance.context.data["scriptData"]) + asset = instance.data["assetEntity"] - # get asset data frame values - frame_start = asset["data"]["frameStart"] - frame_end = asset["data"]["frameEnd"] - handle_start = asset["data"]["handleStart"] - handle_end = asset["data"]["handleEnd"] # These attributes will be checked attributes = [ @@ -48,37 +48,11 @@ class ValidateScriptAttributes(pyblish.api.InstancePlugin): for attr in attributes if attr in asset["data"] } - # fix float to max 4 digints (only for evaluating) - fps_data = float("{0:.4f}".format( - asset_attributes["fps"])) # fix frame values to include handles - asset_attributes.update({ - "frameStart": frame_start - handle_start, - "frameEnd": frame_end + handle_end, - "fps": fps_data - }) - - self.log.debug(pformat( - asset_attributes - )) - - # Get format - _format = root["format"].value() - - # Get values from nukescript - script_attributes = { - "handleStart": int(knob_data["handleStart"]), - "handleEnd": int(knob_data["handleEnd"]), - "fps": float("{0:.4f}".format(root['fps'].value())), - "frameStart": int(root["first_frame"].getValue()), - "frameEnd": int(root["last_frame"].getValue()), - "resolutionWidth": _format.width(), - "resolutionHeight": _format.height(), - "pixelAspect": _format.pixelAspect() - } - self.log.debug(pformat( - script_attributes - )) + asset_attributes["fps"] = float("{0:.4f}".format( + asset_attributes["fps"])) + script_data["fps"] = float("{0:.4f}".format( + script_data["fps"])) # Compare asset's values Nukescript X Database not_matching = [] @@ -87,14 +61,14 @@ class ValidateScriptAttributes(pyblish.api.InstancePlugin): "Asset vs Script attribute \"{}\": {}, {}".format( attr, asset_attributes[attr], - script_attributes[attr] + script_data[attr] ) ) - if asset_attributes[attr] != script_attributes[attr]: + if asset_attributes[attr] != script_data[attr]: not_matching.append({ "name": attr, "expected": asset_attributes[attr], - "actual": script_attributes[attr] + "actual": script_data[attr] }) # Raise error if not matching diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py b/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py deleted file mode 100644 index 907577a97d..0000000000 --- 
a/openpype/hosts/nuke/plugins/publish/validate_write_deadline_tab.py +++ /dev/null @@ -1,53 +0,0 @@ -import pyblish.api -import openpype.hosts.nuke.lib - - -class RepairNukeWriteDeadlineTab(pyblish.api.Action): - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - # Get the errored instances - failed = [] - for result in context.data["results"]: - if (result["error"] is not None and result["instance"] is not None - and result["instance"] not in failed): - failed.append(result["instance"]) - - # Apply pyblish.logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(failed, plugin) - - for instance in instances: - group_node = [x for x in instance if x.Class() == "Group"][0] - - # Remove existing knobs. - knob_names = openpype.hosts.nuke.lib.get_deadline_knob_names() - for name, knob in group_node.knobs().items(): - if name in knob_names: - group_node.removeKnob(knob) - - openpype.hosts.nuke.lib.add_deadline_tab(group_node) - - -class ValidateNukeWriteDeadlineTab(pyblish.api.InstancePlugin): - """Ensure Deadline tab is present and current.""" - - order = pyblish.api.ValidatorOrder - label = "Deadline Tab" - hosts = ["nuke"] - optional = True - families = ["render"] - actions = [RepairNukeWriteDeadlineTab] - - def process(self, instance): - group_node = [x for x in instance if x.Class() == "Group"][0] - - knob_names = openpype.hosts.nuke.lib.get_deadline_knob_names() - missing_knobs = [] - for name in knob_names: - if name not in group_node.knobs().keys(): - missing_knobs.append(name) - assert not missing_knobs, "Missing knobs: {}".format(missing_knobs) diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py b/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py deleted file mode 100644 index 699526ef57..0000000000 --- a/openpype/hosts/nuke/plugins/publish/validate_write_legacy.py +++ /dev/null @@ -1,108 +0,0 @@ -import toml - -import nuke - -import pyblish.api - -from openpype.pipeline import discover_creator_plugins -from openpype.pipeline.publish import RepairAction -from openpype.hosts.nuke.api.lib import get_avalon_knob_data - - -class ValidateWriteLegacy(pyblish.api.InstancePlugin): - """Validate legacy write nodes.""" - - order = pyblish.api.ValidatorOrder - optional = True - families = ["write"] - label = "Validate Write Legacy" - hosts = ["nuke"] - actions = [RepairAction] - - def process(self, instance): - node = instance[0] - msg = "Clean up legacy write node \"{}\"".format(instance) - - if node.Class() not in ["Group", "Write"]: - return - - # test avalon knobs - family_knobs = ["ak:family", "avalon:family"] - family_test = [k for k in node.knobs().keys() if k in family_knobs] - self.log.debug("_ family_test: {}".format(family_test)) - - # test if render in family test knob - # and only one item should be available - assert len(family_test) == 1, msg + " > More avalon attributes" - assert "render" in node[family_test[0]].value() \ - or "still" in node[family_test[0]].value(), msg + \ - " > Not correct family" - # test if `file` knob in node, this way old - # non-group-node write could be detected - assert "file" not in node.knobs(), msg + \ - " > file knob should not be present" - - # check if write node is having old render targeting - assert "render_farm" not in node.knobs(), msg + \ - " > old way of setting render target" - - @classmethod - def repair(cls, instance): - node = instance[0] - - if "Write" in node.Class(): - data = toml.loads(node["avalon"].value()) - else: - data 
= get_avalon_knob_data(node) - - # collect reusable data - data["XYpos"] = (node.xpos(), node.ypos()) - data["input"] = node.input(0) - data["publish"] = node["publish"].value() - data["render"] = node["render"].value() - data["render_farm"] = node["render_farm"].value() - data["review"] = node["review"].value() - data["use_limit"] = node["use_limit"].value() - data["first"] = node["first"].value() - data["last"] = node["last"].value() - - family = data["family"] - cls.log.debug("_ orig node family: {}".format(family)) - - # define what family of write node should be recreated - if family == "render": - Create_name = "CreateWriteRender" - elif family == "prerender": - Create_name = "CreateWritePrerender" - elif family == "still": - Create_name = "CreateWriteStill" - - # get appropriate plugin class - creator_plugin = None - for Creator in discover_creator_plugins(): - if Creator.__name__ != Create_name: - continue - - creator_plugin = Creator - - # delete the legaci write node - nuke.delete(node) - - # create write node with creator - new_node_name = data["subset"] - creator_plugin(new_node_name, data["asset"]).process() - - node = nuke.toNode(new_node_name) - node.setXYpos(*data["XYpos"]) - node.setInput(0, data["input"]) - node["publish"].setValue(data["publish"]) - node["review"].setValue(data["review"]) - node["use_limit"].setValue(data["use_limit"]) - node["first"].setValue(data["first"]) - node["last"].setValue(data["last"]) - - # recreate render targets - if data["render"]: - node["render"].setValue("Local") - if data["render_farm"]: - node["render"].setValue("On farm") diff --git a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py index 3e2881f298..aeecea655f 100644 --- a/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py +++ b/openpype/hosts/nuke/plugins/publish/validate_write_nodes.py @@ -5,10 +5,13 @@ from openpype.hosts.nuke.api.lib import ( set_node_knobs_from_settings, color_gui_to_int ) -from openpype.pipeline import PublishXmlValidationError + +from openpype.pipeline.publish import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) -@pyblish.api.log class RepairNukeWriteNodeAction(pyblish.api.Action): label = "Repair" on = "failed" @@ -18,10 +21,15 @@ class RepairNukeWriteNodeAction(pyblish.api.Action): instances = get_errored_instances_from_context(context) for instance in instances: - write_group_node = instance[0] + child_nodes = ( + instance.data.get("transientData", {}).get("childNodes") + or instance + ) + + write_group_node = instance.data["transientData"]["node"] # get write node from inside of group write_node = None - for x in instance: + for x in child_nodes: if x.Class() == "Write": write_node = x @@ -32,7 +40,10 @@ class RepairNukeWriteNodeAction(pyblish.api.Action): self.log.info("Node attributes were fixed") -class ValidateNukeWriteNode(pyblish.api.InstancePlugin): +class ValidateNukeWriteNode( + OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin +): """ Validate Write node's knobs. 
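
Before the validator body continues below: the repair action above pulls expected knob values from project settings as a list of {"type", "name", "value"} items and applies them with `set_node_knobs_from_settings`. A small hedged sketch of calling that helper directly; the knob payload shape is borrowed from the `knobs_setting` block that appears later in this diff, and the values here are illustrative:

    import nuke
    from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings

    # Illustrative payload; real values come from
    # project_settings/nuke/imageio/nodes/requiredNodes.
    knobs = [
        {"type": "text", "name": "channels", "value": "rgb"},
        {"type": "bool", "name": "create_directories", "value": True},
    ]

    write_node = nuke.createNode("Write")
    set_node_knobs_from_settings(write_node, knobs)
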
Compare knobs on write node inside the render group @@ -42,16 +53,24 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder optional = True families = ["render"] - label = "Write Node" + label = "Validate write node" actions = [RepairNukeWriteNodeAction] hosts = ["nuke"] def process(self, instance): - write_group_node = instance[0] + if not self.is_active(instance.data): + return + + child_nodes = ( + instance.data.get("transientData", {}).get("childNodes") + or instance + ) + + write_group_node = instance.data["transientData"]["node"] # get write node from inside of group write_node = None - for x in instance: + for x in child_nodes: if x.Class() == "Write": write_node = x @@ -60,17 +79,31 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin): correct_data = get_write_node_template_attr(write_group_node) - if correct_data: - check_knobs = correct_data["knobs"] - else: - return - check = [] self.log.debug("__ write_node: {}".format( write_node )) + self.log.debug("__ correct_data: {}".format( + correct_data + )) + + for knob_data in correct_data["knobs"]: + knob_type = knob_data["type"] + self.log.debug("__ knob_type: {}".format( + knob_type + )) + + if ( + knob_type == "__legacy__" + ): + raise PublishXmlValidationError( + self, ( + "Please update data in settings 'project_settings" + "/nuke/imageio/nodes/requiredNodes'" + ), + key="legacy" + ) - for knob_data in check_knobs: key = knob_data["name"] value = knob_data["value"] node_value = write_node[key].value() diff --git a/openpype/vendor/python/common/scriptsmenu/vendor/__init__.py b/openpype/hosts/nuke/startup/__init__.py similarity index 100% rename from openpype/vendor/python/common/scriptsmenu/vendor/__init__.py rename to openpype/hosts/nuke/startup/__init__.py diff --git a/openpype/hosts/nuke/startup/custom_write_node.py b/openpype/hosts/nuke/startup/custom_write_node.py new file mode 100644 index 0000000000..d9313231d8 --- /dev/null +++ b/openpype/hosts/nuke/startup/custom_write_node.py @@ -0,0 +1,76 @@ +import os +import nuke +from openpype.hosts.nuke.api.lib import set_node_knobs_from_settings + + +frame_padding = 5 +temp_rendering_path_template = ( + "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}") + +knobs_setting = { + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": True + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 186, + 35, + 35, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "bool", + "name": "create_directories", + "value": True + } + ] +} + + +def main(): + write_selected_nodes = [ + s for s in nuke.selectedNodes() if s.Class() == "Write"] + + ext = None + knobs = knobs_setting["knobs"] + for knob in knobs: + if knob["name"] == "file_type": + ext = knob["value"] + for w in write_selected_nodes: + # data for mapping the path + data = { + "work": os.getenv("AVALON_WORKDIR"), + "subset": w["name"].value(), + "frame": "#" * frame_padding, + "ext": ext + } + file_path = temp_rendering_path_template.format(**data) + file_path = file_path.replace("\\", "/") + w["file"].setValue(file_path) + set_node_knobs_from_settings(w, knobs) diff --git a/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py new 
file mode 100644 index 0000000000..f0cbabe20f --- /dev/null +++ b/openpype/hosts/nuke/startup/frame_setting_for_read_nodes.py @@ -0,0 +1,47 @@ +""" OpenPype custom script for resetting read nodes start frame values """ + +import nuke +import nukescripts + + +class FrameSettingsPanel(nukescripts.PythonPanel): + """ Frame Settings Panel """ + def __init__(self): + nukescripts.PythonPanel.__init__(self, "Set Frame Start (Read Node)") + + # create knobs + self.frame = nuke.Int_Knob( + 'frame', 'Frame Number') + self.selected = nuke.Boolean_Knob("selection") + # add knobs to panel + self.addKnob(self.selected) + self.addKnob(self.frame) + + # set values + self.selected.setValue(False) + self.frame.setValue(nuke.root().firstFrame()) + + def process(self): + """ Process the panel values. """ + # get values + frame = self.frame.value() + if self.selected.value(): + # selected nodes processing + if not nuke.selectedNodes(): + return + for rn_ in nuke.selectedNodes(): + if rn_.Class() != "Read": + continue + rn_["frame_mode"].setValue("start_at") + rn_["frame"].setValue(str(frame)) + else: + # all nodes processing + for rn_ in nuke.allNodes(filter="Read"): + rn_["frame_mode"].setValue("start_at") + rn_["frame"].setValue(str(frame)) + + +def main(): + p_ = FrameSettingsPanel() + if p_.showModalDialog(): + print(p_.process()) diff --git a/openpype/hosts/nuke/startup/menu.py b/openpype/hosts/nuke/startup/menu.py index 5e29121e9b..613d508387 100644 --- a/openpype/hosts/nuke/startup/menu.py +++ b/openpype/hosts/nuke/startup/menu.py @@ -1,64 +1,5 @@ -import nuke -import os - -from openpype.lib import Logger from openpype.pipeline import install_host -from openpype.hosts.nuke import api -from openpype.hosts.nuke.api.lib import ( - on_script_load, - check_inventory_versions, - WorkfileSettings, - dirmap_file_name_filter, - add_scripts_gizmo -) -from openpype.settings import get_project_settings +from openpype.hosts.nuke.api import NukeHost -log = Logger.get_logger(__name__) - - -install_host(api) - -# fix ffmpeg settings on script -nuke.addOnScriptLoad(on_script_load) - -# set checker for last versions on loaded containers -nuke.addOnScriptLoad(check_inventory_versions) -nuke.addOnScriptSave(check_inventory_versions) - -# # set apply all workfile settings on script load and save -nuke.addOnScriptLoad(WorkfileSettings().set_context_settings) - -nuke.addFilenameFilter(dirmap_file_name_filter) - -log.info('Automatic syncing of write file knob to script version') - - -def add_scripts_menu(): - try: - from scriptsmenu import launchfornuke - except ImportError: - log.warning( - "Skipping studio.menu install, because " - "'scriptsmenu' module seems unavailable." 
- ) - return - - # load configuration of custom menu - project_settings = get_project_settings(os.getenv("AVALON_PROJECT")) - config = project_settings["nuke"]["scriptsmenu"]["definition"] - _menu = project_settings["nuke"]["scriptsmenu"]["name"] - - if not config: - log.warning("Skipping studio menu, no definition found.") - return - - # run the launcher for Maya menu - studio_menu = launchfornuke.main(title=_menu.title()) - - # apply configuration - studio_menu.build_from_configuration(studio_menu, config) - - -add_scripts_menu() - -add_scripts_gizmo() +host = NukeHost() +install_host(host) diff --git a/openpype/hosts/photoshop/api/__init__.py b/openpype/hosts/photoshop/api/__init__.py index 94152b5706..c5a12cba06 100644 --- a/openpype/hosts/photoshop/api/__init__.py +++ b/openpype/hosts/photoshop/api/__init__.py @@ -7,28 +7,15 @@ Anything that isn't defined here is INTERNAL and unreliable for external use. from .launch_logic import stub from .pipeline import ( + PhotoshopHost, ls, - list_instances, - remove_instance, - install, - uninstall, - containerise, - get_context_data, - update_context_data, - get_context_title + containerise ) from .plugin import ( PhotoshopLoader, get_unique_layer_name ) -from .workio import ( - file_extensions, - has_unsaved_changes, - save_file, - open_file, - current_file, - work_root, -) + from .lib import ( maintained_selection, @@ -40,28 +27,14 @@ __all__ = [ "stub", # pipeline + "PhotoshopHost", "ls", - "list_instances", - "remove_instance", - "install", - "uninstall", "containerise", - "get_context_data", - "update_context_data", - "get_context_title", # Plugin "PhotoshopLoader", "get_unique_layer_name", - # workfiles - "file_extensions", - "has_unsaved_changes", - "save_file", - "open_file", - "current_file", - "work_root", - # lib "maintained_selection", "maintained_visibility", diff --git a/openpype/hosts/photoshop/api/extension.zxp b/openpype/hosts/photoshop/api/extension.zxp index a25ec96e7d..39b766cd0d 100644 Binary files a/openpype/hosts/photoshop/api/extension.zxp and b/openpype/hosts/photoshop/api/extension.zxp differ diff --git a/openpype/hosts/photoshop/api/extension/CSXS/manifest.xml b/openpype/hosts/photoshop/api/extension/CSXS/manifest.xml index 6396cd2412..2089d06da1 100644 --- a/openpype/hosts/photoshop/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/photoshop/api/extension/CSXS/manifest.xml @@ -1,5 +1,5 @@ - + diff --git a/openpype/hosts/photoshop/api/extension/extension.zxp b/openpype/hosts/photoshop/api/extension/extension.zxp new file mode 100644 index 0000000000..39b766cd0d Binary files /dev/null and b/openpype/hosts/photoshop/api/extension/extension.zxp differ diff --git a/openpype/hosts/photoshop/api/extension/host/index.jsx b/openpype/hosts/photoshop/api/extension/host/index.jsx index 2acec1ebc1..e2711fb960 100644 --- a/openpype/hosts/photoshop/api/extension/host/index.jsx +++ b/openpype/hosts/photoshop/api/extension/host/index.jsx @@ -199,7 +199,7 @@ function getActiveDocumentName(){ function getActiveDocumentFullName(){ /** * Returns file name of active document with file path. - * activeDocument.fullName returns path in URI (eg /c/.. insted of c:/) + * activeDocument.fullName returns path in URI (eg /c/.. instead of c:/) * */ if (documents.length == 0){ return null; @@ -225,7 +225,7 @@ function getSelectedLayers(doc) { * Returns json representation of currently selected layers. 
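
Stepping out of the extension script for a moment: the menu.py and photoshop `__init__.py` hunks above replace module-level `install()`/`uninstall()` entry points with host classes handed to `install_host()`. Once installed, tooling reaches the same functionality through the registered host object. A hedged sketch using the IWorkfileHost methods that `PhotoshopHost` defines later in this diff:

    from openpype.pipeline import registered_host

    host = registered_host()

    # Workfile I/O that used to live in workio.py is now implemented by
    # the host class (see PhotoshopHost further down in this patch).
    current = host.get_current_workfile()
    if current and host.workfile_has_unsaved_changes():
        host.save_workfile(current)

    print(host.get_workfile_extensions())  # e.g. [".psd", ".psb"]
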
     * Works in three steps - 1) creates new group with selected layers
     * 2) traverses this group
-    * 3) deletes newly created group, not neede
+    * 3) deletes newly created group, not needed
     * Bit weird, but Adobe..
     **/
     if (doc == null){
@@ -284,7 +284,7 @@ function selectLayers(selectedLayers){
             existing_ids.push(existing_layers[y]["id"]);
         }
         for (var i = 0; i < selectedLayers.length; i++) {
-            // a check to see if the id stil exists
+            // a check to see if the id still exists
             var id = selectedLayers[i];
             if(existing_ids.toString().indexOf(id)>=0){
                 layers[i] = charIDToTypeID( "Lyr " );
diff --git a/openpype/hosts/photoshop/api/extension/index.html b/openpype/hosts/photoshop/api/extension/index.html
index 501e753c0b..9d7363e62d 100644
--- a/openpype/hosts/photoshop/api/extension/index.html
+++ b/openpype/hosts/photoshop/api/extension/index.html
@@ -32,17 +32,6 @@
         });
     
-    
-        
-        
-        
-        
-    
 
diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py
index 1403b6cfa1..25732446b5 100644
--- a/openpype/hosts/photoshop/api/launch_logic.py
+++ b/openpype/hosts/photoshop/api/launch_logic.py
@@ -10,10 +10,20 @@ from wsrpc_aiohttp import (
 
 from qtpy import QtCore
 
-from openpype.lib import Logger
-from openpype.pipeline import legacy_io
+from openpype.lib import Logger, StringTemplate
+from openpype.pipeline import (
+    registered_host,
+    Anatomy,
+)
+from openpype.pipeline.workfile import (
+    get_workfile_template_key_from_context,
+    get_last_workfile,
+)
+from openpype.pipeline.template_data import get_template_data_with_names
 from openpype.tools.utils import host_tools
 from openpype.tools.adobe_webserver.app import WebServerTool
+from openpype.pipeline.context_tools import change_current_context
+from openpype.client import get_asset_by_name
 
 from .ws_stub import PhotoshopServerStub
 
@@ -56,11 +66,11 @@ class MainThreadItem:
         return self._result
 
     def execute(self):
-        """Execute callback and store it's result.
+        """Execute callback and store its result.
 
         Method must be called from main thread. Item is marked as `done`
         when callback execution finished. Store output of callback or exception
-        information when callback raise one.
+        information when callback raises one.
         """
         log.debug("Executing process in main thread")
         if self.done:
@@ -310,23 +320,28 @@ class PhotoshopRoute(WebSocketRoute):
     # client functions
     async def set_context(self, project, asset, task):
         """
-            Sets 'project' and 'asset' to envs, eg. setting context
+            Sets 'project' and 'asset' to envs, eg. setting context.
 
-            Args:
-                project (str)
-                asset (str)
+            Opens last workfile from that context if it exists.
+
+            Args:
+                project (str)
+                asset (str)
+                task (str)
         """
         log.info("Setting context change")
-        log.info("project {} asset {} ".format(project, asset))
-        if project:
-            legacy_io.Session["AVALON_PROJECT"] = project
-            os.environ["AVALON_PROJECT"] = project
-        if asset:
-            legacy_io.Session["AVALON_ASSET"] = asset
-            os.environ["AVALON_ASSET"] = asset
-        if task:
-            legacy_io.Session["AVALON_TASK"] = task
-            os.environ["AVALON_TASK"] = task
+        log.info(f"project {project} asset {asset} task {task}")
+
+        asset_doc = get_asset_by_name(project, asset)
+        change_current_context(asset_doc, task)
+
+        last_workfile_path = self._get_last_workfile_path(project,
+                                                          asset,
+                                                          task)
+        if last_workfile_path and os.path.exists(last_workfile_path):
+            ProcessLauncher.execute_in_main_thread(
+                lambda: stub().open(last_workfile_path))
+
 
     async def read(self):
         log.debug("photoshop.read client calls server server calls "
@@ -334,9 +349,6 @@ class PhotoshopRoute(WebSocketRoute):
         return await self.socket.call('photoshop.read')
 
     # panel routes for tools
-    async def creator_route(self):
-        self._tool_route("creator")
-
     async def workfiles_route(self):
         self._tool_route("workfiles")
 
@@ -344,14 +356,11 @@
         self._tool_route("loader")
 
     async def publish_route(self):
-        self._tool_route("publish")
+        self._tool_route("publisher")
 
     async def sceneinventory_route(self):
         self._tool_route("sceneinventory")
 
-    async def subsetmanager_route(self):
-        self._tool_route("subsetmanager")
-
     async def experimental_tools_route(self):
         self._tool_route("experimental_tools")
 
@@ -362,3 +371,35 @@
         # Required return statement.
         return "nothing"
+
+    def _get_last_workfile_path(self, project_name, asset_name, task_name):
+        """Returns last workfile path if it exists"""
+        host = registered_host()
+        host_name = "photoshop"
+        template_key = get_workfile_template_key_from_context(
+            asset_name,
+            task_name,
+            host_name,
+            project_name=project_name
+        )
+        anatomy = Anatomy(project_name)
+
+        data = get_template_data_with_names(
+            project_name, asset_name, task_name, host_name
+        )
+        data["root"] = anatomy.roots
+
+        file_template = anatomy.templates[template_key]["file"]
+
+        # Define saving file extension
+        extensions = host.get_workfile_extensions()
+
+        folder_template = anatomy.templates[template_key]["folder"]
+        work_root = StringTemplate.format_strict_template(
+            folder_template, data
+        )
+        last_workfile_path = get_last_workfile(
+            work_root, file_template, data, extensions, True
+        )
+
+        return last_workfile_path
diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py
index e3b601d011..ff520348f0 100644
--- a/openpype/hosts/photoshop/api/lib.py
+++ b/openpype/hosts/photoshop/api/lib.py
@@ -9,6 +9,7 @@ from openpype.lib import env_value_to_bool, Logger
 from openpype.modules import ModulesManager
 from openpype.pipeline import install_host
 from openpype.tools.utils import host_tools
+from openpype.tests.lib import is_in_tests
 
 from .launch_logic import ProcessLauncher, stub
 
@@ -20,9 +21,11 @@ def safe_excepthook(*args):
 
 
 def main(*subprocess_args):
-    from openpype.hosts.photoshop import api
+    from openpype.hosts.photoshop.api import PhotoshopHost
+
+    host = PhotoshopHost()
+    install_host(host)
 
-    install_host(api)
     sys.excepthook = safe_excepthook
 
     # coloring in StdOutBroker
@@ -40,7 +43,7 @@ def main(*subprocess_args):
             webpublisher_addon.headless_publish,
             log,
             "ClosePS",
-            os.environ.get("IS_TEST")
+            is_in_tests()
         )
    elif
env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", default=True): diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index d2da8c5cb4..73dc80260c 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,4 +1,5 @@ import os + from qtpy import QtWidgets import pyblish.api @@ -12,6 +13,14 @@ from openpype.pipeline import ( deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) + +from openpype.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost +) + from openpype.pipeline.load import any_outdated_containers from openpype.hosts.photoshop import PHOTOSHOP_HOST_DIR @@ -26,6 +35,140 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "photoshop" + + def install(self): + """Install Photoshop-specific functionality needed for integration. + + This function is called automatically on calling + `api.install(photoshop)`. + """ + log.info("Installing OpenPype Photoshop...") + pyblish.api.register_host("photoshop") + + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + log.info(PUBLISH_PATH) + + pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled + ) + + register_event_callback("application.launched", on_application_launch) + + def current_file(self): + try: + full_name = lib.stub().get_active_document_full_name() + if full_name and full_name != "null": + return os.path.normpath(full_name).replace("\\", "/") + except Exception: + pass + + return None + + def work_root(self, session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") + + def open_workfile(self, filepath): + lib.stub().open(filepath) + + return True + + def save_workfile(self, filepath=None): + _, ext = os.path.splitext(filepath) + lib.stub().saveAs(filepath, ext[1:], True) + + def get_current_workfile(self): + return self.current_file() + + def workfile_has_unsaved_changes(self): + if self.current_file(): + return not lib.stub().is_saved() + + return False + + def get_workfile_extensions(self): + return [".psd", ".psb"] + + def get_containers(self): + return ls() + + def get_context_data(self): + """Get stored values for context (validation enable/disable etc)""" + meta = _get_stub().get_layers_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + def update_context_data(self, data, changes): + """Store value needed for context""" + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + def get_context_title(self): + """Returns title for Creator window""" + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) + + def list_instances(self): + """List all created instances to publish from current workfile. 
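
The `get_context_data`/`update_context_data` pair above keeps publisher-wide state (for example validator toggles) in a pseudo-layer metadata item with id "publish_context". A hedged roundtrip sketch; it assumes a running Photoshop connection, and the stored key name is invented for illustration:

    from openpype.hosts.photoshop.api import PhotoshopHost

    host = PhotoshopHost()

    data = host.get_context_data()  # {} until something has been stored
    data["example_toggle"] = True   # hypothetical value to persist
    host.update_context_data(data, changes=None)

    # update_context_data() re-imprints the item with id "publish_context",
    # so the next get_context_data() call returns it (minus the "id" key,
    # which get_context_data() pops before returning).
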
+ + Pulls from File > File Info + + Returns: + (list) of dictionaries matching instances format + """ + stub = _get_stub() + + if not stub: + return [] + + instances = [] + layers_meta = stub.get_layers_metadata() + if layers_meta: + for instance in layers_meta: + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) + + return instances + + def remove_instance(self, instance): + """Remove instance from current workfile metadata. + + Updates metadata of current file in File > File Info and removes + icon highlight on group layer. + + Args: + instance (dict): instance representation from subsetmanager model + """ + stub = _get_stub() + + if not stub: + return + + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_layer(instance["members"][0]) + if item: + stub.rename_layer(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) + + def check_inventory(): if not any_outdated_containers(): return @@ -52,32 +195,6 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance[0].Visible = new_value -def install(): - """Install Photoshop-specific functionality of avalon-core. - - This function is called automatically on calling `api.install(photoshop)`. - """ - log.info("Installing OpenPype Photoshop...") - pyblish.api.register_host("photoshop") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - register_creator_plugin_path(CREATE_PATH) - log.info(PUBLISH_PATH) - - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) - - register_event_callback("application.launched", on_application_launch) - - -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - deregister_creator_plugin_path(CREATE_PATH) - - def ls(): """Yields containers from active Photoshop document @@ -117,61 +234,6 @@ def ls(): yield data -def list_instances(): - """List all created instances to publish from current workfile. - - Pulls from File > File Info - - For SubsetManager - - Returns: - (list) of dictionaries matching instances format - """ - stub = _get_stub() - - if not stub: - return [] - - instances = [] - layers_meta = stub.get_layers_metadata() - if layers_meta: - for instance in layers_meta: - if instance.get("id") == "pyblish.avalon.instance": - instances.append(instance) - - return instances - - -def remove_instance(instance): - """Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. 
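
One detail worth calling out from the `install()` method above: the "instanceToggled" callback is how layer visibility stays in sync when an artist activates or deactivates an instance. Registering such a callback is plain pyblish API, as used in this patch:

    import pyblish.api

    # Handler signature used above: pyblish passes the instance plus the
    # old and new toggle values; the first member (the group layer) is
    # shown or hidden accordingly.
    def on_pyblish_instance_toggled(instance, old_value, new_value):
        instance[0].Visible = new_value

    pyblish.api.register_callback(
        "instanceToggled", on_pyblish_instance_toggled
    )
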
- - For SubsetManager - - Args: - instance (dict): instance representation from subsetmanager model - """ - stub = _get_stub() - - if not stub: - return - - inst_id = instance.get("instance_id") or instance.get("uuid") # legacy - if not inst_id: - log.warning("No instance identifier for {}".format(instance)) - return - - stub.remove_instance(inst_id) - - if instance.get("members"): - item = stub.get_layer(instance["members"][0]) - if item: - stub.rename_layer(item.id, - item.name.replace(stub.PUBLISH_ICON, '')) - - def _get_stub(): """Handle pulling stub from PS to run operations on host @@ -226,28 +288,17 @@ def containerise( return layer -def get_context_data(): - """Get stored values for context (validation enable/disable etc)""" - meta = _get_stub().get_layers_metadata() - for item in meta: - if item.get("id") == "publish_context": - item.pop("id") - return item +def cache_and_get_instances(creator): + """Cache instances in shared data. - return {} - - -def update_context_data(data, changes): - """Store value needed for context""" - item = data - item["id"] = "publish_context" - _get_stub().imprint(item["id"], item) - - -def get_context_title(): - """Returns title for Creator window""" - - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - return "{}/{}/{}".format(project_name, asset_name, task_name) + Storing all instances as a list as legacy instances might be still present. + Args: + creator (Creator): Plugin which would like to get instances from host. + Returns: + List[]: list of all instances stored in metadata + """ + shared_key = "openpype.photoshop.instances" + if shared_key not in creator.collection_shared_data: + creator.collection_shared_data[shared_key] = \ + creator.host.list_instances() + return creator.collection_shared_data[shared_key] diff --git a/openpype/hosts/photoshop/api/workio.py b/openpype/hosts/photoshop/api/workio.py deleted file mode 100644 index 35b44d6070..0000000000 --- a/openpype/hosts/photoshop/api/workio.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Host API required Work Files tool""" -import os - -from . 
import lib - - -def _active_document(): - document_name = lib.stub().get_active_document_name() - if not document_name: - return None - - return document_name - - -def file_extensions(): - return [".psd", ".psb"] - - -def has_unsaved_changes(): - if _active_document(): - return not lib.stub().is_saved() - - return False - - -def save_file(filepath): - _, ext = os.path.splitext(filepath) - lib.stub().saveAs(filepath, ext[1:], True) - - -def open_file(filepath): - lib.stub().open(filepath) - - return True - - -def current_file(): - try: - full_name = lib.stub().get_active_document_full_name() - if full_name and full_name != "null": - return os.path.normpath(full_name).replace("\\", "/") - except Exception: - pass - - return None - - -def work_root(session): - return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/openpype/hosts/photoshop/plugins/create/workfile_creator.py b/openpype/hosts/photoshop/lib.py similarity index 68% rename from openpype/hosts/photoshop/plugins/create/workfile_creator.py rename to openpype/hosts/photoshop/lib.py index e79d16d154..ae7a33b7b6 100644 --- a/openpype/hosts/photoshop/plugins/create/workfile_creator.py +++ b/openpype/hosts/photoshop/lib.py @@ -2,33 +2,31 @@ import openpype.hosts.photoshop.api as api from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, - legacy_io + CreatedInstance ) +from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances -class PSWorkfileCreator(AutoCreator): - identifier = "workfile" - family = "workfile" - - default_variant = "Main" - +class PSAutoCreator(AutoCreator): + """Generic autocreator to extend.""" def get_instance_attr_defs(self): return [] def collect_instances(self): - for instance_data in api.list_instances(): + for instance_data in cache_and_get_instances(self): creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: - subset_name = instance_data["subset"] - instance = CreatedInstance( - self.family, subset_name, instance_data, self + instance = CreatedInstance.from_existing( + instance_data, self ) self._add_instance_to_context(instance) def update_instances(self, update_list): - # nothing to change on workfiles - pass + self.log.debug("update_list:: {}".format(update_list)) + for created_inst, _changes in update_list: + api.stub().imprint(created_inst.get("instance_id"), + created_inst.data_to_store()) def create(self, options=None): existing_instance = None @@ -37,10 +35,11 @@ class PSWorkfileCreator(AutoCreator): existing_instance = instance break - project_name = legacy_io.Session["AVALON_PROJECT"] - asset_name = legacy_io.Session["AVALON_ASSET"] - task_name = legacy_io.Session["AVALON_TASK"] - host_name = legacy_io.Session["AVALON_APP"] + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( @@ -54,9 +53,12 @@ class PSWorkfileCreator(AutoCreator): } data.update(self.get_dynamic_data( self.default_variant, task_name, asset_doc, - project_name, host_name + project_name, host_name, None )) + if not self.active_on_create: + data["active"] = False + new_instance = CreatedInstance( self.family, subset_name, data, self ) diff --git a/openpype/hosts/photoshop/plugins/create/create_flatten_image.py 
b/openpype/hosts/photoshop/plugins/create/create_flatten_image.py new file mode 100644 index 0000000000..3bc61c8184 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/create_flatten_image.py @@ -0,0 +1,120 @@ +from openpype.pipeline import CreatedInstance + +from openpype.lib import BoolDef +import openpype.hosts.photoshop.api as api +from openpype.hosts.photoshop.lib import PSAutoCreator +from openpype.pipeline.create import get_subset_name +from openpype.client import get_asset_by_name + + +class AutoImageCreator(PSAutoCreator): + """Creates flatten image from all visible layers. + + Used in simplified publishing as auto created instance. + Must be enabled in Setting and template for subset name provided + """ + identifier = "auto_image" + family = "image" + + # Settings + default_variant = "" + # - Mark by default instance for review + mark_for_review = True + active_on_create = True + + def create(self, options=None): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name + asset_doc = get_asset_by_name(project_name, asset_name) + + if existing_instance is None: + subset_name = get_subset_name( + self.family, self.default_variant, task_name, asset_doc, + project_name, host_name + ) + + publishable_ids = [layer.id for layer in api.stub().get_layers() + if layer.visible] + data = { + "asset": asset_name, + "task": task_name, + # ids are "virtual" layers, won't get grouped as 'members' do + # same difference in color coded layers in WP + "ids": publishable_ids + } + + if not self.active_on_create: + data["active"] = False + + creator_attributes = {"mark_for_review": self.mark_for_review} + data.update({"creator_attributes": creator_attributes}) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + api.stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) + + elif ( # existing instance from different context + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + subset_name = get_subset_name( + self.family, self.default_variant, task_name, asset_doc, + project_name, host_name + ) + + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name + + api.stub().imprint(existing_instance.get("instance_id"), + existing_instance.data_to_store()) + + def get_pre_create_attr_defs(self): + return [ + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] + + def get_instance_attr_defs(self): + return [ + BoolDef( + "mark_for_review", + label="Review" + ) + ] + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["AutoImageCreator"] + ) + + self.active_on_create = plugin_settings["active_on_create"] + self.default_variant = plugin_settings["default_variant"] + self.mark_for_review = plugin_settings["mark_for_review"] + self.enabled = plugin_settings["enabled"] + + def get_detail_description(self): + return """Creator for flatten image. + + Studio might configure simple publishing workflow. 
In that case + `image` instance is automatically created which will publish flat + image from all visible layers. + + Artist might disable this instance from publishing or from creating + review for it though. + """ diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index 2cfbfa8778..f3165fca57 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -5,33 +5,28 @@ from openpype.lib import BoolDef from openpype.pipeline import ( Creator, CreatedInstance, - legacy_io + CreatorError ) from openpype.lib import prepare_template_data from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances class ImageCreator(Creator): - """Creates image instance for publishing.""" + """Creates image instance for publishing. + + Result of 'image' instance is image of all visible layers, or image(s) of + selected layers. + """ identifier = "image" label = "Image" family = "image" description = "Image creator" - def collect_instances(self): - for instance_data in api.list_instances(): - # legacy instances have family=='image' - creator_id = (instance_data.get("creator_identifier") or - instance_data.get("family")) - - if creator_id == self.identifier: - instance_data = self._handle_legacy(instance_data) - layer = api.stub().get_layer(instance_data["members"][0]) - instance_data["layer"] = layer - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) + # Settings + default_variants = "" + mark_for_review = False + active_on_create = True def create(self, subset_name_from_ui, data, pre_create_data): groups_to_create = [] @@ -58,9 +53,10 @@ class ImageCreator(Creator): try: group = stub.group_selected_layers(subset_name_from_ui) except: - raise ValueError("Cannot group locked Bakcground layer!") + raise CreatorError("Cannot group locked Background layer!") groups_to_create.append(group) + # create empty group if nothing selected if not groups_to_create and not top_layers_to_wrap: group = stub.create_group(subset_name_from_ui) groups_to_create.append(group) @@ -72,13 +68,16 @@ class ImageCreator(Creator): groups_to_create.append(group) layer_name = '' - creating_multiple_groups = len(groups_to_create) > 1 + # use artist chosen option OR force layer if more subsets are created + # to differentiate them + use_layer_name = (pre_create_data.get("use_layer_name") or + len(groups_to_create) > 1) for group in groups_to_create: subset_name = subset_name_from_ui # reset to name from creator UI layer_names_in_hierarchy = [] created_group_name = self._clean_highlights(stub, group.name) - if creating_multiple_groups: + if use_layer_name: layer_name = re.sub( "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), "", @@ -97,8 +96,15 @@ class ImageCreator(Creator): data.update({"subset": subset_name}) data.update({"members": [str(group.id)]}) + data.update({"layer_name": layer_name}) data.update({"long_name": "_".join(layer_names_in_hierarchy)}) + creator_attributes = {"mark_for_review": self.mark_for_review} + data.update({"creator_attributes": creator_attributes}) + + if not self.active_on_create: + data["active"] = False + new_instance = CreatedInstance(self.family, subset_name, data, self) @@ -110,6 +116,21 @@ class ImageCreator(Creator): stub.rename_layer(group.id, stub.PUBLISH_ICON + created_group_name) + def collect_instances(self): + for 
instance_data in cache_and_get_instances(self): + # legacy instances have family=='image' + creator_id = (instance_data.get("creator_identifier") or + instance_data.get("family")) + + if creator_id == self.identifier: + instance_data = self._handle_legacy(instance_data) + layer = api.stub().get_layer(instance_data["members"][0]) + instance_data["layer"] = layer + instance = CreatedInstance.from_existing( + instance_data, self + ) + self._add_instance_to_context(instance) + def update_instances(self, update_list): self.log.debug("update_list:: {}".format(update_list)) for created_inst, _changes in update_list: @@ -121,26 +142,80 @@ class ImageCreator(Creator): def remove_instances(self, instances): for instance in instances: - api.remove_instance(instance) + self.host.remove_instance(instance) self._remove_instance_from_context(instance) - def get_default_variants(self): - return [ - "Main" - ] - def get_pre_create_attr_defs(self): output = [ BoolDef("use_selection", default=True, label="Create only for selected"), BoolDef("create_multiple", default=True, - label="Create separate instance for each selected") + label="Create separate instance for each selected"), + BoolDef("use_layer_name", + default=False, + label="Use layer name in subset"), + BoolDef( + "mark_for_review", + label="Create separate review", + default=False + ) ] return output + def get_instance_attr_defs(self): + return [ + BoolDef( + "mark_for_review", + label="Review" + ) + ] + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["ImageCreator"] + ) + + self.active_on_create = plugin_settings["active_on_create"] + self.default_variants = plugin_settings["default_variants"] + self.mark_for_review = plugin_settings["mark_for_review"] + self.enabled = plugin_settings["enabled"] + + def get_detail_description(self): - return """Creator for Image instances""" + return """Creator for Image instances + + Main publishable item in Photoshop will be of `image` family. Result of + this item (instance) is picture that could be loaded and used + in another DCCs (for example as single layer in composition in + AfterEffects, reference in Maya etc). + + There are couple of options what to publish: + - separate image per selected layer (or group of layers) + - one image for all selected layers + - all visible layers (groups) flattened into single image + + In most cases you would like to keep `Create only for selected` + toggled on and select what you would like to publish. + Toggling this option off will allow you to create instance for all + visible layers without a need to select them explicitly. + + Use 'Create separate instance for each selected' to create separate + images per selected layer (group of layers). + + 'Use layer name in subset' will explicitly add layer name into subset + name. Position of this name is configurable in + `project_settings/global/tools/creator/subset_name_profiles`. + If layer placeholder ({layer}) is not used in `subset_name_profiles` + but layer name should be used (set explicitly in UI or implicitly if + multiple images should be created), it is added in capitalized form + as a suffix to subset name. + + Each image could have its separate review created if necessary via + `Create separate review` toggle. + But more use case is to use separate `review` instance to create review + from all published items. 
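
The subset-name behaviour described above relies on the "{layer}" placeholder being filled through `prepare_template_data`, which supplies case variants of each fill key so the capitalized "{Layer}" suffix resolves as well. A small sketch of that fill step; the layer name is illustrative:

    from openpype.lib import prepare_template_data

    subset_name = "image{Layer}"
    # prepare_template_data() adds case variants (layer/Layer/...),
    # so the capitalized placeholder resolves too.
    layer_fill = prepare_template_data({"layer": "background"})
    print(subset_name.format(**layer_fill))  # -> imageBackground
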
+ """ def _handle_legacy(self, instance_data): """Converts old instances to new format.""" @@ -153,7 +228,7 @@ class ImageCreator(Creator): instance_data.pop("uuid") if not instance_data.get("task"): - instance_data["task"] = legacy_io.Session.get("AVALON_TASK") + instance_data["task"] = self.create_context.get_current_task_name() if not instance_data.get("variant"): instance_data["variant"] = '' @@ -163,6 +238,11 @@ class ImageCreator(Creator): def _clean_highlights(self, stub, item): return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, '') - @classmethod - def get_dynamic_data(cls, *args, **kwargs): + + def get_dynamic_data(self, variant, task_name, asset_doc, + project_name, host_name, instance): + if instance is not None: + layer_name = instance.get("layer_name") + if layer_name: + return {"layer": layer_name} return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py b/openpype/hosts/photoshop/plugins/create/create_legacy_image.py deleted file mode 100644 index 2d655cae32..0000000000 --- a/openpype/hosts/photoshop/plugins/create/create_legacy_image.py +++ /dev/null @@ -1,120 +0,0 @@ -import re - -from qtpy import QtWidgets -from openpype.pipeline import create -from openpype.hosts.photoshop import api as photoshop - -from openpype.lib import prepare_template_data -from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS - - -class CreateImage(create.LegacyCreator): - """Image folder for publish.""" - - name = "imageDefault" - label = "Image" - family = "image" - defaults = ["Main"] - - def process(self): - groups = [] - layers = [] - create_group = False - - stub = photoshop.stub() - if (self.options or {}).get("useSelection"): - multiple_instances = False - selection = stub.get_selected_layers() - self.log.info("selection {}".format(selection)) - if len(selection) > 1: - # Ask user whether to create one image or image per selected - # item. - active_window = QtWidgets.QApplication.activeWindow() - msg_box = QtWidgets.QMessageBox(parent=active_window) - msg_box.setIcon(QtWidgets.QMessageBox.Warning) - msg_box.setText( - "Multiple layers selected." - "\nDo you want to make one image per layer?" - ) - msg_box.setStandardButtons( - QtWidgets.QMessageBox.Yes | - QtWidgets.QMessageBox.No | - QtWidgets.QMessageBox.Cancel - ) - ret = msg_box.exec_() - if ret == QtWidgets.QMessageBox.Yes: - multiple_instances = True - elif ret == QtWidgets.QMessageBox.Cancel: - return - - if multiple_instances: - for item in selection: - if item.group: - groups.append(item) - else: - layers.append(item) - else: - group = stub.group_selected_layers(self.name) - groups.append(group) - - elif len(selection) == 1: - # One selected item. Use group if its a LayerSet (group), else - # create a new group. - if selection[0].group: - groups.append(selection[0]) - else: - layers.append(selection[0]) - elif len(selection) == 0: - # No selection creates an empty group. - create_group = True - else: - group = stub.create_group(self.name) - groups.append(group) - - if create_group: - group = stub.create_group(self.name) - groups.append(group) - - for layer in layers: - stub.select_layers([layer]) - group = stub.group_selected_layers(layer.name) - groups.append(group) - - creator_subset_name = self.data["subset"] - layer_name = '' - for group in groups: - long_names = [] - group.name = group.name.replace(stub.PUBLISH_ICON, ''). 
\ - replace(stub.LOADED_ICON, '') - - subset_name = creator_subset_name - if len(groups) > 1: - layer_name = re.sub( - "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), - "", - group.name - ) - if "{layer}" not in subset_name.lower(): - subset_name += "{Layer}" - - layer_fill = prepare_template_data({"layer": layer_name}) - subset_name = subset_name.format(**layer_fill) - - if group.long_name: - for directory in group.long_name[::-1]: - name = directory.replace(stub.PUBLISH_ICON, '').\ - replace(stub.LOADED_ICON, '') - long_names.append(name) - - self.data.update({"subset": subset_name}) - self.data.update({"uuid": str(group.id)}) - self.data.update({"members": [str(group.id)]}) - self.data.update({"long_name": "_".join(long_names)}) - stub.imprint(group, self.data) - # reusing existing group, need to rename afterwards - if not create_group: - stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name) - - @classmethod - def get_dynamic_data(cls, *args, **kwargs): - return {"layer": "{layer}"} diff --git a/openpype/hosts/photoshop/plugins/create/create_review.py b/openpype/hosts/photoshop/plugins/create/create_review.py new file mode 100644 index 0000000000..064485d465 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/create_review.py @@ -0,0 +1,28 @@ +from openpype.hosts.photoshop.lib import PSAutoCreator + + +class ReviewCreator(PSAutoCreator): + """Creates review instance which might be disabled from publishing.""" + identifier = "review" + family = "review" + + default_variant = "Main" + + def get_detail_description(self): + return """Auto creator for review. + + Photoshop review is created from all published images or from all + visible layers if no `image` instances got created. + + Review might be disabled by an artist (instance shouldn't be deleted as + it will get recreated in next publish either way). + """ + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["ReviewCreator"] + ) + + self.default_variant = plugin_settings["default_variant"] + self.active_on_create = plugin_settings["active_on_create"] + self.enabled = plugin_settings["enabled"] diff --git a/openpype/hosts/photoshop/plugins/create/create_workfile.py b/openpype/hosts/photoshop/plugins/create/create_workfile.py new file mode 100644 index 0000000000..d498f0549c --- /dev/null +++ b/openpype/hosts/photoshop/plugins/create/create_workfile.py @@ -0,0 +1,28 @@ +from openpype.hosts.photoshop.lib import PSAutoCreator + + +class WorkfileCreator(PSAutoCreator): + identifier = "workfile" + family = "workfile" + + default_variant = "Main" + + def get_detail_description(self): + return """Auto creator for workfile. + + It is expected that each publish will also publish its source workfile + for safekeeping. This creator triggers automatically without need for + an artist to remember and trigger it explicitly. + + Workfile instance could be disabled if it is not required to publish + workfile. (Instance shouldn't be deleted though as it will be recreated + in next publish automatically). 
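
ReviewCreator and WorkfileCreator above are thin subclasses of the `PSAutoCreator` base from openpype/hosts/photoshop/lib.py. For comparison, a hypothetical third auto-creator would follow the same recipe; the "thumbnail" identifier, family and settings key below are invented for this sketch and are not part of the patch:

    from openpype.hosts.photoshop.lib import PSAutoCreator


    class ThumbnailCreator(PSAutoCreator):
        identifier = "thumbnail"  # hypothetical
        family = "thumbnail"      # hypothetical

        default_variant = "Main"

        def get_detail_description(self):
            return "Example auto creator; recreated on next publish."

        def apply_settings(self, project_settings, system_settings):
            # Settings key assumed to mirror the creators above.
            plugin_settings = (
                project_settings["photoshop"]["create"]["ThumbnailCreator"]
            )
            self.active_on_create = plugin_settings["active_on_create"]
            self.enabled = plugin_settings["enabled"]
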
+ """ + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["photoshop"]["create"]["WorkfileCreator"] + ) + + self.active_on_create = plugin_settings["active_on_create"] + self.enabled = plugin_settings["enabled"] diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_image.py b/openpype/hosts/photoshop/plugins/publish/collect_auto_image.py new file mode 100644 index 0000000000..ce408f8d01 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_auto_image.py @@ -0,0 +1,101 @@ +import pyblish.api + +from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import get_subset_name + + +class CollectAutoImage(pyblish.api.ContextPlugin): + """Creates auto image in non artist based publishes (Webpublisher). + + 'remotepublish' should be renamed to 'autopublish' or similar in the future + """ + + label = "Collect Auto Image" + order = pyblish.api.CollectorOrder + hosts = ["photoshop"] + order = pyblish.api.CollectorOrder + 0.2 + + targets = ["remotepublish"] + + def process(self, context): + family = "image" + for instance in context: + creator_identifier = instance.data.get("creator_identifier") + if creator_identifier and creator_identifier == "auto_image": + self.log.debug("Auto image instance found, won't create new") + return + + project_name = context.data["anatomyData"]["project"]["name"] + proj_settings = context.data["project_settings"] + task_name = context.data["anatomyData"]["task"]["name"] + host_name = context.data["hostName"] + asset_doc = context.data["assetEntity"] + asset_name = asset_doc["name"] + + auto_creator = proj_settings.get( + "photoshop", {}).get( + "create", {}).get( + "AutoImageCreator", {}) + + if not auto_creator or not auto_creator["enabled"]: + self.log.debug("Auto image creator disabled, won't create new") + return + + stub = photoshop.stub() + stored_items = stub.get_layers_metadata() + for item in stored_items: + if item.get("creator_identifier") == "auto_image": + if not item.get("active"): + self.log.debug("Auto_image instance disabled") + return + + layer_items = stub.get_layers() + + publishable_ids = [layer.id for layer in layer_items + if layer.visible] + + # collect stored image instances + instance_names = [] + for layer_item in layer_items: + layer_meta_data = stub.read(layer_item, stored_items) + + # Skip layers without metadata. + if layer_meta_data is None: + continue + + # Skip containers. 
+ if "container" in layer_meta_data["id"]: + continue + + # active might not be in legacy meta + if layer_meta_data.get("active", True) and layer_item.visible: + instance_names.append(layer_meta_data["subset"]) + + if len(instance_names) == 0: + variants = proj_settings.get( + "photoshop", {}).get( + "create", {}).get( + "CreateImage", {}).get( + "default_variants", ['']) + family = "image" + + variant = context.data.get("variant") or variants[0] + + subset_name = get_subset_name( + family, variant, task_name, asset_doc, + project_name, host_name + ) + + instance = context.create_instance(subset_name) + instance.data["family"] = family + instance.data["asset"] = asset_name + instance.data["subset"] = subset_name + instance.data["ids"] = publishable_ids + instance.data["publish"] = True + instance.data["creator_identifier"] = "auto_image" + + if auto_creator["mark_for_review"]: + instance.data["creator_attributes"] = {"mark_for_review": True} + instance.data["families"] = ["review"] + + self.log.info("auto image instance: {} ".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_review.py b/openpype/hosts/photoshop/plugins/publish/collect_auto_review.py new file mode 100644 index 0000000000..7de4adcaf4 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_auto_review.py @@ -0,0 +1,92 @@ +""" +Requires: + None + +Provides: + instance -> family ("review") +""" +import pyblish.api + +from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import get_subset_name + + +class CollectAutoReview(pyblish.api.ContextPlugin): + """Create review instance in non artist based workflow. + + Called only if PS is triggered in Webpublisher or in tests. + """ + + label = "Collect Auto Review" + hosts = ["photoshop"] + order = pyblish.api.CollectorOrder + 0.2 + targets = ["remotepublish"] + + publish = True + + def process(self, context): + family = "review" + has_review = False + for instance in context: + if instance.data["family"] == family: + self.log.debug("Review instance found, won't create new") + has_review = True + + creator_attributes = instance.data.get("creator_attributes", {}) + if (creator_attributes.get("mark_for_review") and + "review" not in instance.data["families"]): + instance.data["families"].append("review") + + if has_review: + return + + stub = photoshop.stub() + stored_items = stub.get_layers_metadata() + for item in stored_items: + if item.get("creator_identifier") == family: + if not item.get("active"): + self.log.debug("Review instance disabled") + return + + auto_creator = context.data["project_settings"].get( + "photoshop", {}).get( + "create", {}).get( + "ReviewCreator", {}) + + if not auto_creator or not auto_creator["enabled"]: + self.log.debug("Review creator disabled, won't create new") + return + + variant = (context.data.get("variant") or + auto_creator["default_variant"]) + + project_name = context.data["anatomyData"]["project"]["name"] + proj_settings = context.data["project_settings"] + task_name = context.data["anatomyData"]["task"]["name"] + host_name = context.data["hostName"] + asset_doc = context.data["assetEntity"] + asset_name = asset_doc["name"] + + subset_name = get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name, + host_name=host_name, + project_settings=proj_settings + ) + + instance = context.create_instance(subset_name) + instance.data.update({ + "subset": subset_name, + "label": subset_name, + "name": subset_name, + "family": family, + "families": [], 
+ "representations": [], + "asset": asset_name, + "publish": self.publish + }) + + self.log.debug("auto review created::{}".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py new file mode 100644 index 0000000000..d10cf62c67 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_auto_workfile.py @@ -0,0 +1,99 @@ +import os +import pyblish.api + +from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import get_subset_name + + +class CollectAutoWorkfile(pyblish.api.ContextPlugin): + """Collect current script for publish.""" + + order = pyblish.api.CollectorOrder + 0.2 + label = "Collect Workfile" + hosts = ["photoshop"] + + targets = ["remotepublish"] + + def process(self, context): + family = "workfile" + file_path = context.data["currentFile"] + _, ext = os.path.splitext(file_path) + staging_dir = os.path.dirname(file_path) + base_name = os.path.basename(file_path) + workfile_representation = { + "name": ext[1:], + "ext": ext[1:], + "files": base_name, + "stagingDir": staging_dir, + } + + for instance in context: + if instance.data["family"] == family: + self.log.debug("Workfile instance found, won't create new") + instance.data.update({ + "label": base_name, + "name": base_name, + "representations": [], + }) + + # creating representation + _, ext = os.path.splitext(file_path) + instance.data["representations"].append( + workfile_representation) + + return + + stub = photoshop.stub() + stored_items = stub.get_layers_metadata() + for item in stored_items: + if item.get("creator_identifier") == family: + if not item.get("active"): + self.log.debug("Workfile instance disabled") + return + + project_name = context.data["anatomyData"]["project"]["name"] + proj_settings = context.data["project_settings"] + auto_creator = proj_settings.get( + "photoshop", {}).get( + "create", {}).get( + "WorkfileCreator", {}) + + if not auto_creator or not auto_creator["enabled"]: + self.log.debug("Workfile creator disabled, won't create new") + return + + # context.data["variant"] might come only from collect_batch_data + variant = (context.data.get("variant") or + auto_creator["default_variant"]) + + task_name = context.data["anatomyData"]["task"]["name"] + host_name = context.data["hostName"] + asset_doc = context.data["assetEntity"] + asset_name = asset_doc["name"] + + subset_name = get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name, + host_name=host_name, + project_settings=proj_settings + ) + + # Create instance + instance = context.create_instance(subset_name) + instance.data.update({ + "subset": subset_name, + "label": base_name, + "name": base_name, + "family": family, + "families": [], + "representations": [], + "asset": asset_name + }) + + # creating representation + instance.data["representations"].append(workfile_representation) + + self.log.debug("auto workfile review created:{}".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py index 5d50a78914..a5fea7ac7d 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py @@ -22,6 +22,7 @@ from openpype_modules.webpublisher.lib import ( get_batch_asset_task_info, parse_json ) +from openpype.tests.lib import is_in_tests class CollectBatchData(pyblish.api.ContextPlugin): @@ 
-39,7 +40,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): def process(self, context): self.log.info("CollectBatchData") batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") - if os.environ.get("IS_TEST"): + if is_in_tests(): self.log.debug("Automatic testing, no batch data, skipping") return diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py index c157c932fd..90fca8398f 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py @@ -6,6 +6,7 @@ import pyblish.api from openpype.lib import prepare_template_data from openpype.hosts.photoshop import api as photoshop from openpype.settings import get_project_settings +from openpype.tests.lib import is_in_tests class CollectColorCodedInstances(pyblish.api.ContextPlugin): @@ -46,7 +47,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): def process(self, context): self.log.info("CollectColorCodedInstances") batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") - if (os.environ.get("IS_TEST") and + if (is_in_tests() and (not batch_dir or not os.path.exists(batch_dir))): self.log.debug("Automatic testing, no batch data, skipping") return diff --git a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py b/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py index 64c99b4fc1..dc0678c9af 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py @@ -43,7 +43,7 @@ class CollectExtensionVersion(pyblish.api.ContextPlugin): with open(manifest_url) as fp: content = fp.read() - found = re.findall(r'(ExtensionBundleVersion=")([0-10\.]+)(")', + found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")', content) if found: expected_version = found[0][1] diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py deleted file mode 100644 index b466ec8687..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py +++ /dev/null @@ -1,116 +0,0 @@ -import pprint - -import pyblish.api - -from openpype.settings import get_project_settings -from openpype.hosts.photoshop import api as photoshop -from openpype.lib import prepare_template_data -from openpype.pipeline import legacy_io - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by LayerSet and file metadata - - Collects publishable instances from file metadata or enhance - already collected by creator (family == "image"). - - If no image instances are explicitly created, it looks if there is value - in `flatten_subset_template` (configurable in Settings), in that case it - produces flatten image with all visible layers. 
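(A minimal sketch of how such a flatten subset template resolves, assuming
`prepare_template_data` only adds capitalization variants of the fill keys;
the template and values here are illustrative:)

    from openpype.lib import prepare_template_data

    fill_pairs = {"variant": "Main", "family": "image", "task": "comp"}
    # prepare_template_data() adds case variants such as {Variant}, {FAMILY}
    subset = "{family}{Variant}".format(**prepare_template_data(fill_pairs))
    # -> "imageMain"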
- - Identifier: - id (str): "pyblish.avalon.instance" - """ - - label = "Collect Instances" - order = pyblish.api.CollectorOrder - hosts = ["photoshop"] - families_mapping = { - "image": [] - } - # configurable in Settings - flatten_subset_template = "" - - def process(self, context): - instance_by_layer_id = {} - for instance in context: - if ( - instance.data["family"] == "image" and - instance.data.get("members")): - layer_id = str(instance.data["members"][0]) - instance_by_layer_id[layer_id] = instance - - stub = photoshop.stub() - layer_items = stub.get_layers() - layers_meta = stub.get_layers_metadata() - instance_names = [] - - all_layer_ids = [] - for layer_item in layer_items: - layer_meta_data = stub.read(layer_item, layers_meta) - all_layer_ids.append(layer_item.id) - - # Skip layers without metadata. - if layer_meta_data is None: - continue - - # Skip containers. - if "container" in layer_meta_data["id"]: - continue - - # active might not be in legacy meta - if not layer_meta_data.get("active", True): - continue - - instance = instance_by_layer_id.get(str(layer_item.id)) - if instance is None: - instance = context.create_instance(layer_meta_data["subset"]) - - instance.data["layer"] = layer_item - instance.data.update(layer_meta_data) - instance.data["families"] = self.families_mapping[ - layer_meta_data["family"] - ] - instance.data["publish"] = layer_item.visible - instance_names.append(layer_meta_data["subset"]) - - # Produce diagnostic message for any graphical - # user interface interested in visualising it. - self.log.info("Found: \"%s\" " % instance.data["name"]) - self.log.info("instance: {} ".format( - pprint.pformat(instance.data, indent=4))) - - if len(instance_names) != len(set(instance_names)): - self.log.warning("Duplicate instances found. " + - "Remove unwanted via SubsetManager") - - if len(instance_names) == 0 and self.flatten_subset_template: - project_name = context.data["projectEntity"]["name"] - variants = get_project_settings(project_name).get( - "photoshop", {}).get( - "create", {}).get( - "CreateImage", {}).get( - "defaults", ['']) - family = "image" - task_name = legacy_io.Session["AVALON_TASK"] - asset_name = context.data["assetEntity"]["name"] - - variant = context.data.get("variant") or variants[0] - fill_pairs = { - "variant": variant, - "family": family, - "task": task_name - } - - subset = self.flatten_subset_template.format( - **prepare_template_data(fill_pairs)) - - instance = context.create_instance(subset) - instance.data["family"] = family - instance.data["asset"] = asset_name - instance.data["subset"] = subset - instance.data["ids"] = all_layer_ids - instance.data["families"] = self.families_mapping[family] - instance.data["publish"] = True - - self.log.info("flatten instance: {} ".format(instance.data)) diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py index 7e598a8250..87ec4ee3f1 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_review.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py @@ -14,10 +14,7 @@ from openpype.pipeline.create import get_subset_name class CollectReview(pyblish.api.ContextPlugin): - """Gather the active document as review instance. - - Triggers once even if no 'image' is published as by defaults it creates - flatten image from a workfile. + """Adds review to families for instances marked to be reviewable. 
""" label = "Collect Review" @@ -28,25 +25,8 @@ class CollectReview(pyblish.api.ContextPlugin): publish = True def process(self, context): - family = "review" - subset = get_subset_name( - family, - context.data.get("variant", ''), - context.data["anatomyData"]["task"]["name"], - context.data["assetEntity"], - context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"], - project_settings=context.data["project_settings"] - ) - - instance = context.create_instance(subset) - instance.data.update({ - "subset": subset, - "label": subset, - "name": subset, - "family": family, - "families": [], - "representations": [], - "asset": os.environ["AVALON_ASSET"], - "publish": self.publish - }) + for instance in context: + creator_attributes = instance.data["creator_attributes"] + if (creator_attributes.get("mark_for_review") and + "review" not in instance.data["families"]): + instance.data["families"].append("review") diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py index 9a5aad5569..9625464499 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py @@ -14,50 +14,19 @@ class CollectWorkfile(pyblish.api.ContextPlugin): default_variant = "Main" def process(self, context): - existing_instance = None for instance in context: if instance.data["family"] == "workfile": - self.log.debug("Workfile instance found, won't create new") - existing_instance = instance - break + file_path = context.data["currentFile"] + _, ext = os.path.splitext(file_path) + staging_dir = os.path.dirname(file_path) + base_name = os.path.basename(file_path) - family = "workfile" - # context.data["variant"] might come only from collect_batch_data - variant = context.data.get("variant") or self.default_variant - subset = get_subset_name( - family, - variant, - context.data["anatomyData"]["task"]["name"], - context.data["assetEntity"], - context.data["anatomyData"]["project"]["name"], - host_name=context.data["hostName"], - project_settings=context.data["project_settings"] - ) - - file_path = context.data["currentFile"] - staging_dir = os.path.dirname(file_path) - base_name = os.path.basename(file_path) - - # Create instance - if existing_instance is None: - instance = context.create_instance(subset) - instance.data.update({ - "subset": subset, - "label": base_name, - "name": base_name, - "family": family, - "families": [], - "representations": [], - "asset": os.environ["AVALON_ASSET"] - }) - else: - instance = existing_instance - - # creating representation - _, ext = os.path.splitext(file_path) - instance.data["representations"].append({ - "name": ext[1:], - "ext": ext[1:], - "files": base_name, - "stagingDir": staging_dir, - }) + # creating representation + _, ext = os.path.splitext(file_path) + instance.data["representations"].append({ + "name": ext[1:], + "ext": ext[1:], + "files": base_name, + "stagingDir": staging_dir, + }) + return diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py index 01022ce0b2..d5416a389d 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_review.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py @@ -47,32 +47,42 @@ class ExtractReview(publish.Extractor): layers = self._get_layers_from_image_instances(instance) self.log.info("Layers image instance found: {}".format(layers)) + repre_name = "jpg" + 
repre_skeleton = { + "name": repre_name, + "ext": "jpg", + "stagingDir": staging_dir, + "tags": self.jpg_options['tags'], + } + + if instance.data["family"] != "review": + # enable creation of review, without this jpg review would clash + # with jpg of the image family + output_name = repre_name + repre_name = "{}_{}".format(repre_name, output_name) + repre_skeleton.update({"name": repre_name, + "outputName": output_name}) + if self.make_image_sequence and len(layers) > 1: self.log.info("Extract layers to image sequence.") img_list = self._save_sequence_images(staging_dir, layers) - instance.data["representations"].append({ - "name": "jpg", - "ext": "jpg", - "files": img_list, + repre_skeleton.update({ "frameStart": 0, "frameEnd": len(img_list), "fps": fps, - "stagingDir": staging_dir, - "tags": self.jpg_options['tags'], + "files": img_list, }) + instance.data["representations"].append(repre_skeleton) processed_img_names = img_list else: self.log.info("Extract layers to flatten image.") img_list = self._save_flatten_image(staging_dir, layers) - instance.data["representations"].append({ - "name": "jpg", - "ext": "jpg", - "files": img_list, # cannot be [] for single frame - "stagingDir": staging_dir, - "tags": self.jpg_options['tags'] + repre_skeleton.update({ + "files": img_list, }) + instance.data["representations"].append(repre_skeleton) processed_img_names = [img_list] ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") @@ -129,7 +139,6 @@ class ExtractReview(publish.Extractor): "frameStart": 1, "frameEnd": no_of_frames, "fps": fps, - "preview": True, "tags": self.mov_options['tags'] }) diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml new file mode 100644 index 0000000000..e05ac92182 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml @@ -0,0 +1,20 @@ + + + +Asset does not match + +## Collected asset name is not same as in context + + {msg} +### How to repair? + {repair_msg} + Refresh Publish afterwards (circle arrow at the bottom right). + + If that's not correct value, close workfile and reopen via Workfiles to get + proper context asset name OR disable this validator and publish again + if you are publishing to different context deliberately. + + (Context means combination of project, asset name and task name.) + + + \ No newline at end of file diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml index 5a1e266748..023bbf26fa 100644 --- a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml @@ -10,7 +10,7 @@ Subset or layer name cannot contain specific characters (spaces etc) which could ### How to repair? -You can fix this with "repair" button on the right. +You can fix this with "repair" button on the right and press Refresh publishing button at the bottom right. 
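(For reference, a sketch of how the placeholders in these help files get
filled; it mirrors the `validate_instance_asset` change later in this diff,
where `{msg}` and `{repair_msg}` come from the validator's formatting data:)

    formatting_data = {"msg": msg, "repair_msg": repair_msg}
    raise PublishXmlValidationError(
        self, msg, formatting_data=formatting_data
    )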
### __Detailed Info__ (optional)
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
index 2609f7a8cf..b9d721dbdb 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py
@@ -1,7 +1,11 @@
 import pyblish.api

 from openpype.pipeline import legacy_io
-from openpype.pipeline.publish import ValidateContentsOrder
+from openpype.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin
+)
 from openpype.hosts.photoshop import api as photoshop
@@ -31,30 +35,38 @@ class ValidateInstanceAssetRepair(pyblish.api.Action):
         stub.imprint(instance[0], data)

-class ValidateInstanceAsset(pyblish.api.InstancePlugin):
+class ValidateInstanceAsset(OptionalPyblishPluginMixin,
+                            pyblish.api.InstancePlugin):
     """Validate the instance asset is the current selected context asset.

-        As it might happen that multiple worfiles are opened, switching
-        between them would mess with selected context.
-        In that case outputs might be output under wrong asset!
+    As it might happen that multiple workfiles are opened, switching
+    between them would mess with selected context.
+    In that case outputs might be output under wrong asset!

-        Repair action will use Context asset value (from Workfiles or Launcher)
-        Closing and reopening with Workfiles will refresh Context value.
+    Repair action will use Context asset value (from Workfiles or Launcher)
+    Closing and reopening with Workfiles will refresh Context value.
     """

     label = "Validate Instance Asset"
     hosts = ["photoshop"]
+    optional = True
     actions = [ValidateInstanceAssetRepair]
     order = ValidateContentsOrder

     def process(self, instance):
         instance_asset = instance.data["asset"]
         current_asset = legacy_io.Session["AVALON_ASSET"]
-        msg = (
-            f"Instance asset {instance_asset} is not the same "
-            f"as current context {current_asset}. PLEASE DO:\n"
-            f"Repair with 'A' action to use '{current_asset}'.\n"
-            f"If that's not correct value, close workfile and "
-            f"reopen via Workfiles!"
-        )
-        assert instance_asset == current_asset, msg
+
+        if instance_asset != current_asset:
+            msg = (
+                f"Instance asset {instance_asset} is not the same "
+                f"as current context {current_asset}."
+
+            )
+            repair_msg = (
+                f"Repair with 'Repair' button to use '{current_asset}'.\n"
+            )
+            formatting_data = {"msg": msg,
+                               "repair_msg": repair_msg}
+            raise PublishXmlValidationError(self, msg,
+                                            formatting_data=formatting_data)
diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
index 0665aff9d0..07810f505e 100644
--- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py
+++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py
@@ -84,7 +84,7 @@ class ValidateNaming(pyblish.api.InstancePlugin):
     replace_char = ''

     def process(self, instance):
-        help_msg = ' Use Repair action (A) in Pyblish to fix it.'
+        help_msg = ' Use Repair button to fix it and then refresh publish.'
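        # (Hedged example: the disallowed characters are those outside
        # SUBSET_NAME_ALLOWED_SYMBOLS, the same pattern the removed legacy
        # creator used above; e.g. assuming replace_char "_":
        #   re.sub("[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
        #          "_", "bg paint 01")  ->  "bg_paint_01")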
layer = instance.data.get("layer") if layer: diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py deleted file mode 100644 index 78e84729ce..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py +++ /dev/null @@ -1,39 +0,0 @@ -import collections -import pyblish.api -from openpype.pipeline.publish import ( - ValidateContentsOrder, - PublishXmlValidationError, -) - - -class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): - """ - Validate that all subset's names are unique. - """ - - label = "Validate Subset Uniqueness" - hosts = ["photoshop"] - order = ValidateContentsOrder - families = ["image"] - - def process(self, context): - subset_names = [] - - for instance in context: - self.log.info("instance:: {}".format(instance.data)) - if instance.data.get('publish'): - subset_names.append(instance.data.get('subset')) - - non_unique = \ - [item - for item, count in collections.Counter(subset_names).items() - if count > 1] - msg = ("Instance subset names {} are not unique. ".format(non_unique) + - "Remove duplicates via SubsetManager.") - formatting_data = { - "non_unique": ",".join(non_unique) - } - - if non_unique: - raise PublishXmlValidationError(self, msg, - formatting_data=formatting_data) diff --git a/openpype/hosts/resolve/api/__init__.py b/openpype/hosts/resolve/api/__init__.py index 00a598548e..2b4546f8d6 100644 --- a/openpype/hosts/resolve/api/__init__.py +++ b/openpype/hosts/resolve/api/__init__.py @@ -24,6 +24,8 @@ from .lib import ( get_project_manager, get_current_project, get_current_timeline, + get_any_timeline, + get_new_timeline, create_bin, get_media_pool_item, create_media_pool_item, @@ -95,6 +97,8 @@ __all__ = [ "get_project_manager", "get_current_project", "get_current_timeline", + "get_any_timeline", + "get_new_timeline", "create_bin", "get_media_pool_item", "create_media_pool_item", diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index f41eb36caf..a44c527f13 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -15,6 +15,7 @@ log = Logger.get_logger(__name__) self = sys.modules[__name__] self.project_manager = None self.media_storage = None +self.current_project = None # OpenPype sequential rename variables self.rename_index = 0 @@ -85,22 +86,60 @@ def get_media_storage(): def get_current_project(): - # initialize project manager - get_project_manager() + """Get current project object. + """ + if not self.current_project: + self.current_project = get_project_manager().GetCurrentProject() - return self.project_manager.GetCurrentProject() + return self.current_project def get_current_timeline(new=False): - # get current project + """Get current timeline object. + + Args: + new (bool)[optional]: [DEPRECATED] if True it will create + new timeline if none exists + + Returns: + TODO: will need to reflect future `None` + object: resolve.Timeline + """ project = get_current_project() + timeline = project.GetCurrentTimeline() + # return current timeline if any + if timeline: + return timeline + + # TODO: [deprecated] and will be removed in future if new: - media_pool = project.GetMediaPool() - new_timeline = media_pool.CreateEmptyTimeline(self.pype_timeline_name) - project.SetCurrentTimeline(new_timeline) + return get_new_timeline() - return project.GetCurrentTimeline() + +def get_any_timeline(): + """Get any timeline object. 
+ + Returns: + object | None: resolve.Timeline + """ + project = get_current_project() + timeline_count = project.GetTimelineCount() + if timeline_count > 0: + return project.GetTimelineByIndex(1) + + +def get_new_timeline(): + """Get new timeline object. + + Returns: + object: resolve.Timeline + """ + project = get_current_project() + media_pool = project.GetMediaPool() + new_timeline = media_pool.CreateEmptyTimeline(self.pype_timeline_name) + project.SetCurrentTimeline(new_timeline) + return new_timeline def create_bin(name: str, root: object = None) -> object: @@ -250,7 +289,7 @@ def create_timeline_item(media_pool_item: object, media_pool_item, timeline) assert output_timeline_item, AssertionError( - "Track Item with name `{}` doesnt exist on the timeline: `{}`".format( + "Track Item with name `{}` doesn't exist on the timeline: `{}`".format( clip_name, timeline.GetName() )) return output_timeline_item @@ -312,7 +351,13 @@ def get_current_timeline_items( track_type = track_type or "video" selecting_color = selecting_color or "Chocolate" project = get_current_project() - timeline = get_current_timeline() + + # get timeline anyhow + timeline = ( + get_current_timeline() or + get_any_timeline() or + get_new_timeline() + ) selected_clips = [] # get all tracks count filtered by track type @@ -571,7 +616,7 @@ def create_compound_clip(clip_data, name, folder): # Set current folder to input media_pool_folder: mp.SetCurrentFolder(folder) - # check if clip doesnt exist already: + # check if clip doesn't exist already: clips = folder.GetClipList() cct = next((c for c in clips if c.GetName() in name), None) @@ -582,7 +627,7 @@ def create_compound_clip(clip_data, name, folder): # Create empty timeline in current folder and give name: cct = mp.CreateEmptyTimeline(name) - # check if clip doesnt exist already: + # check if clip doesn't exist already: clips = folder.GetClipList() cct = next((c for c in clips if c.GetName() in name), None) diff --git a/openpype/hosts/resolve/api/menu.py b/openpype/hosts/resolve/api/menu.py index eeb9e65dec..b3717e01ea 100644 --- a/openpype/hosts/resolve/api/menu.py +++ b/openpype/hosts/resolve/api/menu.py @@ -69,7 +69,7 @@ class OpenPypeMenu(QtWidgets.QWidget): # "Set colorspace from presets", self # ) # reset_resolution_btn = QtWidgets.QPushButton( - # "Reset Resolution from peresets", self + # "Set Resolution from presets", self # ) layout = QtWidgets.QVBoxLayout(self) @@ -108,7 +108,7 @@ class OpenPypeMenu(QtWidgets.QWidget): libload_btn.clicked.connect(self.on_libload_clicked) # rename_btn.clicked.connect(self.on_rename_clicked) # set_colorspace_btn.clicked.connect(self.on_set_colorspace_clicked) - # reset_resolution_btn.clicked.connect(self.on_reset_resolution_clicked) + # reset_resolution_btn.clicked.connect(self.on_set_resolution_clicked) experimental_btn.clicked.connect(self.on_experimental_clicked) def on_workfile_clicked(self): @@ -145,8 +145,8 @@ class OpenPypeMenu(QtWidgets.QWidget): def on_set_colorspace_clicked(self): print("Clicked Set Colorspace") - def on_reset_resolution_clicked(self): - print("Clicked Reset Resolution") + def on_set_resolution_clicked(self): + print("Clicked Set Resolution") def on_experimental_clicked(self): host_tools.show_experimental_tools_dialog() diff --git a/openpype/hosts/resolve/api/menu_style.qss b/openpype/hosts/resolve/api/menu_style.qss index d2d3d1ed37..3d51c7139f 100644 --- a/openpype/hosts/resolve/api/menu_style.qss +++ b/openpype/hosts/resolve/api/menu_style.qss @@ -61,7 +61,7 @@ QVBoxLayout { background-color: 
#282828; } -#Devider { +#Divider { border: 1px solid #090909; background-color: #585858; } diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 77e30149fd..e5846c2fc2 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -327,7 +327,10 @@ class ClipLoader: self.active_timeline = options["timeline"] else: # create new sequence - self.active_timeline = lib.get_current_timeline(new=True) + self.active_timeline = ( + lib.get_current_timeline() or + lib.get_new_timeline() + ) else: self.active_timeline = lib.get_current_timeline() @@ -715,7 +718,7 @@ class PublishClip: # increasing steps by index of rename iteration self.count_steps *= self.rename_index - hierarchy_formating_data = dict() + hierarchy_formatting_data = dict() _data = self.timeline_item_default_data.copy() if self.ui_inputs: # adding tag metadata from ui @@ -749,13 +752,13 @@ class PublishClip: # fill up pythonic expresisons in hierarchy data for k, _v in self.hierarchy_data.items(): - hierarchy_formating_data[k] = _v["value"].format(**_data) + hierarchy_formatting_data[k] = _v["value"].format(**_data) else: # if no gui mode then just pass default data - hierarchy_formating_data = self.hierarchy_data + hierarchy_formatting_data = self.hierarchy_data tag_hierarchy_data = self._solve_tag_hierarchy_data( - hierarchy_formating_data + hierarchy_formatting_data ) tag_hierarchy_data.update({"heroTrack": True}) @@ -792,18 +795,17 @@ class PublishClip: else: self.tag_data.update({"reviewTrack": None}) - - def _solve_tag_hierarchy_data(self, hierarchy_formating_data): + def _solve_tag_hierarchy_data(self, hierarchy_formatting_data): """ Solve tag data from hierarchy data and templates. """ # fill up clip name and hierarchy keys - hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data) - clip_name_filled = self.clip_name.format(**hierarchy_formating_data) + hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data) + clip_name_filled = self.clip_name.format(**hierarchy_formatting_data) return { "newClipName": clip_name_filled, "hierarchy": hierarchy_filled, "parents": self.parents, - "hierarchyData": hierarchy_formating_data, + "hierarchyData": hierarchy_formatting_data, "subset": self.subset, "family": self.subset_family, "families": ["clip"] diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py index 5ce73eea53..5966fa6a43 100644 --- a/openpype/hosts/resolve/api/workio.py +++ b/openpype/hosts/resolve/api/workio.py @@ -43,18 +43,22 @@ def open_file(filepath): """ Loading project """ + + from . 
import bmdvr + pm = get_project_manager() + page = bmdvr.GetCurrentPage() + if page is not None: + # Save current project only if Resolve has an active page, otherwise + # we consider Resolve being in a pre-launch state (no open UI yet) + project = pm.GetCurrentProject() + print(f"Saving current project: {project}") + pm.SaveProject() + file = os.path.basename(filepath) fname, _ = os.path.splitext(file) dname, _ = fname.split("_v") - - # deal with current project - project = pm.GetCurrentProject() - log.info(f"Test `pm`: {pm}") - pm.SaveProject() - try: - log.info(f"Test `dname`: {dname}") if not set_project_manager_to_folder_name(dname): raise # load project from input path @@ -72,6 +76,7 @@ def open_file(filepath): return False return True + def current_file(): pm = get_project_manager() current_dir = os.getenv("AVALON_WORKDIR") diff --git a/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py new file mode 100644 index 0000000000..0e27ddb8c3 --- /dev/null +++ b/openpype/hosts/resolve/hooks/pre_resolve_launch_last_workfile.py @@ -0,0 +1,45 @@ +import os + +from openpype.lib import PreLaunchHook +import openpype.hosts.resolve + + +class ResolveLaunchLastWorkfile(PreLaunchHook): + """Special hook to open last workfile for Resolve. + + Checks 'start_last_workfile', if set to False, it will not open last + workfile. This property is set explicitly in Launcher. + """ + + # Execute after workfile template copy + order = 10 + app_groups = ["resolve"] + + def execute(self): + if not self.data.get("start_last_workfile"): + self.log.info("It is set to not start last workfile on start.") + return + + last_workfile = self.data.get("last_workfile_path") + if not last_workfile: + self.log.warning("Last workfile was not collected.") + return + + if not os.path.exists(last_workfile): + self.log.info("Current context does not have any workfile yet.") + return + + # Add path to launch environment for the startup script to pick up + self.log.info(f"Setting OPENPYPE_RESOLVE_OPEN_ON_LAUNCH to launch " + f"last workfile: {last_workfile}") + key = "OPENPYPE_RESOLVE_OPEN_ON_LAUNCH" + self.launch_context.env[key] = last_workfile + + # Set the openpype prelaunch startup script path for easy access + # in the LUA .scriptlib code + op_resolve_root = os.path.dirname(openpype.hosts.resolve.__file__) + script_path = os.path.join(op_resolve_root, "startup.py") + key = "OPENPYPE_RESOLVE_STARTUP_SCRIPT" + self.launch_context.env[key] = script_path + self.log.info("Setting OPENPYPE_RESOLVE_STARTUP_SCRIPT to: " + f"{script_path}") diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/openpype/hosts/resolve/hooks/pre_resolve_setup.py index 8574b3ad01..d066fc2da2 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py +++ b/openpype/hosts/resolve/hooks/pre_resolve_setup.py @@ -1,4 +1,5 @@ import os +from pathlib import Path import platform from openpype.lib import PreLaunchHook from openpype.hosts.resolve.utils import setup @@ -6,33 +7,57 @@ from openpype.hosts.resolve.utils import setup class ResolvePrelaunch(PreLaunchHook): """ - This hook will check if current workfile path has Resolve - project inside. IF not, it initialize it and finally it pass - path to the project by environment variable to Premiere launcher - shell script. 
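(A condensed sketch of how the variables set by the last-workfile hook above
are consumed: the .scriptlib runs the startup script, which opens the
workfile; names taken from startup.py added later in this diff:)

    import os

    workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH")
    if workfile_path:
        open_file(workfile_path)  # installs the host, then opens the file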
+ This hook will set up the Resolve scripting environment as described in + Resolve's documentation found with the installed application at + {resolve}/Support/Developer/Scripting/README.txt + + Prepares the following environment variables: + - `RESOLVE_SCRIPT_API` + - `RESOLVE_SCRIPT_LIB` + + It adds $RESOLVE_SCRIPT_API/Modules to PYTHONPATH. + + Additionally it sets up the Python home for Python 3 based on the + RESOLVE_PYTHON3_HOME in the environment (usually defined in OpenPype's + Application environment for Resolve by the admin). For this it sets + PYTHONHOME and PATH variables. + + It also defines: + - `RESOLVE_UTILITY_SCRIPTS_DIR`: Destination directory for OpenPype + Fusion scripts to be copied to for Resolve to pick them up. + - `OPENPYPE_LOG_NO_COLORS` to True to ensure OP doesn't try to + use logging with terminal colors as it fails in Resolve. + """ + app_groups = ["resolve"] def execute(self): current_platform = platform.system().lower() - PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "") - RESOLVE_SCRIPT_API_ = { + programdata = self.launch_context.env.get("PROGRAMDATA", "") + resolve_script_api_locations = { "windows": ( - f"{PROGRAMDATA}/Blackmagic Design/" + f"{programdata}/Blackmagic Design/" "DaVinci Resolve/Support/Developer/Scripting" ), "darwin": ( "/Library/Application Support/Blackmagic Design" "/DaVinci Resolve/Developer/Scripting" ), - "linux": "/opt/resolve/Developer/Scripting" + "linux": "/opt/resolve/Developer/Scripting", } - RESOLVE_SCRIPT_API = os.path.normpath( - RESOLVE_SCRIPT_API_[current_platform]) - self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API + resolve_script_api = Path( + resolve_script_api_locations[current_platform] + ) + self.log.info( + f"setting RESOLVE_SCRIPT_API variable to {resolve_script_api}" + ) + self.launch_context.env[ + "RESOLVE_SCRIPT_API" + ] = resolve_script_api.as_posix() - RESOLVE_SCRIPT_LIB_ = { + resolve_script_lib_dirs = { "windows": ( "C:/Program Files/Blackmagic Design" "/DaVinci Resolve/fusionscript.dll" @@ -41,61 +66,69 @@ class ResolvePrelaunch(PreLaunchHook): "/Applications/DaVinci Resolve/DaVinci Resolve.app" "/Contents/Libraries/Fusion/fusionscript.so" ), - "linux": "/opt/resolve/libs/Fusion/fusionscript.so" + "linux": "/opt/resolve/libs/Fusion/fusionscript.so", } - RESOLVE_SCRIPT_LIB = os.path.normpath( - RESOLVE_SCRIPT_LIB_[current_platform]) - self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB + resolve_script_lib = Path(resolve_script_lib_dirs[current_platform]) + self.launch_context.env[ + "RESOLVE_SCRIPT_LIB" + ] = resolve_script_lib.as_posix() + self.log.info( + f"setting RESOLVE_SCRIPT_LIB variable to {resolve_script_lib}" + ) - # TODO: add OTIO installation from `openpype/requirements.py` + # TODO: add OTIO installation from `openpype/requirements.py` # making sure python <3.9.* is installed at provided path - python3_home = os.path.normpath( - self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")) + python3_home = Path( + self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "") + ) - assert os.path.isdir(python3_home), ( + assert python3_home.is_dir(), ( "Python 3 is not installed at the provided folder path. Either " "make sure the `environments\resolve.json` is having correctly " "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed " f"in given path. 
\nRESOLVE_PYTHON3_HOME: `{python3_home}`" ) - self.launch_context.env["PYTHONHOME"] = python3_home - self.log.info(f"Path to Resolve Python folder: `{python3_home}`...") - - # add to the python path to path - env_path = self.launch_context.env["PATH"] - self.launch_context.env["PATH"] = os.pathsep.join([ - python3_home, - os.path.join(python3_home, "Scripts") - ] + env_path.split(os.pathsep)) - - self.log.debug(f"PATH: {self.launch_context.env['PATH']}") + python3_home_str = python3_home.as_posix() + self.launch_context.env["PYTHONHOME"] = python3_home_str + self.log.info(f"Path to Resolve Python folder: `{python3_home_str}`") # add to the PYTHONPATH env_pythonpath = self.launch_context.env["PYTHONPATH"] - self.launch_context.env["PYTHONPATH"] = os.pathsep.join([ - os.path.join(python3_home, "Lib", "site-packages"), - os.path.join(RESOLVE_SCRIPT_API, "Modules"), - ] + env_pythonpath.split(os.pathsep)) + modules_path = Path(resolve_script_api, "Modules").as_posix() + self.launch_context.env[ + "PYTHONPATH" + ] = f"{modules_path}{os.pathsep}{env_pythonpath}" self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}") - RESOLVE_UTILITY_SCRIPTS_DIR_ = { + # add the pythonhome folder to PATH because on Windows + # this is needed for Py3 to be correctly detected within Resolve + env_path = self.launch_context.env["PATH"] + self.log.info(f"Adding `{python3_home_str}` to the PATH variable") + self.launch_context.env[ + "PATH" + ] = f"{python3_home_str}{os.pathsep}{env_path}" + + self.log.debug(f"PATH: {self.launch_context.env['PATH']}") + + resolve_utility_scripts_dirs = { "windows": ( - f"{PROGRAMDATA}/Blackmagic Design" + f"{programdata}/Blackmagic Design" "/DaVinci Resolve/Fusion/Scripts/Comp" ), "darwin": ( "/Library/Application Support/Blackmagic Design" "/DaVinci Resolve/Fusion/Scripts/Comp" ), - "linux": "/opt/resolve/Fusion/Scripts/Comp" + "linux": "/opt/resolve/Fusion/Scripts/Comp", } - RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath( - RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform] + resolve_utility_scripts_dir = Path( + resolve_utility_scripts_dirs[current_platform] ) # setting utility scripts dir for scripts syncing - self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = ( - RESOLVE_UTILITY_SCRIPTS_DIR) + self.launch_context.env[ + "RESOLVE_UTILITY_SCRIPTS_DIR" + ] = resolve_utility_scripts_dir.as_posix() # remove terminal coloring tags self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True" diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index a0c78c182f..05bfb003d6 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -14,6 +14,10 @@ from openpype.hosts.resolve.api.pipeline import ( containerise, update_container, ) +from openpype.lib.transcoding import ( + VIDEO_EXTENSIONS, + IMAGE_EXTENSIONS +) class LoadClip(plugin.TimelineItemLoader): @@ -24,7 +28,11 @@ class LoadClip(plugin.TimelineItemLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", "mov"] + + representations = ["*"] + extensions = set( + ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS) + ) label = "Load as clip" order = -10 diff --git a/openpype/hosts/resolve/startup.py b/openpype/hosts/resolve/startup.py new file mode 100644 index 0000000000..79a64e0fbf --- /dev/null +++ b/openpype/hosts/resolve/startup.py @@ -0,0 +1,62 @@ +"""This script is used as a startup script in 
Resolve through a .scriptlib file + +It triggers directly after the launch of Resolve and it's recommended to keep +it optimized for fast performance since the Resolve UI is actually interactive +while this is running. As such, there's nothing ensuring the user isn't +continuing manually before any of the logic here runs. As such we also try +to delay any imports as much as possible. + +This code runs in a separate process to the main Resolve process. + +""" +import os + +import openpype.hosts.resolve.api + + +def ensure_installed_host(): + """Install resolve host with openpype and return the registered host. + + This function can be called multiple times without triggering an + additional install. + """ + from openpype.pipeline import install_host, registered_host + host = registered_host() + if host: + return host + + install_host(openpype.hosts.resolve.api) + return registered_host() + + +def launch_menu(): + print("Launching Resolve OpenPype menu..") + ensure_installed_host() + openpype.hosts.resolve.api.launch_pype_menu() + + +def open_file(path): + # Avoid the need to "install" the host + host = ensure_installed_host() + host.open_file(path) + + +def main(): + # Open last workfile + workfile_path = os.environ.get("OPENPYPE_RESOLVE_OPEN_ON_LAUNCH") + if workfile_path: + open_file(workfile_path) + else: + print("No last workfile set to open. Skipping..") + + # Launch OpenPype menu + from openpype.settings import get_project_settings + from openpype.pipeline.context_tools import get_current_project_name + project_name = get_current_project_name() + settings = get_project_settings(project_name) + if settings.get("resolve", {}).get("launch_openpype_menu_on_start", True): + launch_menu() + + +if __name__ == "__main__": + main() diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py similarity index 100% rename from openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py rename to openpype/hosts/resolve/utility_scripts/OpenPype__Menu.py diff --git a/openpype/hosts/resolve/utility_scripts/README.markdown b/openpype/hosts/resolve/utility_scripts/README.markdown deleted file mode 100644 index 8b13789179..0000000000 --- a/openpype/hosts/resolve/utility_scripts/README.markdown +++ /dev/null @@ -1 +0,0 @@ - diff --git a/openpype/hosts/resolve/utility_scripts/OTIO_export.py b/openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py similarity index 100% rename from openpype/hosts/resolve/utility_scripts/OTIO_export.py rename to openpype/hosts/resolve/utility_scripts/develop/OTIO_export.py diff --git a/openpype/hosts/resolve/utility_scripts/OTIO_import.py b/openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py similarity index 100% rename from openpype/hosts/resolve/utility_scripts/OTIO_import.py rename to openpype/hosts/resolve/utility_scripts/develop/OTIO_import.py diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py similarity index 100% rename from openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py rename to openpype/hosts/resolve/utility_scripts/develop/OpenPype_sync_util_scripts.py diff --git a/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib new file mode 100644 index 0000000000..ec9b30a18d --- /dev/null +++ b/openpype/hosts/resolve/utility_scripts/openpype_startup.scriptlib @@ -0,0 +1,21 
@@ +-- Run OpenPype's Python launch script for resolve +function file_exists(name) + local f = io.open(name, "r") + return f ~= nil and io.close(f) +end + + +openpype_startup_script = os.getenv("OPENPYPE_RESOLVE_STARTUP_SCRIPT") +if openpype_startup_script ~= nil then + script = fusion:MapPath(openpype_startup_script) + + if file_exists(script) then + -- We must use RunScript to ensure it runs in a separate + -- process to Resolve itself to avoid a deadlock for + -- certain imports of OpenPype libraries or Qt + print("Running launch script: " .. script) + fusion:RunScript(script) + else + print("Launch script not found at: " .. script) + end +end \ No newline at end of file diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py b/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py new file mode 100644 index 0000000000..8270496f64 --- /dev/null +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_timeline_op.py @@ -0,0 +1,13 @@ +#! python3 +from openpype.pipeline import install_host +from openpype.hosts.resolve import api as bmdvr +from openpype.hosts.resolve.api.lib import get_current_project + +if __name__ == "__main__": + install_host(bmdvr) + project = get_current_project() + timeline_count = project.GetTimelineCount() + print(f"Timeline count: {timeline_count}") + timeline = project.GetTimelineByIndex(timeline_count) + print(f"Timeline name: {timeline.GetName()}") + print(timeline.GetTrackCount("video")) diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py index 5881f153ae..5e3003862f 100644 --- a/openpype/hosts/resolve/utils.py +++ b/openpype/hosts/resolve/utils.py @@ -1,6 +1,6 @@ import os import shutil -from openpype.lib import Logger +from openpype.lib import Logger, is_running_from_build RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -8,30 +8,33 @@ RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) def setup(env): log = Logger.get_logger("ResolveSetup") scripts = {} - us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") - us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] + util_scripts_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") + util_scripts_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] - us_paths = [os.path.join( + util_scripts_paths = [os.path.join( RESOLVE_ROOT_DIR, "utility_scripts" )] # collect script dirs - if us_env: - log.info("Utility Scripts Env: `{}`".format(us_env)) - us_paths = us_env.split( - os.pathsep) + us_paths + if util_scripts_env: + log.info("Utility Scripts Env: `{}`".format(util_scripts_env)) + util_scripts_paths = util_scripts_env.split( + os.pathsep) + util_scripts_paths # collect scripts from dirs - for path in us_paths: + for path in util_scripts_paths: scripts.update({path: os.listdir(path)}) - log.info("Utility Scripts Dir: `{}`".format(us_paths)) + log.info("Utility Scripts Dir: `{}`".format(util_scripts_paths)) log.info("Utility Scripts: `{}`".format(scripts)) + # Make sure scripts dir exists + os.makedirs(util_scripts_dir, exist_ok=True) + # make sure no script file is in folder - for s in os.listdir(us_dir): - path = os.path.join(us_dir, s) + for script in os.listdir(util_scripts_dir): + path = os.path.join(util_scripts_dir, script) log.info("Removing `{}`...".format(path)) if os.path.isdir(path): shutil.rmtree(path, onerror=None) @@ -39,12 +42,25 @@ def setup(env): os.remove(path) # copy scripts into Resolve's utility scripts dir - for d, sl in scripts.items(): - # directory and scripts list - for s in sl: - # script in script list - src = 
os.path.join(d, s) - dst = os.path.join(us_dir, s) + for directory, scripts in scripts.items(): + for script in scripts: + if ( + is_running_from_build() and + script in ["tests", "develop"] + ): + # only copy those if started from build + continue + + src = os.path.join(directory, script) + dst = os.path.join(util_scripts_dir, script) + + # TODO: Make this a less hacky workaround + if script == "openpype_startup.scriptlib": + # Handle special case for scriptlib that needs to be a folder + # up from the Comp folder in the Fusion scripts + dst = os.path.join(os.path.dirname(util_scripts_dir), + script) + log.info("Copying `{}` to `{}`...".format(src, dst)) if os.path.isdir(src): shutil.copytree( diff --git a/openpype/hosts/standalonepublisher/addon.py b/openpype/hosts/standalonepublisher/addon.py index 65a4226664..67204b581b 100644 --- a/openpype/hosts/standalonepublisher/addon.py +++ b/openpype/hosts/standalonepublisher/addon.py @@ -10,7 +10,7 @@ STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class StandAlonePublishAddon(OpenPypeModule, ITrayAction, IHostAddon): - label = "Publish" + label = "Publisher (legacy)" name = "standalonepublisher" host_name = "standalonepublisher" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 7925b0ecf3..6c3b0c3efd 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -83,9 +83,9 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): self.log.info(f"Created new instance: {instance_name}") - def convertor(value): + def converter(value): return str(value) self.log.debug("Instance data: {}".format( - json.dumps(new_instance.data, indent=4, default=convertor) + json.dumps(new_instance.data, indent=4, default=converter) )) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 2bf3917e2f..96aaae23dc 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -104,7 +104,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): if repr.get(k): repr.pop(k) - # convert files to list if it isnt + # convert files to list if it isn't if not isinstance(files, (tuple, list)): files = [files] @@ -174,7 +174,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): continue files = repre["files"] - # Convert files to list if it isnt + # Convert files to list if it isn't if not isinstance(files, (tuple, list)): files = [files] @@ -255,7 +255,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): if ext.startswith("."): component["ext"] = ext[1:] - if component["preview"]: + # Remove 'preview' key from representation data + preview = component.pop("preview") + if preview: instance.data["families"].append("review") component["tags"] = ["review"] self.log.debug("Adding review family") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py index 8633d4bf9d..391cace761 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py @@ -116,7 +116,7 @@ class 
CollectEditorial(pyblish.api.InstancePlugin): kwargs = {} if extension == ".edl": # EDL has no frame rate embedded so needs explicit - # frame rate else 24 is asssumed. + # frame rate else 24 is assumed. kwargs["rate"] = get_current_project_asset()["data"]["fps"] instance.data["otio_timeline"] = otio.adapters.read_from_file( diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py index 18bf0394ae..9ff84e32fb 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_workfile_location.py @@ -27,11 +27,12 @@ class ExtractWorkfileUrl(pyblish.api.ContextPlugin): rep_name = instance.data.get("representations")[0].get("name") template_data["representation"] = rep_name template_data["ext"] = rep_name - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] + template_obj = anatomy.templates_obj["publish"]["path"] + template_filled = template_obj.format_strict(template_data) filepath = os.path.normpath(template_filled) self.log.info("Using published scene for render {}".format( filepath)) + break if not filepath: self.log.info("Texture batch doesn't contain workfile.") diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 074c62ea0e..e46fbe6098 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -29,7 +29,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): for pattern in self.skip_timelines_check): self.log.info("Skipping for {} task".format(instance.data["task"])) - # TODO repace query with using 'instance.data["assetEntity"]' + # TODO replace query with using 'instance.data["assetEntity"]' asset_data = get_current_project_asset(instance.data["asset"])["data"] frame_start = asset_data["frameStart"] frame_end = asset_data["frameEnd"] diff --git a/openpype/hosts/substancepainter/__init__.py b/openpype/hosts/substancepainter/__init__.py new file mode 100644 index 0000000000..4c33b9f507 --- /dev/null +++ b/openpype/hosts/substancepainter/__init__.py @@ -0,0 +1,10 @@ +from .addon import ( + SubstanceAddon, + SUBSTANCE_HOST_DIR, +) + + +__all__ = ( + "SubstanceAddon", + "SUBSTANCE_HOST_DIR" +) diff --git a/openpype/hosts/substancepainter/addon.py b/openpype/hosts/substancepainter/addon.py new file mode 100644 index 0000000000..2fbea139c5 --- /dev/null +++ b/openpype/hosts/substancepainter/addon.py @@ -0,0 +1,34 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +SUBSTANCE_HOST_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class SubstanceAddon(OpenPypeModule, IHostAddon): + name = "substancepainter" + host_name = "substancepainter" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + # Add requirements to SUBSTANCE_PAINTER_PLUGINS_PATH + plugin_path = os.path.join(SUBSTANCE_HOST_DIR, "deploy") + plugin_path = plugin_path.replace("\\", "/") + if env.get("SUBSTANCE_PAINTER_PLUGINS_PATH"): + plugin_path += os.pathsep + env["SUBSTANCE_PAINTER_PLUGINS_PATH"] + + env["SUBSTANCE_PAINTER_PLUGINS_PATH"] = plugin_path + + # Log in Substance Painter doesn't support custom terminal colors + env["OPENPYPE_LOG_NO_COLORS"] 
= "Yes"
+
+    def get_launch_hook_paths(self, app):
+        if app.host_name != self.host_name:
+            return []
+        return [
+            os.path.join(SUBSTANCE_HOST_DIR, "hooks")
+        ]
+
+    def get_workfile_extensions(self):
+        return [".spp", ".toc"]
diff --git a/openpype/hosts/substancepainter/api/__init__.py b/openpype/hosts/substancepainter/api/__init__.py
new file mode 100644
index 0000000000..937d0c429e
--- /dev/null
+++ b/openpype/hosts/substancepainter/api/__init__.py
@@ -0,0 +1,8 @@
+from .pipeline import (
+    SubstanceHost,
+
+)
+
+__all__ = [
+    "SubstanceHost",
+]
diff --git a/openpype/hosts/substancepainter/api/colorspace.py b/openpype/hosts/substancepainter/api/colorspace.py
new file mode 100644
index 0000000000..375b61b39b
--- /dev/null
+++ b/openpype/hosts/substancepainter/api/colorspace.py
@@ -0,0 +1,157 @@
+"""Substance Painter OCIO management
+
+Adobe Substance 3D Painter supports OCIO color management using a per-project
+configuration. Output color spaces are defined at the project level.
+
+For more information see:
+  - https://substance3d.adobe.com/documentation/spdoc/color-management-223053233.html  # noqa
+  - https://substance3d.adobe.com/documentation/spdoc/color-management-with-opencolorio-225969419.html  # noqa
+
+"""
+import json
+import substance_painter.export
+import substance_painter.js
+
+from .lib import (
+    get_document_structure,
+    get_channel_format
+)
+
+
+def _iter_document_stack_channels():
+    """Yield all stack paths and channels in the project."""
+
+    for material in get_document_structure()["materials"]:
+        material_name = material["name"]
+        for stack in material["stacks"]:
+            stack_name = stack["name"]
+            if stack_name:
+                stack_path = [material_name, stack_name]
+            else:
+                stack_path = material_name
+            for channel in stack["channels"]:
+                yield stack_path, channel
+
+
+def _get_first_color_and_data_stack_and_channel():
+    """Return first found color channel and data channel."""
+    color_channel = None
+    data_channel = None
+    for stack_path, channel in _iter_document_stack_channels():
+        channel_format = get_channel_format(stack_path, channel)
+        if channel_format["color"]:
+            color_channel = (stack_path, channel)
+        else:
+            data_channel = (stack_path, channel)
+
+        if color_channel and data_channel:
+            return color_channel, data_channel
+
+    return color_channel, data_channel
+
+
+def get_project_channel_data():
+    """Return colorSpace settings for the current substance painter project.
+
+    In Substance Painter only color channels have color management enabled,
+    whereas data channels have no color management applied. This can't be
+    changed. The artist can only customize the export color space for color
+    channels per bit depth (8 bpc, 16 bpc and 32 bpc).
+
+    As such this returns the color space for 'data' and a color space per
+    bit depth for color channels.
+
+    Example output:
+        {
+            "data": {"colorSpace": "Utility - Raw"},
+            "color8": {"colorSpace": "ACES - ACEScg"},
+            "color16": {"colorSpace": "ACES - ACEScg"},
+            "color16f": {"colorSpace": "ACES - ACEScg"},
+            "color32f": {"colorSpace": "ACES - ACEScg"}
+        }
+
+    """
+
+    keys = ["colorSpace"]
+    query = {key: f"${key}" for key in keys}
+
+    config = {
+        "exportPath": "/",
+        "exportShaderParams": False,
+        "defaultExportPreset": "query_preset",
+
+        "exportPresets": [{
+            "name": "query_preset",
+
+            # List of maps making up this export preset.
+            "maps": [{
+                "fileName": json.dumps(query),
+                # List of source/destination defining which channels will
+                # make up the texture file.
+ "channels": [], + "parameters": { + "fileFormat": "exr", + "bitDepth": "32f", + "dithering": False, + "sizeLog2": 4, + "paddingAlgorithm": "passthrough", + "dilationDistance": 16 + } + }] + }], + } + + def _get_query_output(config): + # Return the basename of the single output path we defined + result = substance_painter.export.list_project_textures(config) + path = next(iter(result.values()))[0] + # strip extension and slash since we know relevant json data starts + # and ends with { and } characters + path = path.strip("/\\.exr") + return json.loads(path) + + # Query for each type of channel (color and data) + color_channel, data_channel = _get_first_color_and_data_stack_and_channel() + colorspaces = {} + for key, channel_data in { + "data": data_channel, + "color": color_channel + }.items(): + if channel_data is None: + # No channel of that datatype anywhere in the Stack. We're + # unable to identify the output color space of the project + colorspaces[key] = None + continue + + stack, channel = channel_data + + # Stack must be a string + if not isinstance(stack, str): + # Assume iterable + stack = "/".join(stack) + + # Define the temp output config + config["exportList"] = [{"rootPath": stack}] + config_map = config["exportPresets"][0]["maps"][0] + config_map["channels"] = [ + { + "destChannel": x, + "srcChannel": x, + "srcMapType": "documentMap", + "srcMapName": channel + } for x in "RGB" + ] + + if key == "color": + # Query for each bit depth + # Color space definition can have a different OCIO config set + # for 8-bit, 16-bit and 32-bit outputs so we need to check each + # bit depth + for depth in ["8", "16", "16f", "32f"]: + config_map["parameters"]["bitDepth"] = depth # noqa + colorspaces[key + depth] = _get_query_output(config) + else: + # Data channel (not color managed) + colorspaces[key] = _get_query_output(config) + + return colorspaces diff --git a/openpype/hosts/substancepainter/api/lib.py b/openpype/hosts/substancepainter/api/lib.py new file mode 100644 index 0000000000..2cd08f862e --- /dev/null +++ b/openpype/hosts/substancepainter/api/lib.py @@ -0,0 +1,649 @@ +import os +import re +import json +from collections import defaultdict + +import substance_painter.project +import substance_painter.resource +import substance_painter.js +import substance_painter.export + +from qtpy import QtGui, QtWidgets, QtCore + + +def get_export_presets(): + """Return Export Preset resource URLs for all available Export Presets. 
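+
+    Example (illustrative; the built-in entry below is one of the defaults
+    added in this function, shelf-based entries depend on the machine):
+        >>> get_export_presets()  # doctest: +SKIP
+        {
+            "export-preset-generator://viewport2d": "2D View",
+            ...
+        }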
+ + Returns: + dict: {Resource url: GUI Label} + + """ + # TODO: Find more optimal way to find all export templates + + preset_resources = {} + for shelf in substance_painter.resource.Shelves.all(): + shelf_path = os.path.normpath(shelf.path()) + + presets_path = os.path.join(shelf_path, "export-presets") + if not os.path.exists(presets_path): + continue + + for filename in os.listdir(presets_path): + if filename.endswith(".spexp"): + template_name = os.path.splitext(filename)[0] + + resource = substance_painter.resource.ResourceID( + context=shelf.name(), + name=template_name + ) + resource_url = resource.url() + + preset_resources[resource_url] = template_name + + # Sort by template name + export_templates = dict(sorted(preset_resources.items(), + key=lambda x: x[1])) + + # Add default built-ins at the start + # TODO: find the built-ins automatically; scraped with https://gist.github.com/BigRoy/97150c7c6f0a0c916418207b9a2bc8f1 # noqa + result = { + "export-preset-generator://viewport2d": "2D View", # noqa + "export-preset-generator://doc-channel-normal-no-alpha": "Document channels + Normal + AO (No Alpha)", # noqa + "export-preset-generator://doc-channel-normal-with-alpha": "Document channels + Normal + AO (With Alpha)", # noqa + "export-preset-generator://sketchfab": "Sketchfab", # noqa + "export-preset-generator://adobe-standard-material": "Substance 3D Stager", # noqa + "export-preset-generator://usd": "USD PBR Metal Roughness", # noqa + "export-preset-generator://gltf": "glTF PBR Metal Roughness", # noqa + "export-preset-generator://gltf-displacement": "glTF PBR Metal Roughness + Displacement texture (experimental)" # noqa + } + result.update(export_templates) + return result + + +def _convert_stack_path_to_cmd_str(stack_path): + """Convert stack path `str` or `[str, str]` for javascript query + + Example usage: + >>> stack_path = _convert_stack_path_to_cmd_str(stack_path) + >>> cmd = f"alg.mapexport.channelIdentifiers({stack_path})" + >>> substance_painter.js.evaluate(cmd) + + Args: + stack_path (list or str): Path to the stack, could be + "Texture set name" or ["Texture set name", "Stack name"] + + Returns: + str: Stack path usable as argument in javascript query. + + """ + return json.dumps(stack_path) + + +def get_channel_identifiers(stack_path=None): + """Return the list of channel identifiers. + + If a context is passed (texture set/stack), + return only used channels with resolved user channels. + + Channel identifiers are: + basecolor, height, specular, opacity, emissive, displacement, + glossiness, roughness, anisotropylevel, anisotropyangle, transmissive, + scattering, reflection, ior, metallic, normal, ambientOcclusion, + diffuse, specularlevel, blendingmask, [custom user names]. + + Args: + stack_path (list or str, Optional): Path to the stack, could be + "Texture set name" or ["Texture set name", "Stack name"] + + Returns: + list: List of channel identifiers. + + """ + if stack_path is None: + stack_path = "" + else: + stack_path = _convert_stack_path_to_cmd_str(stack_path) + cmd = f"alg.mapexport.channelIdentifiers({stack_path})" + return substance_painter.js.evaluate(cmd) + + +def get_channel_format(stack_path, channel): + """Retrieve the channel format of a specific stack channel. + + See `alg.mapexport.channelFormat` (javascript API) for more details. 
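+
+    Example (illustrative output; assumes a stack with a "basecolor"
+    channel in an 8 bpc sRGB format):
+        >>> get_channel_format("DefaultMaterial", "basecolor")  # doctest: +SKIP
+        {"label": "sRGB8", "color": True, "floating": False, "bitDepth": 8}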
+
+    The channel format data is:
+        "label" (str): The channel format label; one of
+            [sRGB8, L8, RGB8, L16, RGB16, L16F, RGB16F, L32F, RGB32F]
+        "color" (bool): True if the format is in color, False if grayscale
+        "floating" (bool): True if the format uses floating point
+            representation, False otherwise
+        "bitDepth" (int): Bits per color channel (could be 8, 16 or 32 bpc)
+
+    Arguments:
+        stack_path (list or str): Path to the stack, could be
+            "Texture set name" or ["Texture set name", "Stack name"]
+        channel (str): Identifier of the channel to export
+            (see `get_channel_identifiers`)
+
+    Returns:
+        dict: The channel format data.
+
+    """
+    stack_path = _convert_stack_path_to_cmd_str(stack_path)
+    cmd = f"alg.mapexport.channelFormat({stack_path}, '{channel}')"
+    return substance_painter.js.evaluate(cmd)
+
+
+def get_document_structure():
+    """Dump the document structure.
+
+    See `alg.mapexport.documentStructure` (javascript API) for more details.
+
+    Returns:
+        dict: Document structure or None when no project is open
+
+    """
+    return substance_painter.js.evaluate("alg.mapexport.documentStructure()")
+
+
+def get_export_templates(config, format="png", strip_folder=True):
+    """Return export config outputs.
+
+    This uses the Javascript API `alg.mapexport.getPathsExportDocumentMaps`
+    which returns a different output than the Python equivalent
+    `substance_painter.export.list_project_textures(config)`.
+
+    The nice thing about the Javascript API version is that it returns the
+    output textures grouped by filename template.
+
+    A downside is that it doesn't return all the UDIM tiles but per template
+    always returns a single file.
+
+    Note:
+        The file format needs to be explicitly passed to the Javascript API,
+        but when exporting through the Python API the file format can be
+        based on the output preset, so the file extensions may mismatch.
+
+    Warning:
+        Even though the function appears to solely query the expected
+        outputs, the Javascript API will actually create the config's
+        texture output folder if it does not exist yet. As such, a valid
+        path must be set.
+
+    Example output:
+    {
+        "DefaultMaterial": {
+            "$textureSet_BaseColor(_$colorSpace)(.$udim)": "DefaultMaterial_BaseColor_ACES - ACEScg.1002.png",  # noqa
+            "$textureSet_Emissive(_$colorSpace)(.$udim)": "DefaultMaterial_Emissive_ACES - ACEScg.1002.png",  # noqa
+            "$textureSet_Height(_$colorSpace)(.$udim)": "DefaultMaterial_Height_Utility - Raw.1002.png",  # noqa
+            "$textureSet_Metallic(_$colorSpace)(.$udim)": "DefaultMaterial_Metallic_Utility - Raw.1002.png",  # noqa
+            "$textureSet_Normal(_$colorSpace)(.$udim)": "DefaultMaterial_Normal_Utility - Raw.1002.png",  # noqa
+            "$textureSet_Roughness(_$colorSpace)(.$udim)": "DefaultMaterial_Roughness_Utility - Raw.1002.png"  # noqa
+        }
+    }
+
+    Arguments:
+        config (dict): Export config
+        format (str, Optional): Output format to write to, defaults to 'png'
+        strip_folder (bool, Optional): Whether to strip the output folder
+            from the output filenames.
+
+    Returns:
+        dict: The expected output maps.
+
+    """
+    folder = config["exportPath"].replace("\\", "/")
+    preset = config["defaultExportPreset"]
+    cmd = f'alg.mapexport.getPathsExportDocumentMaps("{preset}", "{folder}", "{format}")'  # noqa
+    result = substance_painter.js.evaluate(cmd)
+
+    if strip_folder:
+        for _stack, maps in result.items():
+            for map_template, map_filepath in maps.items():
+                map_filepath = map_filepath.replace("\\", "/")
+                assert map_filepath.startswith(folder)
+                map_filename = map_filepath[len(folder):].lstrip("/")
+                maps[map_template] = map_filename
+
+    return result
+
+
+def _templates_to_regex(templates,
+                        texture_set,
+                        colorspaces,
+                        project,
+                        mesh):
+    """Return regex based on a Substance Painter export filename template.
+
+    This converts Substance Painter export filename templates like
+    `$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)` into a regex
+    which can be used to query an output filename to help retrieve:
+
+    - Which template filename the file belongs to.
+    - Which color space the file is written with.
+    - Which udim tile it is exactly.
+
+    This is used by `get_parsed_export_maps` which tries to match the
+    filename pattern against the known possible outputs as explicitly as
+    possible. That's why the Texture Set name, color spaces, project path
+    and mesh path must be provided. By doing so we get the best shot at
+    correctly matching the right template, because otherwise $textureSet
+    could basically be any string and thus even match a color space or
+    mesh name.
+
+    Arguments:
+        templates (list): List of templates to convert to regex.
+        texture_set (str): The texture set to match against.
+        colorspaces (list): The colorspaces defined in the current project.
+        project (str): Filepath of current substance project.
+        mesh (str): Path to mesh file used in current project.
+
+    Returns:
+        dict: Template: Template regex pattern
+
+    """
+    def _filename_no_ext(path):
+        return os.path.splitext(os.path.basename(path))[0]
+
+    if colorspaces and any(colorspaces):
+        colorspace_match = "|".join(re.escape(c) for c in set(colorspaces))
+        colorspace_match = f"({colorspace_match})"
+    else:
+        # No colorspace support enabled
+        colorspace_match = ""
+
+    # Key to regex valid search values
+    key_matches = {
+        "$project": re.escape(_filename_no_ext(project)),
+        "$mesh": re.escape(_filename_no_ext(mesh)),
+        "$textureSet": re.escape(texture_set),
+        "$colorSpace": colorspace_match,
+        "$udim": "([0-9]{4})"
+    }
+
+    # Turn the templates into regexes
+    regexes = {}
+    for template in templates:
+
+        # We need to tweak the template into a valid search regex
+        search_regex = re.escape(template)
+
+        # Let's assume that any ( and ) character in the file template was
+        # intended as an optional template key and do a simple `str.replace`
+        # Note: we are matching against re.escape(template) so will need to
+        # search for the escaped brackets.
+        search_regex = search_regex.replace(re.escape("("), "(")
+        search_regex = search_regex.replace(re.escape(")"), ")?")
+
+        # Substitute each key into a named group
+        for key, key_expected_regex in key_matches.items():
+
+            # We want to use the template as a regex basis in the end so will
+            # escape the whole thing first. Note that thus we'll need to
+            # search for the escaped versions of the keys too.
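+            # For example, the escaped "\$udim" text in a template becomes
+            # the named group "(?P<udim>([0-9]{4}))" so the matched udim
+            # tile can be read back from the match's groupdict().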
+            escaped_key = re.escape(key)
+            key_label = key[1:]  # key without $ prefix
+
+            key_expected_grp_regex = f"(?P<{key_label}>{key_expected_regex})"
+            search_regex = search_regex.replace(escaped_key,
+                                                key_expected_grp_regex)
+
+        # The filename templates don't include the extension so we add it
+        # to be able to match the output filename beginning to end
+        ext_regex = r"(?P<ext>\.[A-Za-z][A-Za-z0-9-]*)"
+        search_regex = rf"^{search_regex}{ext_regex}$"
+
+        regexes[template] = search_regex
+
+    return regexes
+
+
+def strip_template(template, strip="._ "):
+    """Return static characters in a substance painter filename template.
+
+    >>> strip_template("$textureSet_HELLO(.$udim)")
+    # HELLO
+    >>> strip_template("$mesh_$textureSet_HELLO_WORLD_$colorSpace(.$udim)")
+    # HELLO_WORLD
+    >>> strip_template("$textureSet_HELLO(.$udim)", strip=None)
+    # _HELLO
+    >>> strip_template("$mesh_$textureSet_$colorSpace(.$udim)", strip=None)
+    # __
+
+    Arguments:
+        template (str): Filename template to strip.
+        strip (str, optional): Characters to strip from beginning and end
+            of the static string in template. Defaults to: `._ `.
+
+    Returns:
+        str: The static string in filename template.
+
+    """
+    # Return only the characters of the template that were static.
+    # Remove all keys
+    keys = ["$project", "$mesh", "$textureSet", "$udim", "$colorSpace"]
+    stripped_template = template
+    for key in keys:
+        stripped_template = stripped_template.replace(key, "")
+
+    # Everything inside an optional bracket space is excluded since it's not
+    # static. We keep a counter to track whether we are currently iterating
+    # over parts of the template that are inside an 'optional' group or not.
+    counter = 0
+    result = ""
+    for char in stripped_template:
+        if char == "(":
+            counter += 1
+        elif char == ")":
+            counter -= 1
+            if counter < 0:
+                counter = 0
+        else:
+            if counter == 0:
+                result += char
+
+    if strip:
+        # Strip off any leading/trailing characters. Technically these are
+        # static but usually start and end separators like space or
+        # underscore aren't wanted.
+        result = result.strip(strip)
+
+    return result
+
+
+def get_parsed_export_maps(config):
+    """Return Export Config's expected output textures with parsed data.
+
+    This tries to parse the texture outputs using a Python API export config.
+
+    Parses template keys: $project, $mesh, $textureSet, $colorSpace, $udim
+
+    Example:
+        {("DefaultMaterial", ""): {
+            "$mesh_$textureSet_BaseColor(_$colorSpace)(.$udim)": [
+                {
+                    // OUTPUT DATA FOR FILE #1 OF THE TEMPLATE
+                },
+                {
+                    // OUTPUT DATA FOR FILE #2 OF THE TEMPLATE
+                },
+            ]
+        }}
+
+    File output data (all outputs are `str`):
+
+    1) Parsed tokens: these are parsed tokens from the template; they will
+       only exist if found in the filename template and output filename.
+
+        project: Workfile filename without extension
+        mesh: Filename of the loaded mesh without extension
+        textureSet: The texture set, e.g. "DefaultMaterial"
+        colorSpace: The color space, e.g. "ACES - ACEScg"
+        udim: The udim tile, e.g. "1001"
+
+    2) Template output and filepath:
+
+        filepath: Full path to the resulting texture map, e.g.
+            "/path/to/mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png"
+        output: "mesh_DefaultMaterial_BaseColor_ACES - ACEScg.1002.png"
+            Note: if the template had slashes (folders) then `output` will
+            too. So `output` might include a folder.
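+
+    Example of a single file's output data (hypothetical values, assuming
+    a color managed project with UDIM tiles):
+        {
+            "textureSet": "DefaultMaterial",
+            "colorSpace": "ACES - ACEScg",
+            "udim": "1002",
+            "ext": ".png",
+            "filepath": "/export/DefaultMaterial_BaseColor_ACES - ACEScg.1002.png",  # noqa
+            "output": "DefaultMaterial_BaseColor_ACES - ACEScg.1002.png"
+        }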
+
+    Returns:
+        dict: {(texture_set, stack): {template: [file1_data, file2_data]}}
+
+    """
+    # Import is here to avoid recursive lib <-> colorspace imports
+    from .colorspace import get_project_channel_data
+
+    outputs = substance_painter.export.list_project_textures(config)
+    templates = get_export_templates(config, strip_folder=False)
+
+    # Get all color spaces set for the current project
+    project_colorspaces = set(
+        data["colorSpace"] for data in get_project_channel_data().values()
+    )
+
+    # Get current project mesh path and project path to explicitly match
+    # the $mesh and $project tokens
+    project_mesh_path = substance_painter.project.last_imported_mesh_path()
+    project_path = substance_painter.project.file_path()
+
+    # Get the current export path to strip it from the beginning of the
+    # filepath results; the filename templates don't include it, so we
+    # match without that part of the filename.
+    export_path = config["exportPath"]
+    export_path = export_path.replace("\\", "/")
+    if not export_path.endswith("/"):
+        export_path += "/"
+
+    # Parse the outputs
+    result = {}
+    for key, filepaths in outputs.items():
+        texture_set, stack = key
+
+        if stack:
+            stack_path = f"{texture_set}/{stack}"
+        else:
+            stack_path = texture_set
+
+        stack_templates = list(templates[stack_path].keys())
+
+        template_regex = _templates_to_regex(stack_templates,
+                                             texture_set=texture_set,
+                                             colorspaces=project_colorspaces,
+                                             mesh=project_mesh_path,
+                                             project=project_path)
+
+        # Let's precompile the regexes
+        for template, regex in template_regex.items():
+            template_regex[template] = re.compile(regex)
+
+        stack_results = defaultdict(list)
+        for filepath in sorted(filepaths):
+            # We strip explicitly using the full parent export path instead
+            # of using `os.path.basename` because the export template is
+            # allowed to have subfolders which we want to match against
+            filepath = filepath.replace("\\", "/")
+            assert filepath.startswith(export_path), (
+                f"Filepath {filepath} must start with folder {export_path}"
+            )
+            filename = filepath[len(export_path):]
+
+            for template, regex in template_regex.items():
+                match = regex.match(filename)
+                if match:
+                    parsed = match.groupdict(default={})
+
+                    # Include some special outputs for convenience
+                    parsed["filepath"] = filepath
+                    parsed["output"] = filename
+
+                    stack_results[template].append(parsed)
+                    break
+            else:
+                raise ValueError(f"Unable to match {filename} against any "
+                                 f"template in: {list(template_regex.keys())}")
+
+        result[key] = dict(stack_results)
+
+    return result
+
+
+def load_shelf(path, name=None):
+    """Add shelf to substance painter (for current application session)
+
+    This will dynamically add a Shelf for the current session. It's good
+    to note however that these will *not* persist on restart of the host.
+
+    Note:
+        Consider the loaded shelf a static library of resources.
+
+        The shelf will *not* be visible in application preferences in
+        Edit > Settings > Libraries.
+
+        The shelf will *not* show in the Assets browser if it has no
+        existing assets.
+
+        The shelf will *not* be a selectable option as a destination to
+        import resources to.
+
+    """
+
+    # Ensure expanded path with forward slashes
+    path = os.path.expandvars(path)
+    path = os.path.abspath(path)
+    path = path.replace("\\", "/")
+
+    # Path must exist
+    if not os.path.isdir(path):
+        raise ValueError(f"Path is not an existing folder: {path}")
+
+    # This name must be unique and must only contain lowercase letters,
+    # numbers, underscores or hyphens.
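+    # For example a name like "My Shelf" is sanitized to "my_shelf".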
+    if name is None:
+        name = os.path.basename(path)
+
+    name = name.lower()
+    name = re.sub(r"[^a-z0-9_\-]", "_", name)  # sanitize to underscores
+
+    if substance_painter.resource.Shelves.exists(name):
+        shelf = next(
+            shelf for shelf in substance_painter.resource.Shelves.all()
+            if shelf.name() == name
+        )
+        if os.path.normpath(shelf.path()) != os.path.normpath(path):
+            raise ValueError(f"Shelf with name '{name}' already exists "
+                             f"for a different path: '{shelf.path()}'")
+
+        return
+
+    print(f"Adding Shelf '{name}' to path: {path}")
+    substance_painter.resource.Shelves.add(name, path)
+
+    return name
+
+
+def _get_new_project_action():
+    """Return QAction which triggers Substance Painter's new project dialog"""
+
+    main_window = substance_painter.ui.get_main_window()
+
+    # Find the file menu's New file action
+    menubar = main_window.menuBar()
+    new_action = None
+    for action in menubar.actions():
+        menu = action.menu()
+        if not menu:
+            continue
+
+        if menu.objectName() != "file":
+            continue
+
+        # Find the action with the CTRL+N key sequence
+        new_action = next((action for action in menu.actions()
+                           if action.shortcut() == QtGui.QKeySequence.New),
+                          None)
+        break
+
+    return new_action
+
+
+def prompt_new_file_with_mesh(mesh_filepath):
+    """Prompts the user for a new file using Substance Painter's own dialog.
+
+    This sets the mesh path to load to the given mesh and hides the mesh
+    selection in the dialog so the user cannot change the path. This way we
+    can allow user configuration of a project but set the mesh path
+    ourselves.
+
+    Warning:
+        This is very hacky and experimental.
+
+    Note:
+        If a project is currently open using the same mesh filepath, it
+        can't accurately be detected whether the user actually accepted the
+        new project dialog or whether the project afterwards is still the
+        original project, for example when the user might have cancelled
+        the operation.
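+
+    Example (minimal sketch; the mesh path below is hypothetical):
+        >>> prompt_new_file_with_mesh("C:/assets/hero.fbx")  # doctest: +SKIP
+        # Returns the mesh path when the project was created with the mesh
+        # set as expected, otherwise returns None.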
+ + """ + + app = QtWidgets.QApplication.instance() + assert os.path.isfile(mesh_filepath), \ + f"Mesh filepath does not exist: {mesh_filepath}" + + def _setup_file_dialog(): + """Set filepath in QFileDialog and trigger accept result""" + file_dialog = app.activeModalWidget() + assert isinstance(file_dialog, QtWidgets.QFileDialog) + + # Quickly hide the dialog + file_dialog.hide() + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000) + + file_dialog.setDirectory(os.path.dirname(mesh_filepath)) + url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath)) + file_dialog.selectUrl(url) + + # Give the explorer window time to refresh to the folder and select + # the file + while not file_dialog.selectedFiles(): + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000) + print(f"Selected: {file_dialog.selectedFiles()}") + + # Set it again now we know the path is refreshed - without this + # accepting the dialog will often not trigger the correct filepath + file_dialog.setDirectory(os.path.dirname(mesh_filepath)) + url = QtCore.QUrl.fromLocalFile(os.path.basename(mesh_filepath)) + file_dialog.selectUrl(url) + + file_dialog.done(file_dialog.Accepted) + app.processEvents(QtCore.QEventLoop.AllEvents) + + def _setup_prompt(): + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) + dialog = app.activeModalWidget() + assert dialog.objectName() == "NewProjectDialog" + + # Set the window title + mesh = os.path.basename(mesh_filepath) + dialog.setWindowTitle(f"New Project with mesh: {mesh}") + + # Get the select mesh file button + mesh_select = dialog.findChild(QtWidgets.QPushButton, "meshSelect") + + # Hide the select mesh button to the user to block changing of mesh + mesh_select.setVisible(False) + + # Ensure UI is visually up-to-date + app.processEvents(QtCore.QEventLoop.ExcludeUserInputEvents) + + # Trigger the 'select file' dialog to set the path and have the + # new file dialog to use the path. 
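+        # Delay note (assumption): the short 10 ms delay gives the modal
+        # file dialog time to become the active modal widget before
+        # _setup_file_dialog tries to drive it.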
+ QtCore.QTimer.singleShot(10, _setup_file_dialog) + mesh_select.click() + + app.processEvents(QtCore.QEventLoop.AllEvents, 5000) + + mesh_filename = dialog.findChild(QtWidgets.QFrame, "meshFileName") + mesh_filename_label = mesh_filename.findChild(QtWidgets.QLabel) + if not mesh_filename_label.text(): + dialog.close() + raise RuntimeError(f"Failed to set mesh path: {mesh_filepath}") + + new_action = _get_new_project_action() + if not new_action: + raise RuntimeError("Unable to detect new file action..") + + QtCore.QTimer.singleShot(0, _setup_prompt) + new_action.trigger() + app.processEvents(QtCore.QEventLoop.AllEvents, 5000) + + if not substance_painter.project.is_open(): + return + + # Confirm mesh was set as expected + project_mesh = substance_painter.project.last_imported_mesh_path() + if os.path.normpath(project_mesh) != os.path.normpath(mesh_filepath): + return + + return project_mesh diff --git a/openpype/hosts/substancepainter/api/pipeline.py b/openpype/hosts/substancepainter/api/pipeline.py new file mode 100644 index 0000000000..9406fb8edb --- /dev/null +++ b/openpype/hosts/substancepainter/api/pipeline.py @@ -0,0 +1,427 @@ +# -*- coding: utf-8 -*- +"""Pipeline tools for OpenPype Substance Painter integration.""" +import os +import logging +from functools import partial + +# Substance 3D Painter modules +import substance_painter.ui +import substance_painter.event +import substance_painter.project + +import pyblish.api + +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost +from openpype.settings import ( + get_current_project_settings, + get_system_settings +) + +from openpype.pipeline.template_data import get_template_data_with_names +from openpype.pipeline import ( + register_creator_plugin_path, + register_loader_plugin_path, + AVALON_CONTAINER_ID, + Anatomy +) +from openpype.lib import ( + StringTemplate, + register_event_callback, + emit_event, +) +from openpype.pipeline.load import any_outdated_containers +from openpype.hosts.substancepainter import SUBSTANCE_HOST_DIR + +from . import lib + +log = logging.getLogger("openpype.hosts.substance") + +PLUGINS_DIR = os.path.join(SUBSTANCE_HOST_DIR, "plugins") +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "create") +INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") + +OPENPYPE_METADATA_KEY = "OpenPype" +OPENPYPE_METADATA_CONTAINERS_KEY = "containers" # child key +OPENPYPE_METADATA_CONTEXT_KEY = "context" # child key +OPENPYPE_METADATA_INSTANCES_KEY = "instances" # child key + + +class SubstanceHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "substancepainter" + + def __init__(self): + super(SubstanceHost, self).__init__() + self._has_been_setup = False + self.menu = None + self.callbacks = [] + self.shelves = [] + + def install(self): + pyblish.api.register_host("substancepainter") + + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + + log.info("Installing callbacks ... ") + # register_event_callback("init", on_init) + self._register_callbacks() + # register_event_callback("before.save", before_save) + # register_event_callback("save", on_save) + register_event_callback("open", on_open) + # register_event_callback("new", on_new) + + log.info("Installing menu ... 
") + self._install_menu() + + project_settings = get_current_project_settings() + self._install_shelves(project_settings) + + self._has_been_setup = True + + def uninstall(self): + self._uninstall_shelves() + self._uninstall_menu() + self._deregister_callbacks() + + def has_unsaved_changes(self): + + if not substance_painter.project.is_open(): + return False + + return substance_painter.project.needs_saving() + + def get_workfile_extensions(self): + return [".spp", ".toc"] + + def save_workfile(self, dst_path=None): + + if not substance_painter.project.is_open(): + return False + + if not dst_path: + dst_path = self.get_current_workfile() + + full_save_mode = substance_painter.project.ProjectSaveMode.Full + substance_painter.project.save_as(dst_path, full_save_mode) + + return dst_path + + def open_workfile(self, filepath): + + if not os.path.exists(filepath): + raise RuntimeError("File does not exist: {}".format(filepath)) + + # We must first explicitly close current project before opening another + if substance_painter.project.is_open(): + substance_painter.project.close() + + substance_painter.project.open(filepath) + return filepath + + def get_current_workfile(self): + if not substance_painter.project.is_open(): + return None + + filepath = substance_painter.project.file_path() + if filepath and filepath.endswith(".spt"): + # When currently in a Substance Painter template assume our + # scene isn't saved. This can be the case directly after doing + # "New project", the path will then be the template used. This + # avoids Workfiles tool trying to save as .spt extension if the + # file hasn't been saved before. + return + + return filepath + + def get_containers(self): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) + if containers: + for key, container in containers.items(): + container["objectName"] = key + yield container + + def update_context_data(self, data, changes): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + metadata.set(OPENPYPE_METADATA_CONTEXT_KEY, data) + + def get_context_data(self): + + if not substance_painter.project.is_open(): + return + + metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY) + return metadata.get(OPENPYPE_METADATA_CONTEXT_KEY) or {} + + def _install_menu(self): + from PySide2 import QtWidgets + from openpype.tools.utils import host_tools + + parent = substance_painter.ui.get_main_window() + + menu = QtWidgets.QMenu("OpenPype") + + action = menu.addAction("Create...") + action.triggered.connect( + lambda: host_tools.show_publisher(parent=parent, + tab="create") + ) + + action = menu.addAction("Load...") + action.triggered.connect( + lambda: host_tools.show_loader(parent=parent, use_context=True) + ) + + action = menu.addAction("Publish...") + action.triggered.connect( + lambda: host_tools.show_publisher(parent=parent, + tab="publish") + ) + + action = menu.addAction("Manage...") + action.triggered.connect( + lambda: host_tools.show_scene_inventory(parent=parent) + ) + + action = menu.addAction("Library...") + action.triggered.connect( + lambda: host_tools.show_library_loader(parent=parent) + ) + + menu.addSeparator() + action = menu.addAction("Work Files...") + action.triggered.connect( + lambda: host_tools.show_workfiles(parent=parent) + ) + + substance_painter.ui.add_menu(menu) + + def on_menu_destroyed(): + 
self.menu = None
+
+        menu.destroyed.connect(on_menu_destroyed)
+
+        self.menu = menu
+
+    def _uninstall_menu(self):
+        if self.menu:
+            self.menu.destroy()
+            self.menu = None
+
+    def _register_callbacks(self):
+        # Prepare emit event callbacks
+        open_callback = partial(emit_event, "open")
+
+        # Connect to the Substance Painter events
+        dispatcher = substance_painter.event.DISPATCHER
+        for event, callback in [
+            (substance_painter.event.ProjectOpened, open_callback)
+        ]:
+            dispatcher.connect(event, callback)
+            # Keep a reference so we can deregister if needed
+            self.callbacks.append((event, callback))
+
+    def _deregister_callbacks(self):
+        for event, callback in self.callbacks:
+            substance_painter.event.DISPATCHER.disconnect(event, callback)
+        self.callbacks.clear()
+
+    def _install_shelves(self, project_settings):
+
+        shelves = project_settings["substancepainter"].get("shelves", {})
+        if not shelves:
+            return
+
+        # Prepare formatting data if we detect any path which might have
+        # template tokens like {asset} in there.
+        formatting_data = {}
+        has_formatting_entries = any("{" in path for path in shelves.values())
+        if has_formatting_entries:
+            project_name = self.get_current_project_name()
+            asset_name = self.get_current_asset_name()
+            task_name = self.get_current_task_name()
+            system_settings = get_system_settings()
+            formatting_data = get_template_data_with_names(project_name,
+                                                           asset_name,
+                                                           task_name,
+                                                           system_settings)
+            anatomy = Anatomy(project_name)
+            formatting_data["root"] = anatomy.roots
+
+        for name, path in shelves.items():
+            shelf_name = None
+
+            # Allow formatting with anatomy for the paths
+            if "{" in path:
+                path = StringTemplate.format_template(path, formatting_data)
+
+            try:
+                shelf_name = lib.load_shelf(path, name=name)
+            except ValueError as exc:
+                print(f"Failed to load shelf -> {exc}")
+
+            if shelf_name:
+                self.shelves.append(shelf_name)
+
+    def _uninstall_shelves(self):
+        for shelf_name in self.shelves:
+            substance_painter.resource.Shelves.remove(shelf_name)
+        self.shelves.clear()
+
+
+def on_open():
+    log.info("Running callback on open..")
+
+    if any_outdated_containers():
+        from openpype.widgets import popup
+
+        log.warning("Scene has outdated content.")
+
+        # Get main window
+        parent = substance_painter.ui.get_main_window()
+        if parent is None:
+            log.info("Skipping outdated content pop-up "
+                     "because Substance window can't be found.")
+        else:
+
+            # Show outdated pop-up
+            def _on_show_inventory():
+                from openpype.tools.utils import host_tools
+                host_tools.show_scene_inventory(parent=parent)
+
+            dialog = popup.Popup(parent=parent)
+            dialog.setWindowTitle("Substance scene has outdated content")
+            dialog.setMessage("There are outdated containers in "
+                              "your Substance scene.")
+            dialog.on_clicked.connect(_on_show_inventory)
+            dialog.show()
+
+
+def imprint_container(container,
+                      name,
+                      namespace,
+                      context,
+                      loader):
+    """Imprint a loaded container with metadata.
+
+    Containerisation enables tracking of version, author and origin
+    for loaded assets.
+
+    Arguments:
+        container (dict): The (substance metadata) dictionary to imprint into.
+        name (str): Name of resulting assembly
+        namespace (str): Namespace under which to host container
+        context (dict): Asset information
+        loader (load.LoaderPlugin): loader instance used to produce container.
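+
+        Example of an imprinted container (values are illustrative):
+            {
+                "schema": "openpype:container-2.0",
+                "id": AVALON_CONTAINER_ID,
+                "name": "modelMain",
+                "namespace": "modelMain_01",
+                "loader": "SubstanceLoadProjectMesh",
+                "representation": "634a0f..."
+            }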
+
+    Returns:
+        None
+
+    """
+
+    data = [
+        ("schema", "openpype:container-2.0"),
+        ("id", AVALON_CONTAINER_ID),
+        ("name", str(name)),
+        ("namespace", str(namespace) if namespace else None),
+        ("loader", str(loader.__class__.__name__)),
+        ("representation", str(context["representation"]["_id"])),
+    ]
+    for key, value in data:
+        container[key] = value
+
+
+def set_container_metadata(object_name, container_data, update=False):
+    """Helper method to directly set the data for a specific container
+
+    Args:
+        object_name (str): The unique object name identifier for the
+            container.
+        container_data (dict): The data for the container.
+            Note: 'objectName' is derived from `object_name`, so any
+            'objectName' key in `container_data` will be ignored.
+        update (bool): Whether to only update the existing dict data.
+
+    """
+    # The objectName is derived from the key in the metadata, so it is not
+    # stored in the container's data itself.
+    container_data.pop("objectName", None)
+
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY) or {}
+    if update:
+        existing_data = containers.setdefault(object_name, {})
+        existing_data.update(container_data)  # mutable dict, in-place update
+    else:
+        containers[object_name] = container_data
+    metadata.set("containers", containers)
+
+
+def remove_container_metadata(object_name):
+    """Helper method to remove the data for a specific container"""
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    containers = metadata.get(OPENPYPE_METADATA_CONTAINERS_KEY)
+    if containers:
+        containers.pop(object_name, None)
+        metadata.set("containers", containers)
+
+
+def set_instance(instance_id, instance_data, update=False):
+    """Helper method to directly set the data for a specific instance
+
+    Args:
+        instance_id (str): Unique identifier for the instance
+        instance_data (dict): The instance data to store in the metadata.
+        update (bool): Whether to only update the existing data.
+    """
+    set_instances({instance_id: instance_data}, update=update)
+
+
+def set_instances(instance_data_by_id, update=False):
+    """Store data for multiple instances at the same time.
+
+    This is more optimal than querying and setting them in the metadata one
+    by one.
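+
+    Example (illustrative instance id and data):
+        >>> set_instances({
+        ...     "0fb7dc45-...": {"family": "textureSet", "variant": "Main"}
+        ... }, update=True)  # doctest: +SKIP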
+    """
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
+
+    for instance_id, instance_data in instance_data_by_id.items():
+        if update:
+            existing_data = instances.get(instance_id, {})
+            existing_data.update(instance_data)
+        else:
+            instances[instance_id] = instance_data
+
+    metadata.set("instances", instances)
+
+
+def remove_instance(instance_id):
+    """Helper method to remove the data for a specific instance"""
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    instances = metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
+    instances.pop(instance_id, None)
+    metadata.set("instances", instances)
+
+
+def get_instances_by_id():
+    """Return all instances stored in the project instances metadata"""
+    if not substance_painter.project.is_open():
+        return {}
+
+    metadata = substance_painter.project.Metadata(OPENPYPE_METADATA_KEY)
+    return metadata.get(OPENPYPE_METADATA_INSTANCES_KEY) or {}
+
+
+def get_instances():
+    """Return all instances stored in the project instances metadata as a
+    list"""
+    return list(get_instances_by_id().values())
diff --git a/openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py b/openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py
new file mode 100644
index 0000000000..e7e1849546
--- /dev/null
+++ b/openpype/hosts/substancepainter/deploy/plugins/openpype_plugin.py
@@ -0,0 +1,36 @@
+
+
+def cleanup_openpype_qt_widgets():
+    """
+    Workaround for Substance failing to shut down correctly
+    when a Qt window was still open at the time of shutting down.
+
+    This seems to work sometimes, but not all the time.
+
+    """
+    # TODO: Create a more reliable method to close down all OpenPype Qt widgets
+    from PySide2 import QtWidgets
+    import substance_painter.ui
+
+    # Kill OpenPype Qt widgets
+    print("Killing OpenPype Qt widgets..")
+    for widget in QtWidgets.QApplication.topLevelWidgets():
+        if widget.__module__.startswith("openpype."):
+            print(f"Deleting widget: {widget.__class__.__name__}")
+            substance_painter.ui.delete_ui_element(widget)
+
+
+def start_plugin():
+    from openpype.pipeline import install_host
+    from openpype.hosts.substancepainter.api import SubstanceHost
+    install_host(SubstanceHost())
+
+
+def close_plugin():
+    from openpype.pipeline import uninstall_host
+    cleanup_openpype_qt_widgets()
+    uninstall_host()
+
+
+if __name__ == "__main__":
+    start_plugin()
diff --git a/openpype/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py b/openpype/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py
new file mode 100644
index 0000000000..04b610b4df
--- /dev/null
+++ b/openpype/hosts/substancepainter/deploy/startup/openpype_load_on_first_run.py
@@ -0,0 +1,43 @@
+"""Ease the OpenPype on-boarding process by loading the plug-in on first run"""
+
+OPENPYPE_PLUGIN_NAME = "openpype_plugin"
+
+
+def start_plugin():
+    try:
+        # This isn't exposed in the official API so we keep it in a try-except
+        from painter_plugins_ui import (
+            get_settings,
+            LAUNCH_AT_START_KEY,
+            ON_STATE,
+            PLUGINS_MENU,
+            plugin_manager
+        )
+
+        # The `painter_plugins_ui` plug-in itself is also a startup plug-in,
+        # so it could run either earlier or later than this startup script;
+        # we check whether its menu has been initialized to tell which.
+        is_before_plugins_menu = PLUGINS_MENU is None
+
+        settings = get_settings(OPENPYPE_PLUGIN_NAME)
+        if settings.value(LAUNCH_AT_START_KEY, None) is None:
+            print("Initializing OpenPype plug-in on first 
run...") + if is_before_plugins_menu: + print("- running before 'painter_plugins_ui'") + # Delay the launch to the painter_plugins_ui initialization + settings.setValue(LAUNCH_AT_START_KEY, ON_STATE) + else: + # Launch now + print("- running after 'painter_plugins_ui'") + plugin_manager(OPENPYPE_PLUGIN_NAME)(True) + + # Set the checked state in the menu to avoid confusion + action = next(action for action in PLUGINS_MENU._menu.actions() + if action.text() == OPENPYPE_PLUGIN_NAME) + if action is not None: + action.blockSignals(True) + action.setChecked(True) + action.blockSignals(False) + + except Exception as exc: + print(exc) diff --git a/openpype/hosts/substancepainter/plugins/create/create_textures.py b/openpype/hosts/substancepainter/plugins/create/create_textures.py new file mode 100644 index 0000000000..dece4b2cc1 --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/create/create_textures.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating textures.""" + +from openpype.pipeline import CreatedInstance, Creator, CreatorError +from openpype.lib import ( + EnumDef, + UILabelDef, + NumberDef, + BoolDef +) + +from openpype.hosts.substancepainter.api.pipeline import ( + get_instances, + set_instance, + set_instances, + remove_instance +) +from openpype.hosts.substancepainter.api.lib import get_export_presets + +import substance_painter.project + + +class CreateTextures(Creator): + """Create a texture set.""" + identifier = "io.openpype.creators.substancepainter.textureset" + label = "Textures" + family = "textureSet" + icon = "picture-o" + + default_variant = "Main" + + def create(self, subset_name, instance_data, pre_create_data): + + if not substance_painter.project.is_open(): + raise CreatorError("Can't create a Texture Set instance without " + "an open project.") + + instance = self.create_instance_in_context(subset_name, + instance_data) + set_instance( + instance_id=instance["instance_id"], + instance_data=instance.data_to_store() + ) + + def collect_instances(self): + for instance in get_instances(): + if (instance.get("creator_identifier") == self.identifier or + instance.get("family") == self.family): + self.create_instance_in_context_from_existing(instance) + + def update_instances(self, update_list): + instance_data_by_id = {} + for instance, _changes in update_list: + # Persist the data + instance_id = instance.get("instance_id") + instance_data = instance.data_to_store() + instance_data_by_id[instance_id] = instance_data + set_instances(instance_data_by_id, update=True) + + def remove_instances(self, instances): + for instance in instances: + remove_instance(instance["instance_id"]) + self._remove_instance_from_context(instance) + + # Helper methods (this might get moved into Creator class) + def create_instance_in_context(self, subset_name, data): + instance = CreatedInstance( + self.family, subset_name, data, self + ) + self.create_context.creator_adds_instance(instance) + return instance + + def create_instance_in_context_from_existing(self, data): + instance = CreatedInstance.from_existing(data, self) + self.create_context.creator_adds_instance(instance) + return instance + + def get_instance_attr_defs(self): + + return [ + EnumDef("exportPresetUrl", + items=get_export_presets(), + label="Output Template"), + BoolDef("allowSkippedMaps", + label="Allow Skipped Output Maps", + tooltip="When enabled this allows the publish to ignore " + "output maps in the used output template if one " + "or more maps are skipped due to the required " + 
"channels not being present in the current file.", + default=True), + EnumDef("exportFileFormat", + items={ + None: "Based on output template", + # TODO: Get available extensions from substance API + "bmp": "bmp", + "ico": "ico", + "jpeg": "jpeg", + "jng": "jng", + "pbm": "pbm", + "pgm": "pgm", + "png": "png", + "ppm": "ppm", + "tga": "targa", + "tif": "tiff", + "wap": "wap", + "wbmp": "wbmp", + "xpm": "xpm", + "gif": "gif", + "hdr": "hdr", + "exr": "exr", + "j2k": "j2k", + "jp2": "jp2", + "pfm": "pfm", + "webp": "webp", + # TODO: Unsure why jxr format fails to export + # "jxr": "jpeg-xr", + # TODO: File formats that combine the exported textures + # like psd are not correctly supported due to + # publishing only a single file + # "psd": "psd", + # "sbsar": "sbsar", + }, + default=None, + label="File type"), + EnumDef("exportSize", + items={ + None: "Based on each Texture Set's size", + # The key is size of the texture file in log2. + # (i.e. 10 means 2^10 = 1024) + 7: "128", + 8: "256", + 9: "512", + 10: "1024", + 11: "2048", + 12: "4096" + }, + default=None, + label="Size"), + + EnumDef("exportPadding", + items={ + "passthrough": "No padding (passthrough)", + "infinite": "Dilation infinite", + "transparent": "Dilation + transparent", + "color": "Dilation + default background color", + "diffusion": "Dilation + diffusion" + }, + default="infinite", + label="Padding"), + NumberDef("exportDilationDistance", + minimum=0, + maximum=256, + decimals=0, + default=16, + label="Dilation Distance"), + UILabelDef("*only used with " + "'Dilation + ' padding"), + ] + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attributes + return self.get_instance_attr_defs() diff --git a/openpype/hosts/substancepainter/plugins/create/create_workfile.py b/openpype/hosts/substancepainter/plugins/create/create_workfile.py new file mode 100644 index 0000000000..d7f31f9dcf --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/create/create_workfile.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +"""Creator plugin for creating workfiles.""" + +from openpype.pipeline import CreatedInstance, AutoCreator +from openpype.client import get_asset_by_name + +from openpype.hosts.substancepainter.api.pipeline import ( + set_instances, + set_instance, + get_instances +) + +import substance_painter.project + + +class CreateWorkfile(AutoCreator): + """Workfile auto-creator.""" + identifier = "io.openpype.creators.substancepainter.workfile" + label = "Workfile" + family = "workfile" + icon = "document" + + default_variant = "Main" + + def create(self): + + if not substance_painter.project.is_open(): + return + + variant = self.default_variant + project_name = self.project_name + asset_name = self.create_context.get_current_asset_name() + task_name = self.create_context.get_current_task_name() + host_name = self.create_context.host_name + + # Workfile instance should always exist and must only exist once. + # As such we'll first check if it already exists and is collected. 
+ current_instance = next( + ( + instance for instance in self.create_context.instances + if instance.creator_identifier == self.identifier + ), None) + + if current_instance is None: + self.log.info("Auto-creating workfile instance...") + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + current_instance = self.create_instance_in_context(subset_name, + data) + elif ( + current_instance["asset"] != asset_name + or current_instance["task"] != task_name + ): + # Update instance context if is not the same + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + current_instance["asset"] = asset_name + current_instance["task"] = task_name + current_instance["subset"] = subset_name + + set_instance( + instance_id=current_instance.get("instance_id"), + instance_data=current_instance.data_to_store() + ) + + def collect_instances(self): + for instance in get_instances(): + if (instance.get("creator_identifier") == self.identifier or + instance.get("family") == self.family): + self.create_instance_in_context_from_existing(instance) + + def update_instances(self, update_list): + instance_data_by_id = {} + for instance, _changes in update_list: + # Persist the data + instance_id = instance.get("instance_id") + instance_data = instance.data_to_store() + instance_data_by_id[instance_id] = instance_data + set_instances(instance_data_by_id, update=True) + + # Helper methods (this might get moved into Creator class) + def create_instance_in_context(self, subset_name, data): + instance = CreatedInstance( + self.family, subset_name, data, self + ) + self.create_context.creator_adds_instance(instance) + return instance + + def create_instance_in_context_from_existing(self, data): + instance = CreatedInstance.from_existing(data, self) + self.create_context.creator_adds_instance(instance) + return instance diff --git a/openpype/hosts/substancepainter/plugins/load/load_mesh.py b/openpype/hosts/substancepainter/plugins/load/load_mesh.py new file mode 100644 index 0000000000..822095641d --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/load/load_mesh.py @@ -0,0 +1,124 @@ +from openpype.pipeline import ( + load, + get_representation_path, +) +from openpype.pipeline.load import LoadError +from openpype.hosts.substancepainter.api.pipeline import ( + imprint_container, + set_container_metadata, + remove_container_metadata +) +from openpype.hosts.substancepainter.api.lib import prompt_new_file_with_mesh + +import substance_painter.project +import qargparse + + +class SubstanceLoadProjectMesh(load.LoaderPlugin): + """Load mesh for project""" + + families = ["*"] + representations = ["abc", "fbx", "obj", "gltf"] + + label = "Load mesh" + order = -10 + icon = "code-fork" + color = "orange" + + options = [ + qargparse.Boolean( + "preserve_strokes", + default=True, + help="Preserve strokes positions on mesh.\n" + "(only relevant when loading into existing project)" + ), + qargparse.Boolean( + "import_cameras", + default=True, + help="Import cameras from the mesh file." 
+ ) + ] + + def load(self, context, name, namespace, data): + + # Get user inputs + import_cameras = data.get("import_cameras", True) + preserve_strokes = data.get("preserve_strokes", True) + + if not substance_painter.project.is_open(): + # Allow to 'initialize' a new project + result = prompt_new_file_with_mesh(mesh_filepath=self.fname) + if not result: + self.log.info("User cancelled new project prompt.") + return + + else: + # Reload the mesh + settings = substance_painter.project.MeshReloadingSettings( + import_cameras=import_cameras, + preserve_strokes=preserve_strokes + ) + + def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): # noqa + if status == substance_painter.project.ReloadMeshStatus.SUCCESS: # noqa + self.log.info("Reload succeeded") + else: + raise LoadError("Reload of mesh failed") + + path = self.fname + substance_painter.project.reload_mesh(path, + settings, + on_mesh_reload) + + # Store container + container = {} + project_mesh_object_name = "_ProjectMesh_" + imprint_container(container, + name=project_mesh_object_name, + namespace=project_mesh_object_name, + context=context, + loader=self) + + # We want store some options for updating to keep consistent behavior + # from the user's original choice. We don't store 'preserve_strokes' + # as we always preserve strokes on updates. + container["options"] = { + "import_cameras": import_cameras, + } + + set_container_metadata(project_mesh_object_name, container) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + + path = get_representation_path(representation) + + # Reload the mesh + container_options = container.get("options", {}) + settings = substance_painter.project.MeshReloadingSettings( + import_cameras=container_options.get("import_cameras", True), + preserve_strokes=True + ) + + def on_mesh_reload(status: substance_painter.project.ReloadMeshStatus): + if status == substance_painter.project.ReloadMeshStatus.SUCCESS: + self.log.info("Reload succeeded") + else: + raise LoadError("Reload of mesh failed") + + substance_painter.project.reload_mesh(path, settings, on_mesh_reload) + + # Update container representation + object_name = container["objectName"] + update_data = {"representation": str(representation["_id"])} + set_container_metadata(object_name, update_data, update=True) + + def remove(self, container): + + # Remove OpenPype related settings about what model was loaded + # or close the project? + # TODO: This is likely best 'hidden' away to the user because + # this will leave the project's mesh unmanaged. 
+ remove_container_metadata(container["objectName"]) diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py b/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py new file mode 100644 index 0000000000..9a37eb0d1c --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/publish/collect_current_file.py @@ -0,0 +1,17 @@ +import pyblish.api + +from openpype.pipeline import registered_host + + +class CollectCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + order = pyblish.api.CollectorOrder - 0.49 + label = "Current Workfile" + hosts = ["substancepainter"] + + def process(self, context): + host = registered_host() + path = host.get_current_workfile() + context.data["currentFile"] = path + self.log.debug(f"Current workfile: {path}") diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py b/openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py new file mode 100644 index 0000000000..d11abd1019 --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/publish/collect_textureset_images.py @@ -0,0 +1,196 @@ +import os +import copy +import pyblish.api + +from openpype.pipeline import publish + +import substance_painter.textureset +from openpype.hosts.substancepainter.api.lib import ( + get_parsed_export_maps, + strip_template +) +from openpype.pipeline.create import get_subset_name +from openpype.client import get_asset_by_name + + +class CollectTextureSet(pyblish.api.InstancePlugin): + """Extract Textures using an output template config""" + # TODO: Production-test usage of color spaces + # TODO: Detect what source data channels end up in each file + + label = "Collect Texture Set images" + hosts = ["substancepainter"] + families = ["textureSet"] + order = pyblish.api.CollectorOrder + + def process(self, instance): + + config = self.get_export_config(instance) + asset_doc = get_asset_by_name( + project_name=instance.context.data["projectName"], + asset_name=instance.data["asset"] + ) + + instance.data["exportConfig"] = config + maps = get_parsed_export_maps(config) + + # Let's break the instance into multiple instances to integrate + # a subset per generated texture or texture UDIM sequence + for (texture_set_name, stack_name), template_maps in maps.items(): + self.log.info(f"Processing {texture_set_name}/{stack_name}") + for template, outputs in template_maps.items(): + self.log.info(f"Processing {template}") + self.create_image_instance(instance, template, outputs, + asset_doc=asset_doc, + texture_set_name=texture_set_name, + stack_name=stack_name) + + def create_image_instance(self, instance, template, outputs, + asset_doc, texture_set_name, stack_name): + """Create a new instance per image or UDIM sequence. + + The new instances will be of family `image`. + + """ + + context = instance.context + first_filepath = outputs[0]["filepath"] + fnames = [os.path.basename(output["filepath"]) for output in outputs] + ext = os.path.splitext(first_filepath)[1] + assert ext.lstrip("."), f"No extension: {ext}" + + always_include_texture_set_name = False # todo: make this configurable + all_texture_sets = substance_painter.textureset.all_texture_sets() + texture_set = substance_painter.textureset.TextureSet.from_name( + texture_set_name + ) + + # Define the suffix we want to give this particular texture + # set and set up a remapped subset naming for it. 
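+        # For example (hypothetical names): variant "Main" with texture
+        # sets "Body" and "Head" and a "BaseColor" map would end up with
+        # subsets like "textureMain.Body.BaseColor" and
+        # "textureMain.Head.BaseColor".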
+ suffix = "" + if always_include_texture_set_name or len(all_texture_sets) > 1: + # More than one texture set, include texture set name + suffix += f".{texture_set_name}" + if texture_set.is_layered_material() and stack_name: + # More than one stack, include stack name + suffix += f".{stack_name}" + + # Always include the map identifier + map_identifier = strip_template(template) + suffix += f".{map_identifier}" + + image_subset = get_subset_name( + # TODO: The family actually isn't 'texture' currently but for now + # this is only done so the subset name starts with 'texture' + family="texture", + variant=instance.data["variant"] + suffix, + task_name=instance.data.get("task"), + asset_doc=asset_doc, + project_name=context.data["projectName"], + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] + ) + + # Prepare representation + representation = { + "name": ext.lstrip("."), + "ext": ext.lstrip("."), + "files": fnames if len(fnames) > 1 else fnames[0], + } + + # Mark as UDIM explicitly if it has UDIM tiles. + if bool(outputs[0].get("udim")): + # The representation for a UDIM sequence should have a `udim` key + # that is a list of all udim tiles (str) like: ["1001", "1002"] + # strings. See CollectTextures plug-in and Integrators. + representation["udim"] = [output["udim"] for output in outputs] + + # Set up the representation for thumbnail generation + # TODO: Simplify this once thumbnail extraction is refactored + staging_dir = os.path.dirname(first_filepath) + representation["tags"] = ["review"] + representation["stagingDir"] = staging_dir + + # Clone the instance + image_instance = context.create_instance(image_subset) + image_instance[:] = instance[:] + image_instance.data.update(copy.deepcopy(instance.data)) + image_instance.data["name"] = image_subset + image_instance.data["label"] = image_subset + image_instance.data["subset"] = image_subset + image_instance.data["family"] = "image" + image_instance.data["families"] = ["image", "textures"] + image_instance.data["representations"] = [representation] + + # Group the textures together in the loader + image_instance.data["subsetGroup"] = instance.data["subset"] + + # Store the texture set name and stack name on the instance + image_instance.data["textureSetName"] = texture_set_name + image_instance.data["textureStackName"] = stack_name + + # Store color space with the instance + # Note: The extractor will assign it to the representation + colorspace = outputs[0].get("colorSpace") + if colorspace: + self.log.debug(f"{image_subset} colorspace: {colorspace}") + image_instance.data["colorspace"] = colorspace + + # Store the instance in the original instance as a member + instance.append(image_instance) + + def get_export_config(self, instance): + """Return an export configuration dict for texture exports. + + This config can be supplied to: + - `substance_painter.export.export_project_textures` + - `substance_painter.export.list_project_textures` + + See documentation on substance_painter.export module about the + formatting of the configuration dictionary. + + Args: + instance (pyblish.api.Instance): Texture Set instance to be + published. 
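+
+        Example:
+            A minimal config of this shape (illustrative values only, not
+            generated verbatim by this plug-in)::
+
+                {
+                    "exportPath": "/path/to/staging",
+                    "defaultExportPreset": "user://my_preset",
+                    "exportList": [{"rootPath": "DefaultMaterial"}],
+                    "exportParameters": [
+                        {"parameters": {"fileFormat": "png"}}
+                    ]
+                }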
+
+        Returns:
+            dict: Export config
+
+        """
+
+        creator_attrs = instance.data["creator_attributes"]
+        preset_url = creator_attrs["exportPresetUrl"]
+        self.log.debug(f"Exporting using preset: {preset_url}")
+
+        # See: https://substance3d.adobe.com/documentation/ptpy/api/substance_painter/export  # noqa
+        config = {  # noqa
+            "exportShaderParams": True,
+            "exportPath": publish.get_instance_staging_dir(instance),
+            "defaultExportPreset": preset_url,
+
+            # Custom overrides to the exporter
+            "exportParameters": [
+                {
+                    "parameters": {
+                        "fileFormat": creator_attrs["exportFileFormat"],
+                        "sizeLog2": creator_attrs["exportSize"],
+                        "paddingAlgorithm": creator_attrs["exportPadding"],
+                        "dilationDistance": creator_attrs["exportDilationDistance"]  # noqa
+                    }
+                }
+            ]
+        }
+
+        # Create the list of Texture Sets to export.
+        config["exportList"] = []
+        for texture_set in substance_painter.textureset.all_texture_sets():
+            config["exportList"].append({"rootPath": texture_set.name()})
+
+        # Treat None values from the creator attributes as optional and
+        # drop them from the parameters.
+        for override in config["exportParameters"]:
+            parameters = override.get("parameters")
+            for key, value in dict(parameters).items():
+                if value is None:
+                    parameters.pop(key)
+
+        return config
diff --git a/openpype/hosts/substancepainter/plugins/publish/collect_workfile_representation.py b/openpype/hosts/substancepainter/plugins/publish/collect_workfile_representation.py
new file mode 100644
index 0000000000..8d98d0b014
--- /dev/null
+++ b/openpype/hosts/substancepainter/plugins/publish/collect_workfile_representation.py
@@ -0,0 +1,26 @@
+import os
+import pyblish.api
+
+
+class CollectWorkfileRepresentation(pyblish.api.InstancePlugin):
+    """Create a publish representation for the current workfile instance."""
+
+    order = pyblish.api.CollectorOrder
+    label = "Workfile representation"
+    hosts = ["substancepainter"]
+    families = ["workfile"]
+
+    def process(self, instance):
+
+        context = instance.context
+        current_file = context.data["currentFile"]
+
+        folder, file = os.path.split(current_file)
+        filename, ext = os.path.splitext(file)
+
+        instance.data["representations"] = [{
+            "name": ext.lstrip("."),
+            "ext": ext.lstrip("."),
+            "files": file,
+            "stagingDir": folder,
+        }]
diff --git a/openpype/hosts/substancepainter/plugins/publish/extract_textures.py b/openpype/hosts/substancepainter/plugins/publish/extract_textures.py
new file mode 100644
index 0000000000..bb6f15ead9
--- /dev/null
+++ b/openpype/hosts/substancepainter/plugins/publish/extract_textures.py
@@ -0,0 +1,62 @@
+import substance_painter.export
+
+from openpype.pipeline import KnownPublishError, publish
+
+
+class ExtractTextures(publish.Extractor,
+                      publish.ColormanagedPyblishPluginMixin):
+    """Extract Textures using an output template config.
+
+    Note:
+        This Extractor assumes that `collect_textureset_images` has prepared
+        the relevant export config and has also collected the individual
+        image instances for publishing, including their representations. That
+        is why this particular Extractor doesn't specify representations to
+        integrate.
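+
+        The export itself runs once for the whole texture set; the image
+        instances collected beforehand then pick up their files from the
+        staging directory during integration.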
+ + """ + + label = "Extract Texture Set" + hosts = ["substancepainter"] + families = ["textureSet"] + + # Run before thumbnail extractors + order = publish.Extractor.order - 0.1 + + def process(self, instance): + + config = instance.data["exportConfig"] + result = substance_painter.export.export_project_textures(config) + + if result.status != substance_painter.export.ExportStatus.Success: + raise KnownPublishError( + "Failed to export texture set: {}".format(result.message) + ) + + # Log what files we generated + for (texture_set_name, stack_name), maps in result.textures.items(): + # Log our texture outputs + self.log.info(f"Exported stack: {texture_set_name} {stack_name}") + for texture_map in maps: + self.log.info(f"Exported texture: {texture_map}") + + # We'll insert the color space data for each image instance that we + # added into this texture set. The collector couldn't do so because + # some anatomy and other instance data needs to be collected prior + context = instance.context + for image_instance in instance: + representation = next(iter(image_instance.data["representations"])) + + colorspace = image_instance.data.get("colorspace") + if not colorspace: + self.log.debug("No color space data present for instance: " + f"{image_instance}") + continue + + self.set_representation_colorspace(representation, + context=context, + colorspace=colorspace) + + # The TextureSet instance should not be integrated. It generates no + # output data. Instead the separated texture instances are generated + # from it which themselves integrate into the database. + instance.data["integrate"] = False diff --git a/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py b/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py new file mode 100644 index 0000000000..b45d66fbb1 --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/publish/increment_workfile.py @@ -0,0 +1,23 @@ +import pyblish.api + +from openpype.lib import version_up +from openpype.pipeline import registered_host + + +class IncrementWorkfileVersion(pyblish.api.ContextPlugin): + """Increment current workfile version.""" + + order = pyblish.api.IntegratorOrder + 1 + label = "Increment Workfile Version" + optional = True + hosts = ["substancepainter"] + + def process(self, context): + + assert all(result["success"] for result in context.data["results"]), ( + "Publishing not successful so version is not increased.") + + host = registered_host() + path = context.data["currentFile"] + self.log.info(f"Incrementing current workfile to: {path}") + host.save_workfile(version_up(path)) diff --git a/openpype/hosts/substancepainter/plugins/publish/save_workfile.py b/openpype/hosts/substancepainter/plugins/publish/save_workfile.py new file mode 100644 index 0000000000..9662f31922 --- /dev/null +++ b/openpype/hosts/substancepainter/plugins/publish/save_workfile.py @@ -0,0 +1,28 @@ +import pyblish.api + +from openpype.pipeline import ( + registered_host, + KnownPublishError +) + + +class SaveCurrentWorkfile(pyblish.api.ContextPlugin): + """Save current workfile""" + + label = "Save current workfile" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["substancepainter"] + + def process(self, context): + + host = registered_host() + current = host.get_current_workfile() + if context.data["currentFile"] != current: + raise KnownPublishError("Workfile has changed during publishing!") + + if host.has_unsaved_changes(): + self.log.info("Saving current file: {}".format(current)) + host.save_workfile() + else: + 
self.log.debug("Skipping workfile save because there are no "
+                           "unsaved changes.")
diff --git a/openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py b/openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py
new file mode 100644
index 0000000000..b57cf4c5a2
--- /dev/null
+++ b/openpype/hosts/substancepainter/plugins/publish/validate_ouput_maps.py
@@ -0,0 +1,109 @@
+import copy
+import os
+
+import pyblish.api
+
+import substance_painter.export
+
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateOutputMaps(pyblish.api.InstancePlugin):
+    """Validate that all output maps of the Output Template are generated.
+
+    Substance Painter will skip exporting an output map defined in the
+    Substance Output Template if the map uses channels that the current
+    Substance Painter project has not painted or generated.
+
+    """
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate output maps"
+    hosts = ["substancepainter"]
+    families = ["textureSet"]
+
+    def process(self, instance):
+
+        config = instance.data["exportConfig"]
+
+        # The Substance Painter API does not allow querying the actual output
+        # maps it will generate without actually exporting the files. So we
+        # try to make the export as small and fast as possible.
+        config = copy.deepcopy(config)
+        parameters = config["exportParameters"][0]["parameters"]
+        parameters["sizeLog2"] = [1, 1]  # output 2x2 images (smallest)
+        parameters["paddingAlgorithm"] = "passthrough"  # no dilation (faster)
+        parameters["dithering"] = False  # no dithering (faster)
+
+        result = substance_painter.export.export_project_textures(config)
+        if result.status != substance_painter.export.ExportStatus.Success:
+            raise PublishValidationError(
+                "Failed to export texture set: {}".format(result.message)
+            )
+
+        generated_files = set()
+        for texture_maps in result.textures.values():
+            for texture_map in texture_maps:
+                generated_files.add(os.path.normpath(texture_map))
+                # Directly clean up our temporary export
+                os.remove(texture_map)
+
+        creator_attributes = instance.data.get("creator_attributes", {})
+        allow_skipped_maps = creator_attributes.get("allowSkippedMaps", True)
+        error_report_missing = []
+        for image_instance in instance:
+
+            # Confirm whether the instance has its expected files generated.
+            # We assume there's just one representation and that it is
+            # the actual texture representation from the collector.
+            representation = next(iter(image_instance.data["representations"]))
+            staging_dir = representation["stagingDir"]
+            filenames = representation["files"]
+            if not isinstance(filenames, (list, tuple)):
+                # Convert single file to list
+                filenames = [filenames]
+
+            missing = []
+            for filename in filenames:
+                filepath = os.path.join(staging_dir, filename)
+                filepath = os.path.normpath(filepath)
+                if filepath not in generated_files:
+                    self.log.warning(f"Missing texture: {filepath}")
+                    missing.append(filepath)
+
+            if not missing:
+                continue
+
+            if allow_skipped_maps:
+                # TODO: This is changing state on the instance, which
+                # should not be done during validation.
+                self.log.warning(f"Disabling texture instance: "
+                                 f"{image_instance}")
+                image_instance.data["active"] = False
+                image_instance.data["integrate"] = False
+                representation.setdefault("tags", []).append("delete")
+                continue
+            else:
+                error_report_missing.append((image_instance, missing))
+
+        if error_report_missing:
+
+            message = (
+                "The Texture Set skipped exporting some output maps which are "
+                "defined in the Output Template. 
This happens if the Output "
+                "Template exports maps from channels which you do not "
+                "have in your current Substance Painter project.\n\n"
+                "To allow this, enable the *Allow Skipped Output Maps* "
+                "setting on the instance.\n\n"
+                f"Instance {instance} skipped exporting output maps:\n"
+                ""
+            )
+
+            for image_instance, missing in error_report_missing:
+                missing_str = ", ".join(missing)
+                message += f"- **{image_instance}** skipped: {missing_str}\n"
+
+            raise PublishValidationError(
+                message=message,
+                title="Missing output maps"
+            )
diff --git a/openpype/hosts/traypublisher/addon.py b/openpype/hosts/traypublisher/addon.py
index c157799898..3b34f9e6e8 100644
--- a/openpype/hosts/traypublisher/addon.py
+++ b/openpype/hosts/traypublisher/addon.py
@@ -10,7 +10,7 @@ TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
 class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction):
-    label = "New Publish (beta)"
+    label = "Publisher"
     name = "traypublisher"
     host_name = "traypublisher"
 
@@ -19,20 +19,9 @@ class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction):
         self.publish_paths = [
             os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish")
         ]
-        self._experimental_tools = None
 
     def tray_init(self):
-        from openpype.tools.experimental_tools import ExperimentalTools
-
-        self._experimental_tools = ExperimentalTools()
-
-    def tray_menu(self, *args, **kwargs):
-        super(TrayPublishAddon, self).tray_menu(*args, **kwargs)
-        traypublisher = self._experimental_tools.get("traypublisher")
-        visible = False
-        if traypublisher and traypublisher.enabled:
-            visible = True
-        self._action_item.setVisible(visible)
+        return
 
     def on_action_trigger(self):
         self.run_traypublisher()
diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py
index 293db542a9..e8f76bd314 100644
--- a/openpype/hosts/traypublisher/api/editorial.py
+++ b/openpype/hosts/traypublisher/api/editorial.py
@@ -8,10 +8,10 @@ from openpype.pipeline.create import CreatorError
 
 class ShotMetadataSolver:
     """ Solving hierarchical metadata
 
-    Used during editorial publishing. Works with imput
+    Used during editorial publishing. Works with input
     clip name and settings defining python formatable template.
     Settings also define searching patterns
-    and its token keys used for formating in templates.
+    and its token keys used for formatting in templates.
     """
 
     NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}")
@@ -40,13 +40,13 @@ class ShotMetadataSolver:
         """Shot renaming function
 
         Args:
-            data (dict): formating data
+            data (dict): formatting data
 
         Raises:
             CreatorError: If missing keys
 
         Returns:
-            str: formated new name
+            str: formatted new name
         """
         shot_rename_template = self.shot_rename[
             "shot_rename_template"]
@@ -58,7 +58,7 @@ class ShotMetadataSolver:
                 "Make sure all keys in settings are correct:: \n\n"
                 f"From template string {shot_rename_template} > "
                 f"`{_E}` has no equivalent in \n"
-                f"{list(data.keys())} input formating keys!"
+                f"{list(data.keys())} input formatting keys!"
             ))
 
     def _generate_tokens(self, clip_name, source_data):
@@ -68,7 +68,7 @@ class ShotMetadataSolver:
 
         Args:
             clip_name (str): name of clip in editorial
-            source_data (dict): data for formating
+            source_data (dict): data for formatting
 
         Raises:
             CreatorError: if missing key
@@ -106,14 +106,14 @@ class ShotMetadataSolver:
         return output_data
 
     def _create_parents_from_settings(self, parents, data):
-        """Formating parent components.
+        """Formatting parent components.
Args: parents (list): list of dict parent components - data (dict): formating data + data (dict): formatting data Raises: - CreatorError: missing formating key + CreatorError: missing formatting key CreatorError: missing token key KeyError: missing parent token @@ -126,7 +126,7 @@ class ShotMetadataSolver: # fill parent keys data template from anatomy data try: - _parent_tokens_formating_data = { + _parent_tokens_formatting_data = { parent_token["name"]: parent_token["value"].format(**data) for parent_token in hierarchy_parents } @@ -143,17 +143,17 @@ class ShotMetadataSolver: for _index, _parent in enumerate( shot_hierarchy["parents_path"].split("/") ): - # format parent token with value which is formated + # format parent token with value which is formatted try: parent_name = _parent.format( - **_parent_tokens_formating_data) + **_parent_tokens_formatting_data) except KeyError as _E: raise CreatorError(( "Make sure all keys in settings are correct : \n\n" f"`{_E}` from template string " f"{shot_hierarchy['parents_path']}, " f" has no equivalent in \n" - f"{list(_parent_tokens_formating_data.keys())} parents" + f"{list(_parent_tokens_formatting_data.keys())} parents" )) parent_token_name = ( @@ -225,7 +225,7 @@ class ShotMetadataSolver: visual_hierarchy = [asset_doc] current_doc = asset_doc - # looping trought all available visual parents + # looping through all available visual parents # if they are not available anymore than it breaks while True: visual_parent_id = current_doc["data"]["visualParent"] @@ -288,7 +288,7 @@ class ShotMetadataSolver: Args: clip_name (str): clip name - source_data (dict): formating data + source_data (dict): formatting data Returns: (str, dict): shot name and hierarchy data @@ -301,19 +301,19 @@ class ShotMetadataSolver: # match clip to shot name at start shot_name = clip_name - # parse all tokens and generate formating data - formating_data = self._generate_tokens(shot_name, source_data) + # parse all tokens and generate formatting data + formatting_data = self._generate_tokens(shot_name, source_data) # generate parents from selected asset parents = self._get_parents_from_selected_asset(asset_doc, project_doc) if self.shot_rename["enabled"]: - shot_name = self._rename_template(formating_data) + shot_name = self._rename_template(formatting_data) self.log.info(f"Renamed shot name: {shot_name}") if self.shot_hierarchy["enabled"]: parents = self._create_parents_from_settings( - parents, formating_data) + parents, formatting_data) if self.shot_add_tasks: tasks = self._generate_tasks_from_settings( diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py index 0a8ddaa343..3264f52b0f 100644 --- a/openpype/hosts/traypublisher/api/pipeline.py +++ b/openpype/hosts/traypublisher/api/pipeline.py @@ -37,7 +37,7 @@ class TrayPublisherHost(HostBase, IPublishHost): return HostContext.get_context_data() def update_context_data(self, data, changes): - HostContext.save_context_data(data, changes) + HostContext.save_context_data(data) def set_project_name(self, project_name): # TODO Deregister project specific plugins and register new project diff --git a/openpype/hosts/traypublisher/batch_parsing.py b/openpype/hosts/traypublisher/batch_parsing.py new file mode 100644 index 0000000000..3ce3b095b9 --- /dev/null +++ b/openpype/hosts/traypublisher/batch_parsing.py @@ -0,0 +1,88 @@ +"""Functions to parse asset names, versions from file names""" +import os +import re + +from openpype.lib import Logger +from openpype.client import 
get_assets, get_asset_by_name + + +def get_asset_doc_from_file_name(source_filename, project_name, + version_regex, all_selected_asset_ids=None): + """Try to parse out asset name from file name provided. + + Artists might provide various file name formats. + Currently handled: + - chair.mov + - chair_v001.mov + - my_chair_to_upload.mov + """ + version = None + asset_name = os.path.splitext(source_filename)[0] + # Always first check if source filename is directly asset (eg. 'chair.mov') + matching_asset_doc = get_asset_by_name_case_not_sensitive( + project_name, asset_name, all_selected_asset_ids) + + if matching_asset_doc is None: + # name contains also a version + matching_asset_doc, version = ( + parse_with_version(project_name, asset_name, version_regex, + all_selected_asset_ids)) + + if matching_asset_doc is None: + matching_asset_doc = parse_containing(project_name, asset_name, + all_selected_asset_ids) + + return matching_asset_doc, version + + +def parse_with_version(project_name, asset_name, version_regex, + all_selected_asset_ids=None, log=None): + """Try to parse asset name from a file name containing version too + + Eg. 'chair_v001.mov' >> 'chair', 1 + """ + if not log: + log = Logger.get_logger(__name__) + log.debug( + ("Asset doc by \"{}\" was not found, trying version regex.". + format(asset_name))) + + matching_asset_doc = version_number = None + + regex_result = version_regex.findall(asset_name) + if regex_result: + _asset_name, _version_number = regex_result[0] + matching_asset_doc = get_asset_by_name_case_not_sensitive( + project_name, _asset_name, + all_selected_asset_ids=all_selected_asset_ids) + if matching_asset_doc: + version_number = int(_version_number) + + return matching_asset_doc, version_number + + +def parse_containing(project_name, asset_name, all_selected_asset_ids=None): + """Look if file name contains any existing asset name""" + for asset_doc in get_assets(project_name, asset_ids=all_selected_asset_ids, + fields=["name"]): + if asset_doc["name"].lower() in asset_name.lower(): + return get_asset_by_name(project_name, asset_doc["name"]) + + +def get_asset_by_name_case_not_sensitive(project_name, asset_name, + all_selected_asset_ids=None, + log=None): + """Handle more cases in file names""" + if not log: + log = Logger.get_logger(__name__) + asset_name = re.compile(asset_name, re.IGNORECASE) + + assets = list(get_assets(project_name, asset_ids=all_selected_asset_ids, + asset_names=[asset_name])) + if assets: + if len(assets) > 1: + log.warning("Too many records found for {}".format( + asset_name)) + return + + return assets.pop() diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py index d1086a1ff3..8640500b18 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_editorial.py +++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py @@ -30,14 +30,14 @@ from openpype.lib import ( CLIP_ATTR_DEFS = [ EnumDef( "fps", - items={ - "from_selection": "From selection", - 23.997: "23.976", - 24: "24", - 25: "25", - 29.97: "29.97", - 30: "30" - }, + items=[ + {"value": "from_selection", "label": "From selection"}, + {"value": 23.997, "label": "23.976"}, + {"value": 24, "label": "24"}, + {"value": 25, "label": "25"}, + {"value": 29.97, "label": "29.97"}, + {"value": 30, "label": "30"} + ], label="FPS" ), NumberDef( @@ -260,7 +260,7 @@ or updating already created. Publishing will create OTIO file. 
) if not first_otio_timeline: - # assing otio timeline for multi file to layer + # assign otio timeline for multi file to layer first_otio_timeline = otio_timeline # create otio editorial instance @@ -283,7 +283,7 @@ or updating already created. Publishing will create OTIO file. Args: subset_name (str): name of subset - data (dict): instnance data + data (dict): instance data sequence_path (str): path to sequence file media_path (str): path to media file otio_timeline (otio.Timeline): otio timeline object @@ -315,7 +315,7 @@ or updating already created. Publishing will create OTIO file. kwargs = {} if extension == ".edl": # EDL has no frame rate embedded so needs explicit - # frame rate else 24 is asssumed. + # frame rate else 24 is assumed. kwargs["rate"] = fps kwargs["ignore_timecode_mismatch"] = True @@ -358,7 +358,7 @@ or updating already created. Publishing will create OTIO file. sequence_file_name, first_otio_timeline=None ): - """Helping function fro creating clip instance + """Helping function for creating clip instance Args: otio_timeline (otio.Timeline): otio timeline object @@ -487,7 +487,22 @@ or updating already created. Publishing will create OTIO file. ) # get video stream data - video_stream = media_data["streams"][0] + video_streams = [] + audio_streams = [] + for stream in media_data["streams"]: + codec_type = stream.get("codec_type") + if codec_type == "audio": + audio_streams.append(stream) + + elif codec_type == "video": + video_streams.append(stream) + + if not video_streams: + raise ValueError( + "Could not find video stream in source file." + ) + + video_stream = video_streams[0] return_data = { "video": True, "start_frame": 0, @@ -500,12 +515,7 @@ or updating already created. Publishing will create OTIO file. } # get audio streams data - audio_stream = [ - stream for stream in media_data["streams"] - if stream["codec_type"] == "audio" - ] - - if audio_stream: + if audio_streams: return_data["audio"] = True except Exception as exc: @@ -527,7 +537,7 @@ or updating already created. Publishing will create OTIO file. Args: otio_clip (otio.Clip): otio clip object - preset (dict): sigle family preset + preset (dict): single family preset instance_data (dict): instance data parenting_data (dict): shot instance parent data @@ -767,7 +777,7 @@ or updating already created. Publishing will create OTIO file. ] def _validate_clip_for_processing(self, otio_clip): - """Validate otio clip attribues + """Validate otio clip attributes Args: otio_clip (otio.Clip): otio clip object @@ -843,7 +853,7 @@ or updating already created. Publishing will create OTIO file. 
single_item=False, label="Media files", ), - # TODO: perhpas better would be timecode and fps input + # TODO: perhaps better would be timecode and fps input NumberDef( "timeline_offset", default=0, diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py index cf25a37918..1bed07f785 100644 --- a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py +++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py @@ -2,14 +2,12 @@ import copy import os import re -from openpype.client import get_assets, get_asset_by_name from openpype.lib import ( FileDef, BoolDef, ) from openpype.pipeline import ( CreatedInstance, - CreatorError ) from openpype.pipeline.create import ( get_subset_name, @@ -17,6 +15,9 @@ from openpype.pipeline.create import ( ) from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator +from openpype.hosts.traypublisher.batch_parsing import ( + get_asset_doc_from_file_name +) class BatchMovieCreator(TrayPublishCreator): @@ -32,12 +33,12 @@ class BatchMovieCreator(TrayPublishCreator): create_allow_context_change = False version_regex = re.compile(r"^(.+)_v([0-9]+)$") + # Position batch creator after simple creators + order = 110 - def __init__(self, project_settings, *args, **kwargs): - super(BatchMovieCreator, self).__init__(project_settings, - *args, **kwargs) + def apply_settings(self, project_settings, system_settings): creator_settings = ( - project_settings["traypublisher"]["BatchMovieCreator"] + project_settings["traypublisher"]["create"]["BatchMovieCreator"] ) self.default_variants = creator_settings["default_variants"] self.default_tasks = creator_settings["default_tasks"] @@ -57,8 +58,8 @@ class BatchMovieCreator(TrayPublishCreator): filepath = os.path.join(file_info["directory"], file_name) instance_data["creator_attributes"] = {"filepath": filepath} - asset_doc, version = self.get_asset_doc_from_file_name( - file_name, self.project_name) + asset_doc, version = get_asset_doc_from_file_name( + file_name, self.project_name, self.version_regex) subset_name, task_name = self._get_subset_and_task( asset_doc, data["variant"], self.project_name) @@ -71,62 +72,6 @@ class BatchMovieCreator(TrayPublishCreator): instance_data, self) self._store_new_instance(new_instance) - def get_asset_doc_from_file_name(self, source_filename, project_name): - """Try to parse out asset name from file name provided. - - Artists might provide various file name formats. - Currently handled: - - chair.mov - - chair_v001.mov - - my_chair_to_upload.mov - """ - version = None - asset_name = os.path.splitext(source_filename)[0] - # Always first check if source filename is in assets - matching_asset_doc = self._get_asset_by_name_case_not_sensitive( - project_name, asset_name) - - if matching_asset_doc is None: - matching_asset_doc, version = ( - self._parse_with_version(project_name, asset_name)) - - if matching_asset_doc is None: - matching_asset_doc = self._parse_containing(project_name, - asset_name) - - if matching_asset_doc is None: - raise CreatorError( - "Cannot guess asset name from {}".format(source_filename)) - - return matching_asset_doc, version - - def _parse_with_version(self, project_name, asset_name): - """Try to parse asset name from a file name containing version too - - Eg. 'chair_v001.mov' >> 'chair', 1 - """ - self.log.debug(( - "Asset doc by \"{}\" was not found, trying version regex." 
-        ).format(asset_name))
-
-        matching_asset_doc = version_number = None
-
-        regex_result = self.version_regex.findall(asset_name)
-        if regex_result:
-            _asset_name, _version_number = regex_result[0]
-            matching_asset_doc = self._get_asset_by_name_case_not_sensitive(
-                project_name, _asset_name)
-            if matching_asset_doc:
-                version_number = int(_version_number)
-
-        return matching_asset_doc, version_number
-
-    def _parse_containing(self, project_name, asset_name):
-        """Look if file name contains any existing asset name"""
-        for asset_doc in get_assets(project_name, fields=["name"]):
-            if asset_doc["name"].lower() in asset_name.lower():
-                return get_asset_by_name(project_name, asset_doc["name"])
-
     def _get_subset_and_task(self, asset_doc, variant, project_name):
         """Create subset name according to standard template process"""
         task_name = self._get_task_name(asset_doc)
@@ -204,16 +149,3 @@ class BatchMovieCreator(TrayPublishCreator):
         File names must then contain only asset name, or asset name + version.
         (eg. 'chair.mov', 'chair_v001.mov', not really safe `my_chair_v001.mov`
         """
-
-    def _get_asset_by_name_case_not_sensitive(self, project_name, asset_name):
-        """Handle more cases in file names"""
-        asset_name = re.compile(asset_name, re.IGNORECASE)
-
-        assets = list(get_assets(project_name, asset_names=[asset_name]))
-        if assets:
-            if len(assets) > 1:
-                self.log.warning("Too many records found for {}".format(
-                    asset_name))
-                return
-
-            return assets.pop()
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_review_frames.py b/openpype/hosts/traypublisher/plugins/publish/collect_review_frames.py
new file mode 100644
index 0000000000..6b41c0dd21
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_review_frames.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+
+
+class CollectReviewInfo(pyblish.api.InstancePlugin):
+    """Collect data required for review instances.
+
+    The ExtractReview plugin requires frame start/end and fps on instance
+    data, which are missing on instances from TrayPublisher.
+
+    Warning:
+        This is a temporary solution to "make it work". Contains removed
+        changes from https://github.com/ynput/OpenPype/pull/4383 reduced
+        to review instances only.
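+
+    The keys copied from the asset document, when present there and not yet
+    set on the instance, are: fps, frameStart, frameEnd, handleStart and
+    handleEnd.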
+ """ + + label = "Collect Review Info" + order = pyblish.api.CollectorOrder + 0.491 + families = ["review"] + hosts = ["traypublisher"] + + def process(self, instance): + asset_entity = instance.data.get("assetEntity") + if instance.data.get("frameStart") is not None or not asset_entity: + self.log.debug("Missing required data on instance") + return + + asset_data = asset_entity["data"] + # Store collected data for logging + collected_data = {} + for key in ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + ): + if key in instance.data or key not in asset_data: + continue + value = asset_data[key] + collected_data[key] = value + instance.data[key] = value + self.log.debug("Collected data: {}".format(str(collected_data))) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py index 183195a515..c081216481 100644 --- a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py +++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py @@ -14,7 +14,7 @@ class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin): There is also possibility to have reviewable representation which can be stored under 'reviewable' attribute stored on instance data. If there was - already created representation with the same files as 'revieable' containes + already created representation with the same files as 'reviewable' contains Representations can be marked for review and in that case is also added 'review' family to instance families. For review can be marked only one diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index 6ac3e6324c..6f76c25e0c 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -309,8 +309,6 @@ class QtTVPaintRpc(BaseTVPaintRpc): self.add_methods( (route_name, self.workfiles_tool), (route_name, self.loader_tool), - (route_name, self.creator_tool), - (route_name, self.subset_manager_tool), (route_name, self.publish_tool), (route_name, self.scene_inventory_tool), (route_name, self.library_loader_tool), @@ -330,21 +328,9 @@ class QtTVPaintRpc(BaseTVPaintRpc): self._execute_in_main_thread(item) return - async def creator_tool(self): - log.info("Triggering Creator tool") - item = MainThreadItem(self.tools_helper.show_creator) - await self._async_execute_in_main_thread(item, wait=False) - - async def subset_manager_tool(self): - log.info("Triggering Subset Manager tool") - item = MainThreadItem(self.tools_helper.show_subset_manager) - # Do not wait for result of callback - self._execute_in_main_thread(item, wait=False) - return - async def publish_tool(self): log.info("Triggering Publish tool") - item = MainThreadItem(self.tools_helper.show_publish) + item = MainThreadItem(self.tools_helper.show_publisher_tool) self._execute_in_main_thread(item) return @@ -403,11 +389,11 @@ class MainThreadItem: self.kwargs = kwargs def execute(self): - """Execute callback and store it's result. + """Execute callback and store its result. Method must be called from main thread. Item is marked as `done` when callback execution finished. Store output of callback of exception - information when callback raise one. + information when callback raises one. 
""" log.debug("Executing process in main thread") if self.done: @@ -859,10 +845,6 @@ class QtCommunicator(BaseCommunicator): "callback": "loader_tool", "label": "Load", "help": "Open loader tool" - }, { - "callback": "creator_tool", - "label": "Create", - "help": "Open creator tool" }, { "callback": "scene_inventory_tool", "label": "Scene inventory", @@ -875,10 +857,6 @@ class QtCommunicator(BaseCommunicator): "callback": "library_loader_tool", "label": "Library", "help": "Open library loader tool" - }, { - "callback": "subset_manager_tool", - "label": "Subset Manager", - "help": "Open subset manager tool" }, { "callback": "experimental_tools", "label": "Experimental tools", diff --git a/openpype/hosts/tvpaint/api/lib.py b/openpype/hosts/tvpaint/api/lib.py index 5e64773b8e..49846d7f29 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -43,14 +43,15 @@ def parse_layers_data(data): layer_id, group_id, visible, position, opacity, name, layer_type, frame_start, frame_end, prelighttable, postlighttable, - selected, editable, sencil_state + selected, editable, sencil_state, is_current ) = layer_raw.split("|") layer = { "layer_id": int(layer_id), "group_id": int(group_id), "visible": visible == "ON", "position": int(position), - "opacity": int(opacity), + # Opacity from 'tv_layerinfo' is always set to '0' so it's unusable + # "opacity": int(opacity), "name": name, "type": layer_type, "frame_start": int(frame_start), @@ -59,7 +60,8 @@ def parse_layers_data(data): "postlighttable": postlighttable == "1", "selected": selected == "1", "editable": editable == "1", - "sencil_state": sencil_state + "sencil_state": sencil_state, + "is_current": is_current == "1" } layers.append(layer) return layers @@ -87,15 +89,17 @@ def get_layers_data_george_script(output_filepath, layer_ids=None): " selected editable sencilState" ), # Check if layer ID match `tv_LayerCurrentID` + "is_current=0", "IF CMP(current_layer_id, layer_id)==1", # - mark layer as selected if layer id match to current layer id + "is_current=1", "selected=1", "END", # Prepare line with data separated by "|" ( "line = layer_id'|'group_id'|'visible'|'position'|'opacity'|'" "name'|'type'|'startFrame'|'endFrame'|'prelighttable'|'" - "postlighttable'|'selected'|'editable'|'sencilState" + "postlighttable'|'selected'|'editable'|'sencilState'|'is_current" ), # Write data to output file "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", @@ -202,8 +206,9 @@ def get_groups_data(communicator=None): # Variable containing full path to output file "output_path = \"{}\"".format(output_filepath), "empty = 0", - # Loop over 100 groups - "FOR idx = 1 TO 100", + # Loop over 26 groups which is ATM maximum possible (in 11.7) + # - ref: https://www.tvpaint.com/forum/viewtopic.php?t=13880 + "FOR idx = 1 TO 26", # Receive information about groups "tv_layercolor \"getcolor\" 0 idx", "PARSE result clip_id group_index c_red c_green c_blue group_name", diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index 249326791b..58fbd09545 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -8,7 +8,7 @@ import requests import pyblish.api from openpype.client import get_project, get_asset_by_name -from openpype.host import HostBase, IWorkfileHost, ILoadHost +from openpype.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR from openpype.settings import get_current_project_settings 
from openpype.lib import register_event_callback @@ -18,6 +18,7 @@ from openpype.pipeline import ( register_creator_plugin_path, AVALON_CONTAINER_ID, ) +from openpype.pipeline.context_tools import get_global_context from .lib import ( execute_george, @@ -29,6 +30,7 @@ log = logging.getLogger(__name__) METADATA_SECTION = "avalon" SECTION_NAME_CONTEXT = "context" +SECTION_NAME_CREATE_CONTEXT = "create_context" SECTION_NAME_INSTANCES = "instances" SECTION_NAME_CONTAINERS = "containers" # Maximum length of metadata chunk string @@ -58,7 +60,7 @@ instances=2 """ -class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): +class TVPaintHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): name = "tvpaint" def install(self): @@ -85,14 +87,63 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): registered_callbacks = ( pyblish.api.registered_callbacks().get("instanceToggled") or [] ) - if self.on_instance_toggle not in registered_callbacks: - pyblish.api.register_callback( - "instanceToggled", self.on_instance_toggle - ) register_event_callback("application.launched", self.initial_launch) register_event_callback("application.exit", self.application_exit) + def get_current_project_name(self): + """ + Returns: + Union[str, None]: Current project name. + """ + + return self.get_current_context().get("project_name") + + def get_current_asset_name(self): + """ + Returns: + Union[str, None]: Current asset name. + """ + + return self.get_current_context().get("asset_name") + + def get_current_task_name(self): + """ + Returns: + Union[str, None]: Current task name. + """ + + return self.get_current_context().get("task_name") + + def get_current_context(self): + context = get_current_workfile_context() + if not context: + return get_global_context() + + if "project_name" in context: + return context + # This is legacy way how context was stored + return { + "project_name": context.get("project"), + "asset_name": context.get("asset"), + "task_name": context.get("task") + } + + # --- Create --- + def get_context_data(self): + return get_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, {}) + + def update_context_data(self, data, changes): + return write_workfile_metadata(SECTION_NAME_CREATE_CONTEXT, data) + + def list_instances(self): + """List all created instances from current workfile.""" + return list_instances() + + def write_instances(self, data): + return write_instances(data) + + # --- Workfile --- def open_workfile(self, filepath): george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( filepath.replace("\\", "/") @@ -102,11 +153,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): def save_workfile(self, filepath=None): if not filepath: filepath = self.get_current_workfile() - context = { - "project": legacy_io.Session["AVALON_PROJECT"], - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] - } + context = get_global_context() save_current_workfile_context(context) # Execute george script to save workfile. @@ -125,6 +172,7 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): def get_workfile_extensions(self): return [".tvpp"] + # --- Load --- def get_containers(self): return get_containers() @@ -137,27 +185,15 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): return log.info("Setting up project...") - set_context_settings() - - def remove_instance(self, instance): - """Remove instance from current workfile metadata. - - Implementation for Subset manager tool. 
-        """
-
-        current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES)
-        instance_id = instance.get("uuid")
-        found_idx = None
-        if instance_id:
-            for idx, _inst in enumerate(current_instances):
-                if _inst["uuid"] == instance_id:
-                    found_idx = idx
-                    break
-
-        if found_idx is None:
+        global_context = get_global_context()
+        project_name = global_context.get("project_name")
+        asset_name = global_context.get("asset_name")
+        if not project_name or not asset_name:
             return
-        current_instances.pop(found_idx)
-        write_instances(current_instances)
+
+        asset_doc = get_asset_by_name(project_name, asset_name)
+
+        set_context_settings(project_name, asset_doc)
 
     def application_exit(self):
         """Logic related to TimerManager.
@@ -177,34 +213,6 @@ class TVPaintHost(HostBase, IWorkfileHost, ILoadHost):
         rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url)
         requests.post(rest_api_url)
 
-    def on_instance_toggle(self, instance, old_value, new_value):
-        """Update instance data in workfile on publish toggle."""
-        # Review may not have real instance in wokrfile metadata
-        if not instance.data.get("uuid"):
-            return
-
-        instance_id = instance.data["uuid"]
-        found_idx = None
-        current_instances = list_instances()
-        for idx, workfile_instance in enumerate(current_instances):
-            if workfile_instance["uuid"] == instance_id:
-                found_idx = idx
-                break
-
-        if found_idx is None:
-            return
-
-        if "active" in current_instances[found_idx]:
-            current_instances[found_idx]["active"] = new_value
-        self.write_instances(current_instances)
-
-    def list_instances(self):
-        """List all created instances from current workfile."""
-        return list_instances()
-
-    def write_instances(self, data):
-        return write_instances(data)
-
 
 def containerise(
     name, namespace, members, context, loader, current_containers=None
@@ -462,23 +470,25 @@ def get_containers():
     return output
 
 
-def set_context_settings(asset_doc=None):
+def set_context_settings(project_name, asset_doc):
     """Set workfile settings by asset document data.
 
     Change fps, resolution and frame start/end.
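+
+    Args:
+        project_name (str): Name of the project the asset belongs to.
+        asset_doc (dict): Asset document; its 'data' is expected to carry
+            the fps, resolution and frame range keys used below
+            (assumed shape).
     """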
""" - project_name = legacy_io.active_project() - if asset_doc is None: - asset_name = legacy_io.Session["AVALON_ASSET"] - # Use current session asset if not passed - asset_doc = get_asset_by_name(project_name, asset_name) + width_key = "resolutionWidth" + height_key = "resolutionHeight" - project_doc = get_project(project_name) + width = asset_doc["data"].get(width_key) + height = asset_doc["data"].get(height_key) + if width is None or height is None: + print("Resolution was not found!") + else: + execute_george( + "tv_resizepage {} {} 0".format(width, height) + ) framerate = asset_doc["data"].get("fps") - if framerate is None: - framerate = project_doc["data"].get("fps") if framerate is not None: execute_george( @@ -487,22 +497,6 @@ def set_context_settings(asset_doc=None): else: print("Framerate was not found!") - width_key = "resolutionWidth" - height_key = "resolutionHeight" - - width = asset_doc["data"].get(width_key) - height = asset_doc["data"].get(height_key) - if width is None or height is None: - width = project_doc["data"].get(width_key) - height = project_doc["data"].get(height_key) - - if width is None or height is None: - print("Resolution was not found!") - else: - execute_george( - "tv_resizepage {} {} 0".format(width, height) - ) - frame_start = asset_doc["data"].get("frameStart") frame_end = asset_doc["data"].get("frameEnd") @@ -510,14 +504,9 @@ def set_context_settings(asset_doc=None): print("Frame range was not found!") return - handles = asset_doc["data"].get("handles") or 0 handle_start = asset_doc["data"].get("handleStart") handle_end = asset_doc["data"].get("handleEnd") - if handle_start is None or handle_end is None: - handle_start = handles - handle_end = handles - # Always start from 0 Mark In and set only Mark Out mark_in = 0 mark_out = mark_in + (frame_end - frame_start) + handle_start + handle_end diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index da456e7067..96b99199f2 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -1,80 +1,142 @@ import re -import uuid -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, - registered_host, +from openpype.pipeline import LoaderPlugin +from openpype.pipeline.create import ( + CreatedInstance, + get_subset_name, + AutoCreator, + Creator, ) +from openpype.pipeline.create.creator_plugins import cache_and_get_instances from .lib import get_layers_data -from .pipeline import get_current_workfile_context -class Creator(LegacyCreator): - def __init__(self, *args, **kwargs): - super(Creator, self).__init__(*args, **kwargs) - # Add unified identifier created with `uuid` module - self.data["uuid"] = str(uuid.uuid4()) +SHARED_DATA_KEY = "openpype.tvpaint.instances" - @classmethod - def get_dynamic_data(cls, *args, **kwargs): - dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs) - # Change asset and name by current workfile context - workfile_context = get_current_workfile_context() - asset_name = workfile_context.get("asset") - task_name = workfile_context.get("task") - if "asset" not in dynamic_data and asset_name: - dynamic_data["asset"] = asset_name +class TVPaintCreatorCommon: + @property + def subset_template_family_filter(self): + return self.family - if "task" not in dynamic_data and task_name: - dynamic_data["task"] = task_name - return dynamic_data - - @staticmethod - def are_instances_same(instance_1, instance_2): - """Compare instances but skip keys with unique values. 
- - During compare are skipped keys that will be 100% sure - different on new instance, like "id". - - Returns: - bool: True if instances are same. - """ - if ( - not isinstance(instance_1, dict) - or not isinstance(instance_2, dict) - ): - return instance_1 == instance_2 - - checked_keys = set() - checked_keys.add("id") - for key, value in instance_1.items(): - if key not in checked_keys: - if key not in instance_2: - return False - if value != instance_2[key]: - return False - checked_keys.add(key) - - for key in instance_2.keys(): - if key not in checked_keys: - return False - return True - - def write_instances(self, data): - self.log.debug( - "Storing instance data to workfile. {}".format(str(data)) + def _cache_and_get_instances(self): + return cache_and_get_instances( + self, SHARED_DATA_KEY, self.host.list_instances ) - host = registered_host() - return host.write_instances(data) - def process(self): - host = registered_host() - data = host.list_instances() - data.append(self.data) - self.write_instances(data) + def _collect_create_instances(self): + instances_by_identifier = self._cache_and_get_instances() + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def _update_create_instances(self, update_list): + if not update_list: + return + + cur_instances = self.host.list_instances() + cur_instances_by_id = {} + for instance_data in cur_instances: + instance_id = instance_data.get("instance_id") + if instance_id: + cur_instances_by_id[instance_id] = instance_data + + for instance, changes in update_list: + instance_data = changes.new_value + cur_instance_data = cur_instances_by_id.get(instance.id) + if cur_instance_data is None: + cur_instances.append(instance_data) + continue + for key in set(cur_instance_data) - set(instance_data): + cur_instance_data.pop(key) + cur_instance_data.update(instance_data) + self.host.write_instances(cur_instances) + + def _custom_get_subset_name( + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None + ): + dynamic_data = self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name, instance + ) + + return get_subset_name( + self.family, + variant, + task_name, + asset_doc, + project_name, + host_name, + dynamic_data=dynamic_data, + project_settings=self.project_settings, + family_filter=self.subset_template_family_filter + ) + + +class TVPaintCreator(Creator, TVPaintCreatorCommon): + def collect_instances(self): + self._collect_create_instances() + + def update_instances(self, update_list): + self._update_create_instances(update_list) + + def remove_instances(self, instances): + ids_to_remove = { + instance.id + for instance in instances + } + cur_instances = self.host.list_instances() + changed = False + new_instances = [] + for instance_data in cur_instances: + if instance_data.get("instance_id") in ids_to_remove: + changed = True + else: + new_instances.append(instance_data) + + if changed: + self.host.write_instances(new_instances) + + for instance in instances: + self._remove_instance_from_context(instance) + + def get_dynamic_data(self, *args, **kwargs): + # Change asset and name by current workfile context + create_context = self.create_context + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + output = {} + if asset_name: + output["asset"] = asset_name + if task_name: + output["task"] = task_name + return 
output
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
+
+    def _store_new_instance(self, new_instance):
+        instances_data = self.host.list_instances()
+        instances_data.append(new_instance.data_to_store())
+        self.host.write_instances(instances_data)
+        self._add_instance_to_context(new_instance)
+
+
+class TVPaintAutoCreator(AutoCreator, TVPaintCreatorCommon):
+    def collect_instances(self):
+        self._collect_create_instances()
+
+    def update_instances(self, update_list):
+        self._update_create_instances(update_list)
+
+    def get_subset_name(self, *args, **kwargs):
+        return self._custom_get_subset_name(*args, **kwargs)
 
 
 class Loader(LoaderPlugin):
diff --git a/openpype/hosts/tvpaint/plugins/create/convert_legacy.py b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
new file mode 100644
index 0000000000..5cfa1faa50
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/create/convert_legacy.py
@@ -0,0 +1,150 @@
+import collections
+
+from openpype.pipeline.create.creator_plugins import (
+    SubsetConvertorPlugin,
+    cache_and_get_instances,
+)
+from openpype.hosts.tvpaint.api.plugin import SHARED_DATA_KEY
+from openpype.hosts.tvpaint.api.lib import get_groups_data
+
+
+class TVPaintLegacyConverted(SubsetConvertorPlugin):
+    """Conversion of legacy instances in scene to new creators.
+
+    This convertor handles only instances created by core creators.
+
+    All instances that would be created using auto-creators are removed,
+    because by the time they are found the auto-created instances already
+    exist.
+    """
+
+    identifier = "tvpaint.legacy.converter"
+
+    def find_instances(self):
+        instances_by_identifier = cache_and_get_instances(
+            self, SHARED_DATA_KEY, self.host.list_instances
+        )
+        if instances_by_identifier[None]:
+            self.add_convertor_item("Convert legacy instances")
+
+    def convert(self):
+        current_instances = self.host.list_instances()
+        to_convert = collections.defaultdict(list)
+        converted = False
+        for instance in current_instances:
+            if instance.get("creator_identifier") is not None:
+                continue
+            converted = True
+
+            family = instance.get("family")
+            if family in (
+                "renderLayer",
+                "renderPass",
+                "renderScene",
+                "review",
+                "workfile",
+            ):
+                to_convert[family].append(instance)
+            else:
+                instance["keep"] = False
+
+        # Skip if nothing was changed
+        if not converted:
+            self.remove_convertor_item()
+            return
+
+        self._convert_render_layers(
+            to_convert["renderLayer"], current_instances)
+        self._convert_render_passes(
+            to_convert["renderPass"], current_instances)
+        self._convert_render_scenes(
+            to_convert["renderScene"], current_instances)
+        self._convert_workfiles(
+            to_convert["workfile"], current_instances)
+        self._convert_reviews(
+            to_convert["review"], current_instances)
+
+        new_instances = [
+            instance
+            for instance in current_instances
+            if instance.get("keep") is not False
+        ]
+        self.host.write_instances(new_instances)
+        # remove legacy item if all is fine
+        self.remove_convertor_item()
+
+    def _convert_render_layers(self, render_layers, current_instances):
+        if not render_layers:
+            return
+
+        # Look for possible existing render layers in scene
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        groups_by_id = {
+            group["group_id"]: group
+            for group in get_groups_data()
+        }
+        for render_layer in render_layers:
+            group_id = 
render_layer.pop("group_id")
+            # Just remove legacy instance if group is already occupied
+            if group_id in render_layers_by_group_id:
+                render_layer["keep"] = False
+                continue
+            # Add identifier
+            render_layer["creator_identifier"] = "render.layer"
+            # Change 'uuid' to 'instance_id'
+            render_layer["instance_id"] = render_layer.pop("uuid")
+            # Fill creator attributes
+            render_layer["creator_attributes"] = {
+                "group_id": group_id
+            }
+            render_layer["family"] = "render"
+            group = groups_by_id[group_id]
+            # Use group name for variant
+            render_layer["variant"] = group["name"]
+
+    def _convert_render_passes(self, render_passes, current_instances):
+        if not render_passes:
+            return
+
+        # Render passes must have an available render layer, so we look for
+        # render layers first
+        # - '_convert_render_layers' must be called before this method
+        render_layers_by_group_id = {}
+        for instance in current_instances:
+            if instance.get("creator_identifier") == "render.layer":
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+
+        for render_pass in render_passes:
+            group_id = render_pass.pop("group_id")
+            render_layer = render_layers_by_group_id.get(group_id)
+            if not render_layer:
+                render_pass["keep"] = False
+                continue
+
+            render_pass["creator_identifier"] = "render.pass"
+            render_pass["instance_id"] = render_pass.pop("uuid")
+            render_pass["family"] = "render"
+
+            render_pass["creator_attributes"] = {
+                "render_layer_instance_id": render_layer["instance_id"]
+            }
+            render_pass["variant"] = render_pass.pop("pass")
+            render_pass.pop("renderlayer")
+
+    # Rest of instances are just marked for deletion
+    def _convert_render_scenes(self, render_scenes, current_instances):
+        for render_scene in render_scenes:
+            render_scene["keep"] = False
+
+    def _convert_workfiles(self, workfiles, current_instances):
+        for workfile in workfiles:
+            workfile["keep"] = False
+
+    def _convert_reviews(self, reviews, current_instances):
+        for review in reviews:
+            review["keep"] = False
diff --git a/openpype/hosts/tvpaint/plugins/create/create_render.py b/openpype/hosts/tvpaint/plugins/create/create_render.py
new file mode 100644
index 0000000000..2369c7329f
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/create/create_render.py
@@ -0,0 +1,1152 @@
+"""Render Layer and Passes creators.
+
+A render layer is the main part and is represented by a color group in
+TVPaint. All TVPaint layers marked with that group color are part of the
+render layer. To be more specific about some parts of a layer it is possible
+to create sub-sets of the layer which are named passes. A render pass
+consists of layers in the same color group as the render layer but defines a
+more specific part.
+
+For example a render layer could be 'Bob' which consists of 5 TVPaint layers.
+- Bob has 'head' which consists of 2 TVPaint layers -> Render pass 'head'
+- Bob has 'body' which consists of 1 TVPaint layer -> Render pass 'body'
+- Bob has 'arm' which consists of 1 TVPaint layer -> Render pass 'arm'
+- The last layer does not belong to any render pass
+
+Bob will be rendered as 'beauty' of bob (all visible layers in group).
+His head will be rendered too but without any other parts. The same for body
+and arm.
+
+What is this good for? Compositing gets more control over how the renders are
+used. It can do transforms on each render pass without the need to modify and
+re-render them using TVPaint.
+
+The workflow may hit issues when other blending modes than the
+default 'color' blend mode are used. 
+
+RENDER_LAYER_DETAILED_DESCRIPTIONS = (
+    """Render Layer is "a group of TVPaint layers"
+
+Be aware that a Render Layer is not a TVPaint layer.
+
+All TVPaint layers in the scene with the color group id are rendered in the
+beauty pass. To create sub passes use the Render Pass creator, which is
+dependent on the existence of a render layer instance.
+
+The group can represent an asset (tree) or a different part of the scene
+that consists of one or more TVPaint layers that can be used as a single
+item during compositing (for example).
+
+In some cases it may be needed to have sub parts of the layer. For example
+'Bob' could be a Render Layer which has 'Arm', 'Head' and 'Body' as Render
+Passes.
+"""
+)
+
+
+RENDER_PASS_DETAILED_DESCRIPTIONS = (
+    """Render Pass is a sub part of a Render Layer.
+
+A Render Pass can consist of one or more TVPaint layers. A Render Pass must
+belong to a Render Layer. Marked TVPaint layers will change their group
+color to match the group color of the Render Layer.
+"""
+)
+
+
+AUTODETECT_RENDER_DETAILED_DESCRIPTION = (
+    """Semi-automated Render Layer and Render Pass creation.
+
+Render Layers and Render Passes are created based on information in the
+TVPaint scene. All color groups used in the scene are used for Render Layer
+creation. The name of the group is used as the variant.
+
+All TVPaint layers under a color group are created as Render Passes where
+the layer name is used as the variant.
+
+The plugin will use all used color groups and layers, or can skip those that
+are not visible.
+
+There is an option to auto-rename color groups before Render Layer creation.
+That is based on a settings template which is filled with the index of the
+group, counted from bottom to top.
+"""
+)
+
+
+class CreateRenderlayer(TVPaintCreator):
+    """Mark a layer group as a Render Layer instance.
+
+    All TVPaint layers in the scene with the color group id are rendered in
+    the beauty pass. To create sub passes use the Render Pass creator, which
+    is dependent on the existence of a render layer instance.
+    """
+
+    label = "Render Layer"
+    family = "render"
+    subset_template_family_filter = "renderLayer"
+    identifier = "render.layer"
+    icon = "fa5.images"
+
+    # George script to change color group
+    rename_script_template = (
+        "tv_layercolor \"setcolor\""
+        " {clip_id} {group_id} {r} {g} {b} \"{name}\""
+    )
+    # Order to be executed before Render Pass creator
+    order = 90
+    description = "Mark TVPaint color group as one Render Layer."
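+    # With the template above, renaming color group 5 of clip 0 to "Bob"
+    # while keeping a pure red color would produce a George command like
+    # this (values are illustrative):
+    #
+    #   tv_layercolor "setcolor" 0 5 255 0 0 "Bob"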
+    detailed_description = RENDER_LAYER_DETAILED_DESCRIPTIONS
+
+    # Settings
+    # - Default render pass name for beauty
+    default_pass_name = "beauty"
+    # - Mark instance for review by default
+    mark_for_review = True
+
+    def apply_settings(self, project_settings, system_settings):
+        plugin_settings = (
+            project_settings["tvpaint"]["create"]["create_render_layer"]
+        )
+        self.default_variant = plugin_settings["default_variant"]
+        self.default_variants = plugin_settings["default_variants"]
+        self.default_pass_name = plugin_settings["default_pass_name"]
+        self.mark_for_review = plugin_settings["mark_for_review"]
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        dynamic_data = super().get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name, instance
+        )
+        dynamic_data["renderpass"] = self.default_pass_name
+        dynamic_data["renderlayer"] = variant
+        return dynamic_data
+
+    def _get_selected_group_ids(self):
+        return {
+            layer["group_id"]
+            for layer in get_layers_data()
+            if layer["selected"]
+        }
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        self.log.debug("Query data from workfile.")
+
+        group_name = instance_data["variant"]
+        group_id = pre_create_data.get("group_id")
+        # This creator should run only on one group
+        if group_id is None or group_id == -1:
+            selected_groups = self._get_selected_group_ids()
+            selected_groups.discard(0)
+            if len(selected_groups) > 1:
+                raise CreatorError("You have selected more than one group")
+
+            if len(selected_groups) == 0:
+                raise CreatorError("You don't have any group selected")
+            group_id = tuple(selected_groups)[0]
+
+        self.log.debug("Querying groups data from workfile.")
+        groups_data = get_groups_data()
+        group_item = None
+        for group_data in groups_data:
+            if group_data["group_id"] == group_id:
+                group_item = group_data
+                break
+
+        for instance in self.create_context.instances:
+            if (
+                instance.creator_identifier == self.identifier
+                and instance["creator_attributes"]["group_id"] == group_id
+            ):
+                raise CreatorError((
+                    f"Group \"{group_item.get('name')}\" is already used"
+                    f" by another render layer \"{instance['subset']}\""
+                ))
+
+        self.log.debug(f"Selected group id is \"{group_id}\".")
+        if "creator_attributes" not in instance_data:
+            instance_data["creator_attributes"] = {}
+        creator_attributes = instance_data["creator_attributes"]
+        mark_for_review = pre_create_data.get("mark_for_review")
+        if mark_for_review is None:
+            mark_for_review = self.mark_for_review
+        creator_attributes["group_id"] = group_id
+        creator_attributes["mark_for_review"] = mark_for_review
+
+        self.log.info(f"Subset name is {subset_name}")
+        new_instance = CreatedInstance(
+            self.family,
+            subset_name,
+            instance_data,
+            self
+        )
+        self._store_new_instance(new_instance)
+
+        if not group_id or group_item["name"] == group_name:
+            return new_instance
+
+        self.log.debug("Changing name of the group.")
+        # Rename TVPaint group (keep color same)
+        # - groups can't contain spaces
+        rename_script = self.rename_script_template.format(
+            clip_id=group_item["clip_id"],
+            group_id=group_item["group_id"],
+            r=group_item["red"],
+            g=group_item["green"],
+            b=group_item["blue"],
+            name=group_name
+        )
+        execute_george_through_file(rename_script)
+
+        self.log.info((
+            f"Name of group with index {group_id}"
+            f" was changed to \"{group_name}\"."
+        ))
+        return new_instance
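+
+    # '_get_groups_enum' below returns items in the shape the enum attribute
+    # definitions expect; roughly (illustrative values):
+    #
+    #   [{"label": "Bob", "value": 5}, {"label": "Sue", "value": 6}]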
+
+    def _get_groups_enum(self):
+        groups_enum = []
+        empty_groups = []
+        for group in get_groups_data():
+            group_name = group["name"]
+            item = {
+                "label": group_name,
+                "value": group["group_id"]
+            }
+            # TVPaint has a defined number of color groups available, but
+            # the count is not consistent across versions, so it is not
+            # possible to know how many groups there are.
+            #
+            if group_name and group_name != "0":
+                if empty_groups:
+                    groups_enum.extend(empty_groups)
+                    empty_groups = []
+                groups_enum.append(item)
+            else:
+                empty_groups.append(item)
+        return groups_enum
+
+    def get_pre_create_attr_defs(self):
+        groups_enum = self._get_groups_enum()
+        groups_enum.insert(0, {"label": "", "value": -1})
+
+        return [
+            EnumDef(
+                "group_id",
+                label="Group",
+                items=groups_enum
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def get_instance_attr_defs(self):
+        groups_enum = self._get_groups_enum()
+        return [
+            EnumDef(
+                "group_id",
+                label="Group",
+                items=groups_enum
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def update_instances(self, update_list):
+        self._update_color_groups()
+        self._update_renderpass_groups()
+
+        super().update_instances(update_list)
+
+    def _update_color_groups(self):
+        render_layer_instances = []
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == self.identifier:
+                render_layer_instances.append(instance)
+
+        if not render_layer_instances:
+            return
+
+        groups_by_id = {
+            group["group_id"]: group
+            for group in get_groups_data()
+        }
+        grg_script_lines = []
+        for instance in render_layer_instances:
+            group_id = instance["creator_attributes"]["group_id"]
+            variant = instance["variant"]
+            group = groups_by_id[group_id]
+            if group["name"] == variant:
+                continue
+
+            grg_script_lines.append(self.rename_script_template.format(
+                clip_id=group["clip_id"],
+                group_id=group["group_id"],
+                r=group["red"],
+                g=group["green"],
+                b=group["blue"],
+                name=variant
+            ))
+
+        if grg_script_lines:
+            execute_george_through_file("\n".join(grg_script_lines))
+
+    def _update_renderpass_groups(self):
+        render_layer_instances = {}
+        render_pass_instances = collections.defaultdict(list)
+
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == CreateRenderPass.identifier:
+                render_layer_id = (
+                    instance["creator_attributes"]["render_layer_instance_id"]
+                )
+                render_pass_instances[render_layer_id].append(instance)
+            elif instance.creator_identifier == self.identifier:
+                render_layer_instances[instance.id] = instance
+
+        if not render_pass_instances or not render_layer_instances:
+            return
+
+        layers_data = get_layers_data()
+        layers_by_name = collections.defaultdict(list)
+        for layer in layers_data:
+            layers_by_name[layer["name"]].append(layer)
+
+        george_lines = []
+        for render_layer_id, instances in render_pass_instances.items():
+            render_layer_inst = render_layer_instances.get(render_layer_id)
+            if render_layer_inst is None:
+                continue
+            group_id = render_layer_inst["creator_attributes"]["group_id"]
+            layer_names = set()
+            for instance in instances:
+                layer_names |= set(instance["layer_names"])
+
+            for layer_name in layer_names:
+                george_lines.extend(
+                    f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
+                    for layer in layers_by_name[layer_name]
+                    if layer["group_id"] != group_id
+                )
+        if george_lines:
+            execute_george_through_file("\n".join(george_lines))
+
+
+class CreateRenderPass(TVPaintCreator):
+    family = "render"
+    subset_template_family_filter = "renderPass"
+    identifier = "render.pass"
+    label = "Render Pass"
+    icon = "fa5.image"
+    description = "Mark selected TVPaint layers as a pass of a Render Layer."
+    detailed_description = RENDER_PASS_DETAILED_DESCRIPTIONS
+
+    order = CreateRenderlayer.order + 10
+
+    # Settings
+    mark_for_review = True
+
+    def apply_settings(self, project_settings, system_settings):
+        plugin_settings = (
+            project_settings["tvpaint"]["create"]["create_render_pass"]
+        )
+        self.default_variant = plugin_settings["default_variant"]
+        self.default_variants = plugin_settings["default_variants"]
+        self.mark_for_review = plugin_settings["mark_for_review"]
+
+    def collect_instances(self):
+        instances_by_identifier = self._cache_and_get_instances()
+        render_layers = {
+            instance_data["instance_id"]: {
+                "variant": instance_data["variant"],
+                "template_data": prepare_template_data({
+                    "renderlayer": instance_data["variant"]
+                })
+            }
+            for instance_data in (
+                instances_by_identifier[CreateRenderlayer.identifier]
+            )
+        }
+
+        for instance_data in instances_by_identifier[self.identifier]:
+            render_layer_instance_id = (
+                instance_data
+                .get("creator_attributes", {})
+                .get("render_layer_instance_id")
+            )
+            render_layer_info = render_layers.get(render_layer_instance_id, {})
+            self.update_instance_labels(
+                instance_data,
+                render_layer_info.get("variant"),
+                render_layer_info.get("template_data")
+            )
+            instance = CreatedInstance.from_existing(instance_data, self)
+            self._add_instance_to_context(instance)
+
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        dynamic_data = super().get_dynamic_data(
+            variant, task_name, asset_doc, project_name, host_name, instance
+        )
+        dynamic_data["renderpass"] = variant
+        dynamic_data["renderlayer"] = "{renderlayer}"
+        return dynamic_data
+
+    def update_instance_labels(
+        self, instance, render_layer_variant, render_layer_data=None
+    ):
+        old_label = instance.get("label")
+        old_group = instance.get("group")
+        new_label = None
+        new_group = None
+        if render_layer_variant is not None:
+            if render_layer_data is None:
+                render_layer_data = prepare_template_data({
+                    "renderlayer": render_layer_variant
+                })
+            try:
+                new_label = instance["subset"].format(**render_layer_data)
+            except (KeyError, ValueError):
+                pass
+
+            new_group = f"{self.get_group_label()} ({render_layer_variant})"
+
+        instance["label"] = new_label
+        instance["group"] = new_group
+        return old_group != new_group or old_label != new_label
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        render_layer_instance_id = pre_create_data.get(
+            "render_layer_instance_id"
+        )
+        if not render_layer_instance_id:
+            raise CreatorError((
+                "You cannot create a Render Pass without a Render Layer."
+                " Please select one first."
+            ))
+
+        render_layer_instance = self.create_context.instances_by_id.get(
+            render_layer_instance_id
+        )
+        if render_layer_instance is None:
+            raise CreatorError((
+                "RenderLayer instance was not found"
+                f" by id \"{render_layer_instance_id}\""
+            ))
+
+        group_id = render_layer_instance["creator_attributes"]["group_id"]
+        self.log.debug("Query data from workfile.")
+        layers_data = get_layers_data()
+
+        self.log.debug("Checking selection.")
+        # Get all selected layers and their group ids
+        marked_layer_names = pre_create_data.get("layer_names")
+        if marked_layer_names is not None:
+            layers_by_name = {layer["name"]: layer for layer in layers_data}
+            marked_layers = []
+            for layer_name in marked_layer_names:
+                layer = layers_by_name.get(layer_name)
+                if layer is None:
+                    raise CreatorError(
+                        f"Layer with name \"{layer_name}\" was not found")
+                marked_layers.append(layer)
+
+        else:
+            marked_layers = [
+                layer
+                for layer in layers_data
+                if layer["selected"]
+            ]
+
+        # Raise if nothing is selected
+        if not marked_layers:
+            raise CreatorError(
+                "Nothing is selected. Please select layers.")
+
+        marked_layer_names = {layer["name"] for layer in marked_layers}
+
+        instances_to_remove = []
+        for instance in self.create_context.instances:
+            if instance.creator_identifier != self.identifier:
+                continue
+            cur_layer_names = set(instance["layer_names"])
+            if not cur_layer_names.intersection(marked_layer_names):
+                continue
+            new_layer_names = cur_layer_names - marked_layer_names
+            if new_layer_names:
+                instance["layer_names"] = list(new_layer_names)
+            else:
+                instances_to_remove.append(instance)
+
+        render_layer = render_layer_instance["variant"]
+        subset_name_fill_data = {"renderlayer": render_layer}
+
+        # Format dynamic keys in subset name
+        label = subset_name
+        try:
+            label = label.format(
+                **prepare_template_data(subset_name_fill_data)
+            )
+        except (KeyError, ValueError):
+            pass
+
+        self.log.info(f"New subset name is \"{label}\".")
+        instance_data["label"] = label
+        instance_data["group"] = f"{self.get_group_label()} ({render_layer})"
+        instance_data["layer_names"] = list(marked_layer_names)
+        if "creator_attributes" not in instance_data:
+            instance_data["creator_attributes"] = {}
+
+        creator_attributes = instance_data["creator_attributes"]
+        mark_for_review = pre_create_data.get("mark_for_review")
+        if mark_for_review is None:
+            mark_for_review = self.mark_for_review
+        creator_attributes["mark_for_review"] = mark_for_review
+        creator_attributes["render_layer_instance_id"] = (
+            render_layer_instance_id
+        )
+
+        new_instance = CreatedInstance(
+            self.family,
+            subset_name,
+            instance_data,
+            self
+        )
+        instances_data = self._remove_and_filter_instances(
+            instances_to_remove
+        )
+        instances_data.append(new_instance.data_to_store())
+
+        self.host.write_instances(instances_data)
+        self._add_instance_to_context(new_instance)
+        self._change_layers_group(marked_layers, group_id)
+
+        return new_instance
+
+    def _change_layers_group(self, layers, group_id):
+        filtered_layers = [
+            layer
+            for layer in layers
+            if layer["group_id"] != group_id
+        ]
+        if filtered_layers:
+            self.log.info((
+                "Changing group of "
+                f"{','.join([l['name'] for l in filtered_layers])}"
+                f" to {group_id}"
+            ))
+            george_lines = [
+                f"tv_layercolor \"set\" {layer['layer_id']} {group_id}"
+                for layer in filtered_layers
+            ]
+            execute_george_through_file("\n".join(george_lines))
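+
+    # '_change_layers_group' emits one George line per layer that is not in
+    # the target group yet; e.g. moving layers 12 and 13 into color group 5
+    # would execute (illustrative values):
+    #
+    #   tv_layercolor "set" 12 5
+    #   tv_layercolor "set" 13 5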
+
+    def _remove_and_filter_instances(self, instances_to_remove):
+        instances_data = self.host.list_instances()
+        if not instances_to_remove:
+            return instances_data
+
+        removed_ids = set()
+        for instance in instances_to_remove:
+            removed_ids.add(instance.id)
+            self._remove_instance_from_context(instance)
+
+        return [
+            instance_data
+            for instance_data in instances_data
+            if instance_data.get("instance_id") not in removed_ids
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Find available Render Layers
+        # - instances are created after creators reset
+        current_instances = self.host.list_instances()
+        render_layers = [
+            {
+                "value": inst["instance_id"],
+                "label": inst["subset"]
+            }
+            for inst in current_instances
+            if inst.get("creator_identifier") == CreateRenderlayer.identifier
+        ]
+        if not render_layers:
+            render_layers.append({"value": None, "label": "N/A"})
+
+        return [
+            EnumDef(
+                "render_layer_instance_id",
+                label="Render Layer",
+                items=render_layers
+            ),
+            UILabelDef(
+                "NOTE: Try to hit refresh if you don't see a Render Layer"
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+    def get_instance_attr_defs(self):
+        # Find available Render Layers
+        current_instances = self.create_context.instances
+        render_layers = [
+            {
+                "value": instance.id,
+                "label": instance.label
+            }
+            for instance in current_instances
+            if instance.creator_identifier == CreateRenderlayer.identifier
+        ]
+        if not render_layers:
+            render_layers.append({"value": None, "label": "N/A"})
+
+        return [
+            EnumDef(
+                "render_layer_instance_id",
+                label="Render Layer",
+                items=render_layers
+            ),
+            UILabelDef(
+                "NOTE: Try to hit refresh if you don't see a Render Layer"
+            ),
+            BoolDef(
+                "mark_for_review",
+                label="Review",
+                default=self.mark_for_review
+            )
+        ]
+
+
+class TVPaintAutoDetectRenderCreator(TVPaintCreator):
+    """Create Render Layer and Render Pass instances based on scene data.
+
+    This is an auto-detection creator which can be triggered by the user to
+    create instances based on information in the scene. Each color group
+    used in the scene will be created as a Render Layer where the group name
+    is used as the variant, and each TVPaint layer as a Render Pass where
+    the layer name is used as the variant.
+
+    This creator never owns any instances; everything it creates belongs to
+    the Render Layer and Render Pass creators.
+    """
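+
+    # A short sketch of how the settings below interact: with the defaults
+    # 'group_name_template' = "L{group_index}", 'group_idx_offset' = 10 and
+    # 'group_idx_padding' = 3, color groups are renamed from bottom to top
+    # to "L010", "L020", "L030", ...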
+
+    family = "render"
+    label = "Render Layer/Passes"
+    identifier = "render.auto.detect.creator"
+    order = CreateRenderPass.order + 10
+    description = (
+        "Create Render Layers and Render Passes based on scene setup"
+    )
+    detailed_description = AUTODETECT_RENDER_DETAILED_DESCRIPTION
+
+    # Settings
+    enabled = False
+    allow_group_rename = True
+    group_name_template = "L{group_index}"
+    group_idx_offset = 10
+    group_idx_padding = 3
+
+    def apply_settings(self, project_settings, system_settings):
+        plugin_settings = (
+            project_settings
+            ["tvpaint"]
+            ["create"]
+            ["auto_detect_render"]
+        )
+        self.enabled = plugin_settings.get("enabled", False)
+        self.allow_group_rename = plugin_settings["allow_group_rename"]
+        self.group_name_template = plugin_settings["group_name_template"]
+        self.group_idx_offset = plugin_settings["group_idx_offset"]
+        self.group_idx_padding = plugin_settings["group_idx_padding"]
+
+    def _rename_groups(
+        self,
+        groups_order: list[int],
+        scene_groups: list[dict[str, Any]]
+    ):
+        new_group_name_by_id: dict[int, str] = {}
+        groups_by_id: dict[int, dict[str, Any]] = {
+            group["group_id"]: group
+            for group in scene_groups
+        }
+        # Count only renamed groups
+        for idx, group_id in enumerate(groups_order):
+            group_index_value: str = (
+                "{{:0>{}}}"
+                .format(self.group_idx_padding)
+                .format((idx + 1) * self.group_idx_offset)
+            )
+            group_name_fill_values: dict[str, str] = {
+                "groupIdx": group_index_value,
+                "groupidx": group_index_value,
+                "group_idx": group_index_value,
+                "group_index": group_index_value,
+            }
+
+            group_name: str = self.group_name_template.format(
+                **group_name_fill_values
+            )
+            group: dict[str, Any] = groups_by_id[group_id]
+            if group["name"] != group_name:
+                new_group_name_by_id[group_id] = group_name
+
+        grg_lines: list[str] = []
+        for group_id, group_name in new_group_name_by_id.items():
+            group: dict[str, Any] = groups_by_id[group_id]
+            # Group name must be passed as last, quoted argument
+            grg_line: str = (
+                "tv_layercolor \"setcolor\" {} {} {} {} {} \"{}\""
+            ).format(
+                group["clip_id"],
+                group_id,
+                group["red"],
+                group["green"],
+                group["blue"],
+                group_name
+            )
+            grg_lines.append(grg_line)
+            group["name"] = group_name
+
+        if grg_lines:
+            execute_george_through_file("\n".join(grg_lines))
+
+    def _prepare_render_layer(
+        self,
+        project_name: str,
+        asset_doc: dict[str, Any],
+        task_name: str,
+        group_id: int,
+        groups: list[dict[str, Any]],
+        mark_for_review: bool,
+        existing_instance: Optional[CreatedInstance] = None,
+    ) -> Union[CreatedInstance, None]:
+        match_group: Union[dict[str, Any], None] = next(
+            (
+                group
+                for group in groups
+                if group["group_id"] == group_id
+            ),
+            None
+        )
+        if not match_group:
+            return None
+
+        variant: str = match_group["name"]
+        creator: CreateRenderlayer = (
+            self.create_context.creators[CreateRenderlayer.identifier]
+        )
+
+        subset_name: str = creator.get_subset_name(
+            variant,
+            task_name,
+            asset_doc,
+            project_name,
+            host_name=self.create_context.host_name,
+        )
+        if existing_instance is not None:
+            existing_instance["asset"] = asset_doc["name"]
+            existing_instance["task"] = task_name
+            existing_instance["subset"] = subset_name
+            return existing_instance
+
+        instance_data: dict[str, str] = {
+            "asset": asset_doc["name"],
+            "task": task_name,
+            "family": creator.family,
+            "variant": variant
+        }
+        pre_create_data: dict[str, str] = {
+            "group_id": group_id,
+            "mark_for_review": mark_for_review
+        }
+        return creator.create(subset_name, instance_data, pre_create_data)
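+
+    # Subset names coming from the Render Pass creator may still contain a
+    # "{renderlayer}" placeholder. 'prepare_template_data' expands a fill
+    # key into its case variants so templates like "render{Renderlayer}"
+    # resolve as expected; a rough, illustrative example:
+    #
+    #   "render{Renderlayer}".format(
+    #       **prepare_template_data({"renderlayer": "L010"})
+    #   )  # -> "renderL010"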
+
+    def _prepare_render_passes(
+        self,
+        project_name: str,
+        asset_doc: dict[str, Any],
+        task_name: str,
+        render_layer_instance: CreatedInstance,
+        layers: list[dict[str, Any]],
+        mark_for_review: bool,
+        existing_render_passes: list[CreatedInstance]
+    ):
+        creator: CreateRenderPass = (
+            self.create_context.creators[CreateRenderPass.identifier]
+        )
+        render_pass_by_layer_name = {}
+        for render_pass in existing_render_passes:
+            for layer_name in render_pass["layer_names"]:
+                render_pass_by_layer_name[layer_name] = render_pass
+
+        for layer in layers:
+            layer_name = layer["name"]
+            variant = layer_name
+            render_pass = render_pass_by_layer_name.get(layer_name)
+            if render_pass is not None:
+                if len(render_pass["layer_names"]) > 1:
+                    variant = render_pass["variant"]
+
+            subset_name = creator.get_subset_name(
+                variant,
+                task_name,
+                asset_doc,
+                project_name,
+                host_name=self.create_context.host_name,
+                instance=render_pass
+            )
+
+            if render_pass is not None:
+                render_pass["asset"] = asset_doc["name"]
+                render_pass["task"] = task_name
+                render_pass["subset"] = subset_name
+                continue
+
+            instance_data: dict[str, str] = {
+                "asset": asset_doc["name"],
+                "task": task_name,
+                "family": creator.family,
+                "variant": variant
+            }
+            pre_create_data: dict[str, Any] = {
+                "render_layer_instance_id": render_layer_instance.id,
+                "layer_names": [layer_name],
+                "mark_for_review": mark_for_review
+            }
+            creator.create(subset_name, instance_data, pre_create_data)
+
+    def _filter_groups(
+        self,
+        layers_by_group_id,
+        groups_order,
+        only_visible_groups
+    ):
+        new_groups_order = []
+        for group_id in groups_order:
+            layers: list[dict[str, Any]] = layers_by_group_id[group_id]
+            if not layers:
+                continue
+
+            if (
+                only_visible_groups
+                and not any(
+                    layer
+                    for layer in layers
+                    if layer["visible"]
+                )
+            ):
+                continue
+            new_groups_order.append(group_id)
+        return new_groups_order
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        project_name: str = self.create_context.get_current_project_name()
+        asset_name: str = instance_data["asset"]
+        task_name: str = instance_data["task"]
+        asset_doc: dict[str, Any] = get_asset_by_name(project_name, asset_name)
+
+        render_layers_by_group_id: dict[int, CreatedInstance] = {}
+        render_passes_by_render_layer_id: dict[int, list[CreatedInstance]] = (
+            collections.defaultdict(list)
+        )
+        for instance in self.create_context.instances:
+            if instance.creator_identifier == CreateRenderlayer.identifier:
+                group_id = instance["creator_attributes"]["group_id"]
+                render_layers_by_group_id[group_id] = instance
+            elif instance.creator_identifier == CreateRenderPass.identifier:
+                render_layer_id = (
+                    instance
+                    ["creator_attributes"]
+                    ["render_layer_instance_id"]
+                )
+                render_passes_by_render_layer_id[render_layer_id].append(
+                    instance
+                )
+
+        layers_by_group_id: dict[int, list[dict[str, Any]]] = (
+            collections.defaultdict(list)
+        )
+        scene_layers: list[dict[str, Any]] = get_layers_data()
+        scene_groups: list[dict[str, Any]] = get_groups_data()
+        groups_order: list[int] = []
+        for layer in scene_layers:
+            group_id: int = layer["group_id"]
+            # Skip 'default' group
+            if group_id == 0:
+                continue
+
+            layers_by_group_id[group_id].append(layer)
+            if group_id not in groups_order:
+                groups_order.append(group_id)
+
+        groups_order.reverse()
+
+        mark_layers_for_review = pre_create_data.get(
+            "mark_layers_for_review", False
+        )
+        mark_passes_for_review = pre_create_data.get(
+            "mark_passes_for_review", False
+        )
+        rename_groups = pre_create_data.get("rename_groups", False)
+        only_visible_groups = pre_create_data.get("only_visible_groups", False)
+        groups_order = 
self._filter_groups( + layers_by_group_id, + groups_order, + only_visible_groups + ) + if not groups_order: + return + + if rename_groups: + self._rename_groups(groups_order, scene_groups) + + # Make sure all render layers are created + for group_id in groups_order: + instance: Union[CreatedInstance, None] = ( + self._prepare_render_layer( + project_name, + asset_doc, + task_name, + group_id, + scene_groups, + mark_layers_for_review, + render_layers_by_group_id.get(group_id), + ) + ) + if instance is not None: + render_layers_by_group_id[group_id] = instance + + for group_id in groups_order: + layers: list[dict[str, Any]] = layers_by_group_id[group_id] + render_layer_instance: Union[CreatedInstance, None] = ( + render_layers_by_group_id.get(group_id) + ) + if not layers or render_layer_instance is None: + continue + + self._prepare_render_passes( + project_name, + asset_doc, + task_name, + render_layer_instance, + layers, + mark_passes_for_review, + render_passes_by_render_layer_id[render_layer_instance.id] + ) + + def get_pre_create_attr_defs(self) -> list[AbstractAttrDef]: + render_layer_creator: CreateRenderlayer = ( + self.create_context.creators[CreateRenderlayer.identifier] + ) + render_pass_creator: CreateRenderPass = ( + self.create_context.creators[CreateRenderPass.identifier] + ) + output = [] + if self.allow_group_rename: + output.extend([ + BoolDef( + "rename_groups", + label="Rename color groups", + tooltip="Will rename color groups using studio template", + default=True + ), + BoolDef( + "only_visible_groups", + label="Only visible color groups", + tooltip=( + "Render Layers and rename will happen only on color" + " groups with visible layers." + ), + default=True + ), + UISeparatorDef() + ]) + output.extend([ + BoolDef( + "mark_layers_for_review", + label="Mark RenderLayers for review", + default=render_layer_creator.mark_for_review + ), + BoolDef( + "mark_passes_for_review", + label="Mark RenderPasses for review", + default=render_pass_creator.mark_for_review + ) + ]) + return output + + +class TVPaintSceneRenderCreator(TVPaintAutoCreator): + family = "render" + subset_template_family_filter = "renderScene" + identifier = "render.scene" + label = "Scene Render" + icon = "fa.file-image-o" + + # Settings + default_pass_name = "beauty" + mark_for_review = True + active_on_create = False + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_render_scene"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.mark_for_review = plugin_settings["mark_for_review"] + self.active_on_create = plugin_settings["active_on_create"] + self.default_pass_name = plugin_settings["default_pass_name"] + + def get_dynamic_data(self, variant, *args, **kwargs): + dynamic_data = super().get_dynamic_data(variant, *args, **kwargs) + dynamic_data["renderpass"] = "{renderpass}" + dynamic_data["renderlayer"] = variant + return dynamic_data + + def _create_new_instance(self): + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + 
"variant": self.default_variant, + "creator_attributes": { + "render_pass_name": self.default_pass_name, + "mark_for_review": True + }, + "label": self._get_label( + subset_name, + self.default_pass_name + ) + } + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + return new_instance + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + if existing_instance is None: + return self._create_new_instance() + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name + + existing_instance["label"] = self._get_label( + existing_instance["subset"], + existing_instance["creator_attributes"]["render_pass_name"] + ) + + def _get_label(self, subset_name, render_pass_name): + try: + subset_name = subset_name.format(**prepare_template_data({ + "renderpass": render_pass_name + })) + except (KeyError, ValueError): + pass + + return subset_name + + def get_instance_attr_defs(self): + return [ + TextDef( + "render_pass_name", + label="Pass Name", + default=self.default_pass_name, + tooltip=( + "Value is calculated during publishing and UI will update" + " label after refresh." 
+ ) + ), + BoolDef( + "mark_for_review", + label="Review", + default=self.mark_for_review + ) + ] diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py deleted file mode 100644 index 009b69c4f1..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ /dev/null @@ -1,231 +0,0 @@ -from openpype.lib import prepare_template_data -from openpype.pipeline import CreatorError -from openpype.hosts.tvpaint.api import ( - plugin, - CommunicationWrapper -) -from openpype.hosts.tvpaint.api.lib import ( - get_layers_data, - get_groups_data, - execute_george_through_file, -) -from openpype.hosts.tvpaint.api.pipeline import list_instances - - -class CreateRenderlayer(plugin.Creator): - """Mark layer group as one instance.""" - name = "render_layer" - label = "RenderLayer" - family = "renderLayer" - icon = "cube" - defaults = ["Main"] - - rename_group = True - render_pass = "beauty" - - rename_script_template = ( - "tv_layercolor \"setcolor\"" - " {clip_id} {group_id} {r} {g} {b} \"{name}\"" - ) - - dynamic_subset_keys = [ - "renderpass", "renderlayer", "render_pass", "render_layer", "group" - ] - - @classmethod - def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name - ): - dynamic_data = super(CreateRenderlayer, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name - ) - # Use render pass name from creator's plugin - dynamic_data["renderpass"] = cls.render_pass - # Add variant to render layer - dynamic_data["renderlayer"] = variant - # Change family for subset name fill - dynamic_data["family"] = "render" - - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_pass"] = dynamic_data["renderpass"] - dynamic_data["render_layer"] = dynamic_data["renderlayer"] - - return dynamic_data - - @classmethod - def get_default_variant(cls): - """Default value for variant in Creator tool. - - Method checks if TVPaint implementation is running and tries to find - selected layers from TVPaint. If only one is selected it's name is - returned. - - Returns: - str: Default variant name for Creator tool. - """ - # Validate that communication is initialized - if CommunicationWrapper.communicator: - # Get currently selected layers - layers_data = get_layers_data() - - selected_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - # Return layer name if only one is selected - if len(selected_layers) == 1: - return selected_layers[0]["name"] - - # Use defaults - if cls.defaults: - return cls.defaults[0] - return None - - def process(self): - self.log.debug("Query data from workfile.") - instances = list_instances() - layers_data = get_layers_data() - - self.log.debug("Checking for selection groups.") - # Collect group ids from selection - group_ids = set() - for layer in layers_data: - if layer["selected"]: - group_ids.add(layer["group_id"]) - - # Raise if there is no selection - if not group_ids: - raise CreatorError("Nothing is selected.") - - # This creator should run only on one group - if len(group_ids) > 1: - raise CreatorError("More than one group is in selection.") - - group_id = tuple(group_ids)[0] - # If group id is `0` it is `default` group which is invalid - if group_id == 0: - raise CreatorError( - "Selection is not in group. Can't mark selection as Beauty." 
- ) - - self.log.debug(f"Selected group id is \"{group_id}\".") - self.data["group_id"] = group_id - - group_data = get_groups_data() - group_name = None - for group in group_data: - if group["group_id"] == group_id: - group_name = group["name"] - break - - if group_name is None: - raise AssertionError( - "Couldn't find group by id \"{}\"".format(group_id) - ) - - subset_name_fill_data = { - "group": group_name - } - - family = self.family = self.data["family"] - - # Fill dynamic key 'group' - subset_name = self.data["subset"].format( - **prepare_template_data(subset_name_fill_data) - ) - self.data["subset"] = subset_name - - # Check for instances of same group - existing_instance = None - existing_instance_idx = None - # Check if subset name is not already taken - same_subset_instance = None - same_subset_instance_idx = None - for idx, instance in enumerate(instances): - if instance["family"] == family: - if instance["group_id"] == group_id: - existing_instance = instance - existing_instance_idx = idx - elif instance["subset"] == subset_name: - same_subset_instance = instance - same_subset_instance_idx = idx - - if ( - same_subset_instance_idx is not None - and existing_instance_idx is not None - ): - break - - if same_subset_instance_idx is not None: - if self._ask_user_subset_override(same_subset_instance): - instances.pop(same_subset_instance_idx) - else: - return - - if existing_instance is not None: - self.log.info( - f"Beauty instance for group id {group_id} already exists" - ", overriding" - ) - instances[existing_instance_idx] = self.data - else: - instances.append(self.data) - - self.write_instances(instances) - - if not self.rename_group: - self.log.info("Group rename function is turned off. Skipping") - return - - self.log.debug("Querying groups data from workfile.") - groups_data = get_groups_data() - - self.log.debug("Changing name of the group.") - selected_group = None - for group_data in groups_data: - if group_data["group_id"] == group_id: - selected_group = group_data - - # Rename TVPaint group (keep color same) - # - groups can't contain spaces - new_group_name = self.data["variant"].replace(" ", "_") - rename_script = self.rename_script_template.format( - clip_id=selected_group["clip_id"], - group_id=selected_group["group_id"], - r=selected_group["red"], - g=selected_group["green"], - b=selected_group["blue"], - name=new_group_name - ) - execute_george_through_file(rename_script) - - self.log.info( - f"Name of group with index {group_id}" - f" was changed to \"{new_group_name}\"." - ) - - def _ask_user_subset_override(self, instance): - from qtpy import QtCore - from qtpy.QtWidgets import QMessageBox - - title = "Subset \"{}\" already exist".format(instance["subset"]) - text = ( - "Instance with subset name \"{}\" already exists." - "\n\nDo you want to override existing?" 
- ).format(instance["subset"]) - - dialog = QMessageBox() - dialog.setWindowFlags( - dialog.windowFlags() - | QtCore.Qt.WindowStaysOnTopHint - ) - dialog.setWindowTitle(title) - dialog.setText(text) - dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No) - dialog.setDefaultButton(QMessageBox.Yes) - dialog.exec_() - if dialog.result() == QMessageBox.Yes: - return True - return False diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py deleted file mode 100644 index a44cb29f20..0000000000 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ /dev/null @@ -1,167 +0,0 @@ -from openpype.pipeline import CreatorError -from openpype.lib import prepare_template_data -from openpype.hosts.tvpaint.api import ( - plugin, - CommunicationWrapper -) -from openpype.hosts.tvpaint.api.lib import get_layers_data -from openpype.hosts.tvpaint.api.pipeline import list_instances - - -class CreateRenderPass(plugin.Creator): - """Render pass is combination of one or more layers from same group. - - Requirement to create Render Pass is to have already created beauty - instance. Beauty instance is used as base for subset name. - """ - name = "render_pass" - label = "RenderPass" - family = "renderPass" - icon = "cube" - defaults = ["Main"] - - dynamic_subset_keys = [ - "renderpass", "renderlayer", "render_pass", "render_layer" - ] - - @classmethod - def get_dynamic_data( - cls, variant, task_name, asset_id, project_name, host_name - ): - dynamic_data = super(CreateRenderPass, cls).get_dynamic_data( - variant, task_name, asset_id, project_name, host_name - ) - dynamic_data["renderpass"] = variant - dynamic_data["family"] = "render" - - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_pass"] = dynamic_data["renderpass"] - - return dynamic_data - - @classmethod - def get_default_variant(cls): - """Default value for variant in Creator tool. - - Method checks if TVPaint implementation is running and tries to find - selected layers from TVPaint. If only one is selected it's name is - returned. - - Returns: - str: Default variant name for Creator tool. 
- """ - # Validate that communication is initialized - if CommunicationWrapper.communicator: - # Get currently selected layers - layers_data = get_layers_data() - - selected_layers = [ - layer - for layer in layers_data - if layer["selected"] - ] - # Return layer name if only one is selected - if len(selected_layers) == 1: - return selected_layers[0]["name"] - - # Use defaults - if cls.defaults: - return cls.defaults[0] - return None - - def process(self): - self.log.debug("Query data from workfile.") - instances = list_instances() - layers_data = get_layers_data() - - self.log.debug("Checking selection.") - # Get all selected layers and their group ids - group_ids = set() - selected_layers = [] - for layer in layers_data: - if layer["selected"]: - selected_layers.append(layer) - group_ids.add(layer["group_id"]) - - # Raise if nothing is selected - if not selected_layers: - raise CreatorError("Nothing is selected.") - - # Raise if layers from multiple groups are selected - if len(group_ids) != 1: - raise CreatorError("More than one group is in selection.") - - group_id = tuple(group_ids)[0] - self.log.debug(f"Selected group id is \"{group_id}\".") - - # Find beauty instance for selected layers - beauty_instance = None - for instance in instances: - if ( - instance["family"] == "renderLayer" - and instance["group_id"] == group_id - ): - beauty_instance = instance - break - - # Beauty is required for this creator so raise if was not found - if beauty_instance is None: - raise CreatorError("Beauty pass does not exist yet.") - - subset_name = self.data["subset"] - - subset_name_fill_data = {} - - # Backwards compatibility - # - beauty may be created with older creator where variant was not - # stored - if "variant" not in beauty_instance: - render_layer = beauty_instance["name"] - else: - render_layer = beauty_instance["variant"] - - subset_name_fill_data["renderlayer"] = render_layer - subset_name_fill_data["render_layer"] = render_layer - - # Format dynamic keys in subset name - new_subset_name = subset_name.format( - **prepare_template_data(subset_name_fill_data) - ) - self.data["subset"] = new_subset_name - self.log.info(f"New subset name is \"{new_subset_name}\".") - - family = self.data["family"] - variant = self.data["variant"] - - self.data["group_id"] = group_id - self.data["pass"] = variant - self.data["renderlayer"] = render_layer - - # Collect selected layer ids to be stored into instance - layer_names = [layer["name"] for layer in selected_layers] - self.data["layer_names"] = layer_names - - # Check if same instance already exists - existing_instance = None - existing_instance_idx = None - for idx, instance in enumerate(instances): - if ( - instance["family"] == family - and instance["group_id"] == group_id - and instance["pass"] == variant - ): - existing_instance = instance - existing_instance_idx = idx - break - - if existing_instance is not None: - self.log.info( - f"Render pass instance for group id {group_id}" - f" and name \"{variant}\" already exists, overriding." 
- ) - instances[existing_instance_idx] = self.data - else: - instances.append(self.data) - - self.write_instances(instances) diff --git a/openpype/hosts/tvpaint/plugins/create/create_review.py b/openpype/hosts/tvpaint/plugins/create/create_review.py new file mode 100644 index 0000000000..886dae7c39 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/create/create_review.py @@ -0,0 +1,76 @@ +from openpype.client import get_asset_by_name +from openpype.pipeline import CreatedInstance +from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintReviewCreator(TVPaintAutoCreator): + family = "review" + identifier = "scene.review" + label = "Review" + icon = "ei.video" + + # Settings + active_on_create = True + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_review"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + self.active_on_create = plugin_settings["active_on_create"] + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + } + if not self.active_on_create: + data["active"] = False + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/create/create_workfile.py b/openpype/hosts/tvpaint/plugins/create/create_workfile.py new file mode 100644 index 0000000000..41347576d5 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/create/create_workfile.py @@ -0,0 +1,70 @@ +from openpype.client import get_asset_by_name +from openpype.pipeline import CreatedInstance +from openpype.hosts.tvpaint.api.plugin import TVPaintAutoCreator + + +class TVPaintWorkfileCreator(TVPaintAutoCreator): + family = "workfile" + identifier = "workfile" + label = "Workfile" + icon = "fa.file-o" + + def apply_settings(self, project_settings, system_settings): + plugin_settings = ( + project_settings["tvpaint"]["create"]["create_workfile"] + ) + self.default_variant = plugin_settings["default_variant"] + self.default_variants = plugin_settings["default_variants"] + + def create(self): + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == 
self.identifier: + existing_instance = instance + break + + create_context = self.create_context + host_name = create_context.host_name + project_name = create_context.get_current_project_name() + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + self.default_variant, + task_name, + asset_doc, + project_name, + host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": self.default_variant + } + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + instances_data = self.host.list_instances() + instances_data.append(new_instance.data_to_store()) + self.host.write_instances(instances_data) + self._add_instance_to_context(new_instance) + + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + existing_instance["variant"], + task_name, + asset_doc, + project_name, + host_name, + existing_instance + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index d5b79758ad..63f04cf3ce 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -1,37 +1,36 @@ import pyblish.api -class CollectOutputFrameRange(pyblish.api.ContextPlugin): +class CollectOutputFrameRange(pyblish.api.InstancePlugin): """Collect frame start/end from context. When instances are collected context does not contain `frameStart` and `frameEnd` keys yet. They are collected in global plugin `CollectContextEntities`. 
""" + label = "Collect output frame range" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.4999 hosts = ["tvpaint"] + families = ["review", "render"] - def process(self, context): - for instance in context: - frame_start = instance.data.get("frameStart") - frame_end = instance.data.get("frameEnd") - if frame_start is not None and frame_end is not None: - self.log.debug( - "Instance {} already has set frames {}-{}".format( - str(instance), frame_start, frame_end - ) - ) - return + def process(self, instance): + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + return - frame_start = context.data.get("frameStart") - frame_end = context.data.get("frameEnd") + context = instance.context - instance.data["frameStart"] = frame_start - instance.data["frameEnd"] = frame_end - - self.log.info( - "Set frames {}-{} on instance {} ".format( - frame_start, frame_end, str(instance) - ) + frame_start = asset_doc["data"]["frameStart"] + fps = asset_doc["data"]["fps"] + frame_end = frame_start + ( + context.data["sceneMarkOut"] - context.data["sceneMarkIn"] + ) + instance.data["fps"] = fps + instance.data["frameStart"] = frame_start + instance.data["frameEnd"] = frame_end + self.log.info( + "Set frames {}-{} on instance {} ".format( + frame_start, frame_end, instance.data["subset"] ) + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py deleted file mode 100644 index ae1326a5bd..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ /dev/null @@ -1,280 +0,0 @@ -import json -import copy -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.pipeline import legacy_io -from openpype.pipeline.create import get_subset_name - - -class CollectInstances(pyblish.api.ContextPlugin): - label = "Collect Instances" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["tvpaint"] - - def process(self, context): - workfile_instances = context.data["workfileInstances"] - - self.log.debug("Collected ({}) instances:\n{}".format( - len(workfile_instances), - json.dumps(workfile_instances, indent=4) - )) - - filtered_instance_data = [] - # Backwards compatibility for workfiles that already have review - # instance in metadata. - review_instance_exist = False - for instance_data in workfile_instances: - family = instance_data["family"] - if family == "review": - review_instance_exist = True - - elif family not in ("renderPass", "renderLayer"): - self.log.info("Unknown family \"{}\". 
Skipping {}".format( - family, json.dumps(instance_data, indent=4) - )) - continue - - filtered_instance_data.append(instance_data) - - # Fake review instance if review was not found in metadata families - if not review_instance_exist: - filtered_instance_data.append( - self._create_review_instance_data(context) - ) - - for instance_data in filtered_instance_data: - instance_data["fps"] = context.data["sceneFps"] - - # Conversion from older instances - # - change 'render_layer' to 'renderlayer' - render_layer = instance_data.get("instance_data") - if not render_layer: - # Render Layer has only variant - if instance_data["family"] == "renderLayer": - render_layer = instance_data.get("variant") - - # Backwards compatibility for renderPasses - elif "render_layer" in instance_data: - render_layer = instance_data["render_layer"] - - if render_layer: - instance_data["renderlayer"] = render_layer - - # Store workfile instance data to instance data - instance_data["originData"] = copy.deepcopy(instance_data) - # Global instance data modifications - # Fill families - family = instance_data["family"] - families = [family] - if family != "review": - families.append("review") - # Add `review` family for thumbnail integration - instance_data["families"] = families - - # Instance name - subset_name = instance_data["subset"] - name = instance_data.get("name", subset_name) - instance_data["name"] = name - instance_data["label"] = "{} [{}-{}]".format( - name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ) - - active = instance_data.get("active", True) - instance_data["active"] = active - instance_data["publish"] = active - # Add representations key - instance_data["representations"] = [] - - # Different instance creation based on family - instance = None - if family == "review": - # Change subset name of review instance - - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
- asset_name = context.data["workfile_context"]["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = context.data["hostName"] - # Use empty variant value - variant = "" - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = get_subset_name( - family, - variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - instance_data["subset"] = new_subset_name - - instance = context.create_instance(**instance_data) - - instance.data["layers"] = copy.deepcopy( - context.data["layersData"] - ) - - elif family == "renderLayer": - instance = self.create_render_layer_instance( - context, instance_data - ) - elif family == "renderPass": - instance = self.create_render_pass_instance( - context, instance_data - ) - - if instance is None: - continue - - any_visible = False - for layer in instance.data["layers"]: - if layer["visible"]: - any_visible = True - break - - instance.data["publish"] = any_visible - - self.log.debug("Created instance: {}\n{}".format( - instance, json.dumps(instance.data, indent=4) - )) - - def _create_review_instance_data(self, context): - """Fake review instance data.""" - - return { - "family": "review", - "asset": context.data["asset"], - # Dummy subset name - "subset": "reviewMain" - } - - def create_render_layer_instance(self, context, instance_data): - name = instance_data["name"] - # Change label - subset_name = instance_data["subset"] - - # Backwards compatibility - # - subset names were not stored as final subset names during creation - if "variant" not in instance_data: - instance_data["label"] = "{}_Beauty".format(name) - - # Change subset name - # Final family of an instance will be `render` - new_family = "render" - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = "{}{}_{}_Beauty".format( - new_family, task_name.capitalize(), name - ) - instance_data["subset"] = new_subset_name - self.log.debug("Changed subset name \"{}\"->\"{}\"".format( - subset_name, new_subset_name - )) - - # Get all layers for the layer - layers_data = context.data["layersData"] - group_id = instance_data["group_id"] - group_layers = [] - for layer in layers_data: - if layer["group_id"] == group_id: - group_layers.append(layer) - - if not group_layers: - # Should be handled here? - self.log.warning(( - f"Group with id {group_id} does not contain any layers." - f" Instance \"{name}\" not created." - )) - return None - - instance_data["layers"] = group_layers - - return context.create_instance(**instance_data) - - def create_render_pass_instance(self, context, instance_data): - pass_name = instance_data["pass"] - self.log.info( - "Creating render pass instance. 
\"{}\"".format(pass_name) - ) - # Change label - render_layer = instance_data["renderlayer"] - - # Backwards compatibility - # - subset names were not stored as final subset names during creation - if "variant" not in instance_data: - instance_data["label"] = "{}_{}".format(render_layer, pass_name) - # Change subset name - # Final family of an instance will be `render` - new_family = "render" - old_subset_name = instance_data["subset"] - task_name = legacy_io.Session["AVALON_TASK"] - new_subset_name = "{}{}_{}_{}".format( - new_family, task_name.capitalize(), render_layer, pass_name - ) - instance_data["subset"] = new_subset_name - self.log.debug("Changed subset name \"{}\"->\"{}\"".format( - old_subset_name, new_subset_name - )) - - layers_data = context.data["layersData"] - layers_by_name = { - layer["name"]: layer - for layer in layers_data - } - - if "layer_names" in instance_data: - layer_names = instance_data["layer_names"] - else: - # Backwards compatibility - # - not 100% working as it was found out that layer ids can't be - # used as unified identifier across multiple workstations - layers_by_id = { - layer["layer_id"]: layer - for layer in layers_data - } - layer_ids = instance_data["layer_ids"] - layer_names = [] - for layer_id in layer_ids: - layer = layers_by_id.get(layer_id) - if layer: - layer_names.append(layer["name"]) - - if not layer_names: - raise ValueError(( - "Metadata contain old way of storing layers information." - " It is not possible to identify layers to publish with" - " these data. Please remove Render Pass instances with" - " Subset manager and use Creator tool to recreate them." - )) - - render_pass_layers = [] - for layer_name in layer_names: - layer = layers_by_name.get(layer_name) - # NOTE This is kind of validation before validators? - if not layer: - self.log.warning( - f"Layer with name {layer_name} was not found." - ) - continue - - render_pass_layers.append(layer) - - if not render_pass_layers: - name = instance_data["name"] - self.log.warning( - f"None of the layers from the RenderPass \"{name}\"" - " exist anymore. Instance not created." 
- ) - return None - - instance_data["layers"] = render_pass_layers - return context.create_instance(**instance_data) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..e89fbf7882 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_render_instances.py @@ -0,0 +1,114 @@ +import copy +import pyblish.api +from openpype.lib import prepare_template_data + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + label = "Collect Render Instances" + order = pyblish.api.CollectorOrder - 0.4 + hosts = ["tvpaint"] + families = ["render", "review"] + + ignore_render_pass_transparency = False + + def process(self, instance): + context = instance.context + creator_identifier = instance.data["creator_identifier"] + if creator_identifier == "render.layer": + self._collect_data_for_render_layer(instance) + + elif creator_identifier == "render.pass": + self._collect_data_for_render_pass(instance) + + elif creator_identifier == "render.scene": + self._collect_data_for_render_scene(instance) + + else: + if creator_identifier == "scene.review": + self._collect_data_for_review(instance) + return + + subset_name = instance.data["subset"] + instance.data["name"] = subset_name + instance.data["label"] = "{} [{}-{}]".format( + subset_name, + context.data["sceneMarkIn"] + 1, + context.data["sceneMarkOut"] + 1 + ) + + def _collect_data_for_render_layer(self, instance): + instance.data["families"].append("renderLayer") + creator_attributes = instance.data["creator_attributes"] + group_id = creator_attributes["group_id"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + layers_data = instance.context.data["layersData"] + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["group_id"] == group_id + ] + + def _collect_data_for_render_pass(self, instance): + instance.data["families"].append("renderPass") + + layer_names = set(instance.data["layer_names"]) + layers_data = instance.context.data["layersData"] + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = [ + copy.deepcopy(layer) + for layer in layers_data + if layer["name"] in layer_names + ] + instance.data["ignoreLayersTransparency"] = ( + self.ignore_render_pass_transparency + ) + + render_layer_data = None + render_layer_id = creator_attributes["render_layer_instance_id"] + for in_data in instance.context.data["workfileInstances"]: + if ( + in_data["creator_identifier"] == "render.layer" + and in_data["instance_id"] == render_layer_id + ): + render_layer_data = in_data + break + + instance.data["renderLayerData"] = copy.deepcopy(render_layer_data) + # Invalid state + if render_layer_data is None: + return + render_layer_name = render_layer_data["variant"] + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderlayer": render_layer_name}) + ) + + def _collect_data_for_render_scene(self, instance): + instance.data["families"].append("renderScene") + + creator_attributes = instance.data["creator_attributes"] + if creator_attributes["mark_for_review"]: + instance.data["families"].append("review") + + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) + + render_pass_name = ( + 
instance.data["creator_attributes"]["render_pass_name"] + ) + subset_name = instance.data["subset"] + instance.data["subset"] = subset_name.format( + **prepare_template_data({"renderpass": render_pass_name}) + ) + + def _collect_data_for_review(self, instance): + instance.data["layers"] = copy.deepcopy( + instance.context.data["layersData"] + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py deleted file mode 100644 index 92a2815ba0..0000000000 --- a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py +++ /dev/null @@ -1,114 +0,0 @@ -import json -import copy -import pyblish.api - -from openpype.client import get_asset_by_name -from openpype.pipeline.create import get_subset_name - - -class CollectRenderScene(pyblish.api.ContextPlugin): - """Collect instance which renders whole scene in PNG. - - Creates instance with family 'renderScene' which will have all layers - to render which will be composite into one result. The instance is not - collected from scene. - - Scene will be rendered with all visible layers similar way like review is. - - Instance is disabled if there are any created instances of 'renderLayer' - or 'renderPass'. That is because it is expected that this instance is - used as lazy publish of TVPaint file. - - Subset name is created similar way like 'renderLayer' family. It can use - `renderPass` and `renderLayer` keys which can be set using settings and - `variant` is filled using `renderPass` value. - """ - label = "Collect Render Scene" - order = pyblish.api.CollectorOrder - 0.39 - hosts = ["tvpaint"] - - # Value of 'render_pass' in subset name template - render_pass = "beauty" - - # Settings attributes - enabled = False - # Value of 'render_layer' and 'variant' in subset name template - render_layer = "Main" - - def process(self, context): - # Check if there are created instances of renderPass and renderLayer - # - that will define if renderScene instance is enabled after - # collection - any_created_instance = False - for instance in context: - family = instance.data["family"] - if family in ("renderPass", "renderLayer"): - any_created_instance = True - break - - # Global instance data modifications - # Fill families - family = "renderScene" - # Add `review` family for thumbnail integration - families = [family, "review"] - - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
- workfile_context = context.data["workfile_context"] - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - asset_name = workfile_context["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = context.data["hostName"] - # Variant is using render pass name - variant = self.render_layer - dynamic_data = { - "renderlayer": self.render_layer, - "renderpass": self.render_pass, - } - # TODO remove - Backwards compatibility for old subset name templates - # - added 2022/04/28 - dynamic_data["render_layer"] = dynamic_data["renderlayer"] - dynamic_data["render_pass"] = dynamic_data["renderpass"] - - task_name = workfile_context["task"] - subset_name = get_subset_name( - "render", - variant, - task_name, - asset_doc, - project_name, - host_name, - dynamic_data=dynamic_data, - project_settings=context.data["project_settings"] - ) - - instance_data = { - "family": family, - "families": families, - "fps": context.data["sceneFps"], - "subset": subset_name, - "name": subset_name, - "label": "{} [{}-{}]".format( - subset_name, - context.data["sceneMarkIn"] + 1, - context.data["sceneMarkOut"] + 1 - ), - "active": not any_created_instance, - "publish": not any_created_instance, - "representations": [], - "layers": copy.deepcopy(context.data["layersData"]), - "asset": asset_name, - "task": task_name, - # Add render layer to instance data - "renderlayer": self.render_layer - } - - instance = context.create_instance(**instance_data) - - self.log.debug("Created instance: {}\n{}".format( - instance, json.dumps(instance.data, indent=4) - )) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py index 8c7c8c3899..a3449663f8 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py @@ -2,17 +2,15 @@ import os import json import pyblish.api -from openpype.client import get_asset_by_name -from openpype.pipeline import legacy_io -from openpype.pipeline.create import get_subset_name - -class CollectWorkfile(pyblish.api.ContextPlugin): +class CollectWorkfile(pyblish.api.InstancePlugin): label = "Collect Workfile" order = pyblish.api.CollectorOrder - 0.4 hosts = ["tvpaint"] + families = ["workfile"] - def process(self, context): + def process(self, instance): + context = instance.context current_file = context.data["currentFile"] self.log.info( @@ -21,49 +19,14 @@ class CollectWorkfile(pyblish.api.ContextPlugin): dirpath, filename = os.path.split(current_file) basename, ext = os.path.splitext(filename) - instance = context.create_instance(name=basename) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] - - # Get subset name of workfile instance - # Collect asset doc to get asset id - # - not sure if it's good idea to require asset id in - # get_subset_name? 
- family = "workfile" - asset_name = context.data["workfile_context"]["asset"] - asset_doc = get_asset_by_name(project_name, asset_name) - - # Host name from environment variable - host_name = os.environ["AVALON_APP"] - # Use empty variant value - variant = "" - task_name = legacy_io.Session["AVALON_TASK"] - subset_name = get_subset_name( - family, - variant, - task_name, - asset_doc, - project_name, - host_name, - project_settings=context.data["project_settings"] - ) - - # Create Workfile instance - instance.data.update({ - "subset": subset_name, - "asset": context.data["asset"], - "label": subset_name, - "publish": True, - "family": "workfile", - "families": ["workfile"], - "representations": [{ - "name": ext.lstrip("."), - "ext": ext.lstrip("."), - "files": filename, - "stagingDir": dirpath - }] + instance.data["representations"].append({ + "name": ext.lstrip("."), + "ext": ext.lstrip("."), + "files": filename, + "stagingDir": dirpath }) + self.log.info("Collected workfile instance: {}".format( json.dumps(instance.data, indent=4) )) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index 8fe71a4a46..95a5cd77bd 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -65,9 +65,9 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect and store current context to have reference current_context = { - "project": legacy_io.Session["AVALON_PROJECT"], - "asset": legacy_io.Session["AVALON_ASSET"], - "task": legacy_io.Session["AVALON_TASK"] + "project_name": context.data["projectName"], + "asset_name": context.data["asset"], + "task_name": context.data["task"] } context.data["previous_context"] = current_context self.log.debug("Current context is: {}".format(current_context)) @@ -76,25 +76,31 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): self.log.info("Collecting workfile context") workfile_context = get_current_workfile_context() + if "project" in workfile_context: + workfile_context = { + "project_name": workfile_context.get("project"), + "asset_name": workfile_context.get("asset"), + "task_name": workfile_context.get("task"), + } # Store workfile context to pyblish context context.data["workfile_context"] = workfile_context if workfile_context: # Change current context with context from workfile key_map = ( - ("AVALON_ASSET", "asset"), - ("AVALON_TASK", "task") + ("AVALON_ASSET", "asset_name"), + ("AVALON_TASK", "task_name") ) for env_key, key in key_map: legacy_io.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] self.log.info("Context changed to: {}".format(workfile_context)) - asset_name = workfile_context["asset"] - task_name = workfile_context["task"] + asset_name = workfile_context["asset_name"] + task_name = workfile_context["task_name"] else: - asset_name = current_context["asset"] - task_name = current_context["task"] + asset_name = current_context["asset_name"] + task_name = current_context["task_name"] # Handle older workfiles or workfiles without metadata self.log.warning(( "Workfile does not contain information about context." 
@@ -103,6 +109,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Store context asset name context.data["asset"] = asset_name + context.data["task"] = task_name self.log.info( "Context is set to Asset: \"{}\" and Task: \"{}\"".format( asset_name, task_name diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py index 78074f720c..8a610cf388 100644 --- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py +++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py @@ -6,6 +6,7 @@ from PIL import Image import pyblish.api +from openpype.pipeline.publish import KnownPublishError from openpype.hosts.tvpaint.api.lib import ( execute_george, execute_george_through_file, @@ -24,8 +25,7 @@ from openpype.hosts.tvpaint.lib import ( class ExtractSequence(pyblish.api.Extractor): label = "Extract Sequence" hosts = ["tvpaint"] - families = ["review", "renderPass", "renderLayer", "renderScene"] - families_to_review = ["review"] + families = ["review", "render"] # Modifiable with settings review_bg = [255, 255, 255, 255] @@ -59,6 +59,10 @@ class ExtractSequence(pyblish.api.Extractor): ) ) + ignore_layers_transparency = instance.data.get( + "ignoreLayersTransparency", False + ) + family_lowered = instance.data["family"].lower() mark_in = instance.context.data["sceneMarkIn"] mark_out = instance.context.data["sceneMarkOut"] @@ -114,7 +118,11 @@ class ExtractSequence(pyblish.api.Extractor): else: # Render output result = self.render( - output_dir, mark_in, mark_out, filtered_layers + output_dir, + mark_in, + mark_out, + filtered_layers, + ignore_layers_transparency ) output_filepaths_by_frame_idx, thumbnail_fullpath = result @@ -136,7 +144,7 @@ class ExtractSequence(pyblish.api.Extractor): # Fill tags and new families from project settings tags = [] - if family_lowered in self.families_to_review: + if "review" in instance.data["families"]: tags.append("review") # Sequence of one frame @@ -162,10 +170,6 @@ class ExtractSequence(pyblish.api.Extractor): instance.data["representations"].append(new_repre) - if family_lowered in ("renderpass", "renderlayer", "renderscene"): - # Change family to render - instance.data["family"] = "render" - if not thumbnail_fullpath: return @@ -259,7 +263,7 @@ class ExtractSequence(pyblish.api.Extractor): output_filepaths_by_frame_idx[frame_idx] = filepath if not os.path.exists(filepath): - raise AssertionError( + raise KnownPublishError( "Output was not rendered. File was not found {}".format( filepath ) @@ -278,7 +282,9 @@ class ExtractSequence(pyblish.api.Extractor): return output_filepaths_by_frame_idx, thumbnail_filepath - def render(self, output_dir, mark_in, mark_out, layers): + def render( + self, output_dir, mark_in, mark_out, layers, ignore_layer_opacity + ): """ Export images from TVPaint. Args: @@ -286,6 +292,7 @@ class ExtractSequence(pyblish.api.Extractor): mark_in (int): Starting frame index from which export will begin. mark_out (int): On which frame index export will end. layers (list): List of layers to be exported. + ignore_layer_opacity (bool): Layer's opacity will be ignored. 
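+                When enabled, the layer density is set to 100 for the export
+                and restored to its original value afterwards.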
 
     Returns:
         tuple: With 2 items first is list of filenames second is path to
@@ -327,7 +334,7 @@ class ExtractSequence(pyblish.api.Extractor):
         for layer_id, render_data in extraction_data_by_layer_id.items():
             layer = layers_by_id[layer_id]
             filepaths_by_layer_id[layer_id] = self._render_layer(
-                render_data, layer, output_dir
+                render_data, layer, output_dir, ignore_layer_opacity
             )
 
         # Prepare final filepaths where compositing should store result
@@ -384,7 +391,9 @@ class ExtractSequence(pyblish.api.Extractor):
         red, green, blue = self.review_bg
         return (red, green, blue)
 
-    def _render_layer(self, render_data, layer, output_dir):
+    def _render_layer(
+        self, render_data, layer, output_dir, ignore_layer_opacity
+    ):
         frame_references = render_data["frame_references"]
         filenames_by_frame_index = render_data["filenames_by_frame_index"]
 
@@ -393,6 +402,12 @@ class ExtractSequence(pyblish.api.Extractor):
             "tv_layerset {}".format(layer_id),
             "tv_SaveMode \"PNG\""
         ]
+        # Set density to 100 and store the previous opacity
+        if ignore_layer_opacity:
+            george_script_lines.extend([
+                "tv_layerdensity 100",
+                "orig_opacity = result",
+            ])
 
         filepaths_by_frame = {}
         frames_to_render = []
@@ -413,6 +428,10 @@ class ExtractSequence(pyblish.api.Extractor):
         # Store image to output
         george_script_lines.append("tv_saveimage \"{}\"".format(dst_path))
 
+        # Set density back to the original opacity
+        if ignore_layer_opacity:
+            george_script_lines.append("tv_layerdensity orig_opacity")
+
         self.log.debug("Rendering Exposure frames {} of layer {} ({})".format(
             ",".join(frames_to_render), layer_id, layer["name"]
         ))
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml
index e7be735888..5832c74350 100644
--- a/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_layers_visibility.xml
@@ -1,7 +1,7 @@
 
 
 
-Layers visiblity
+Layers visibility
 ## All layers are not visible
 
 Layers visibility was changed during publishing which caused that all layers for subset "{instance_name}" are hidden.
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml
new file mode 100644
index 0000000000..a95387356f
--- /dev/null
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_render_layer_group.xml
@@ -0,0 +1,18 @@
+
+
+
+Overused Color group
+## One Color group is used by multiple Render Layers
+
+A single color group used by multiple Render Layers would cause clashes of rendered TVPaint layers: the same layers would be used for the output files of both groups.
+
+### Conflicting groups
+
+{groups_information}
+
+### How to repair?
+
+Refresh, go to the 'Publish' tab, go through the Render Layers and change their color groups so they do not clash with each other. If you have reached the limit of TVPaint color groups, there is no way to fix the issue.
+
+
+
diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml
index 7397f6ef0b..0fc03c2948 100644
--- a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml
+++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_metadata.xml
@@ -11,7 +11,7 @@ Your scene does not contain metadata about {missing_metadata}.
 
 Resave the scene using Workfiles tool or hit the "Repair" button on the right.
 
-### How this could happend? +### How this could happen? You're using scene file that was not created using Workfiles tool. diff --git a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml index c4ffafc8b5..bb57e93bf2 100644 --- a/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml +++ b/openpype/hosts/tvpaint/plugins/publish/help/validate_workfile_project_name.xml @@ -13,7 +13,7 @@ If the workfile belongs to project "{env_project_name}" then use Workfiles tool Otherwise close TVPaint and launch it again from project you want to publish in. -### How this could happend? +### How this could happen? You've opened workfile from different project. You've opened TVPaint on a task from "{env_project_name}" then you've opened TVPaint again on task from "{workfile_project_name}" without closing the TVPaint. Because TVPaint can run only once the project didn't change. diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py index 7e35726030..9347960d3f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,5 +1,8 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.pipeline import ( list_instances, write_instances, @@ -31,8 +34,11 @@ class FixAssetNames(pyblish.api.Action): write_instances(new_instance_items) -class ValidateAssetNames(pyblish.api.ContextPlugin): - """Validate assset name present on instance. +class ValidateAssetName( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): + """Validate asset name present on instance. Asset name on instance should be the same as context's. 
""" @@ -43,6 +49,8 @@ class ValidateAssetNames(pyblish.api.ContextPlugin): actions = [FixAssetNames] def process(self, context): + if not self.is_active(context.data): + return context_asset_name = context.data["asset"] for instance in context: asset_name = instance.data.get("asset") diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py index 9f61bdbcd0..722d76b4d2 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_duplicated_layer_names.py @@ -20,6 +20,9 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): duplicated_layer_names = [] for layer_name in layer_names: layers = layers_by_name.get(layer_name) + # It is not job of this validator to handle missing layers + if layers is None: + continue if len(layers) > 1: duplicated_layer_names.append(layer_name) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index d3a04cc69f..8e52a636f4 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -8,11 +8,16 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): label = "Validate Layers Visibility" order = pyblish.api.ValidatorOrder - families = ["review", "renderPass", "renderLayer", "renderScene"] + families = ["review", "render"] def process(self, instance): + layers = instance.data.get("layers") + # Instance have empty layers + # - it is not job of this validator to check that + if not layers: + return layer_names = set() - for layer in instance.data["layers"]: + for layer in layers: layer_names.add(layer["name"]) if layer["visible"]: return diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index 0030b0fd1c..7b2cc62bb5 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -1,7 +1,10 @@ import json import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.lib import execute_george @@ -23,7 +26,10 @@ class ValidateMarksRepair(pyblish.api.Action): ) -class ValidateMarks(pyblish.api.ContextPlugin): +class ValidateMarks( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate mark in and out are enabled and it's duration. Mark In/Out does not have to match frameStart and frameEnd but duration is @@ -59,6 +65,9 @@ class ValidateMarks(pyblish.api.ContextPlugin): } def process(self, context): + if not self.is_active(context.data): + return + current_data = { "markIn": context.data["sceneMarkIn"], "markInState": context.data["sceneMarkInState"], diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py new file mode 100644 index 0000000000..bb0a9a4ffe --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_layer_group.py @@ -0,0 +1,74 @@ +import collections +import pyblish.api +from openpype.pipeline import PublishXmlValidationError + + +class ValidateRenderLayerGroups(pyblish.api.ContextPlugin): + """Validate group ids of renderLayer subsets. 
+ + Validate that there are not 2 render layers using the same group. + """ + + label = "Validate Render Layers Group" + order = pyblish.api.ValidatorOrder + 0.1 + + def process(self, context): + # Prepare layers + render_layers_by_group_id = collections.defaultdict(list) + for instance in context: + families = instance.data.get("families") + if not families or "renderLayer" not in families: + continue + + group_id = instance.data["creator_attributes"]["group_id"] + render_layers_by_group_id[group_id].append(instance) + + duplicated_instances = [] + for group_id, instances in render_layers_by_group_id.items(): + if len(instances) > 1: + duplicated_instances.append((group_id, instances)) + + if not duplicated_instances: + return + + # Exception message preparations + groups_data = context.data["groupsData"] + groups_by_id = { + group["group_id"]: group + for group in groups_data + } + + per_group_msgs = [] + groups_information_lines = [] + for group_id, instances in duplicated_instances: + group = groups_by_id[group_id] + group_label = "Group \"{}\" ({})".format( + group["name"], + group["group_id"], + ) + line_join_subset_names = "\n".join([ + f" - {instance['subset']}" + for instance in instances + ]) + joined_subset_names = ", ".join([ + f"\"{instance['subset']}\"" + for instance in instances + ]) + per_group_msgs.append( + "{} < {} >".format(group_label, joined_subset_names) + ) + groups_information_lines.append( + "{}\n{}".format(group_label, line_join_subset_names) + ) + + # Raise an error + raise PublishXmlValidationError( + self, + ( + "More than one Render Layer is using the same TVPaint" + " group color. {}" + ).format(" | ".join(per_group_msgs)), + formatting_data={ + "groups_information": "\n".join(groups_information_lines) + } + ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py index 0fbfca6c56..2a3173c698 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_render_pass_group.py @@ -85,6 +85,5 @@ class ValidateLayersGroup(pyblish.api.InstancePlugin): ), "expected_group": correct_group["name"], "layer_names": ", ".join(invalid_layer_names) - } ) diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py index d235215ac9..0ab8e811f5 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_scene_settings.py @@ -1,11 +1,17 @@ import json import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) # TODO @iLliCiTiT add fix action for fps -class ValidateProjectSettings(pyblish.api.ContextPlugin): +class ValidateProjectSettings( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate scene settings against database.""" label = "Validate Scene Settings" @@ -13,6 +19,9 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + expected_data = context.data["assetEntity"]["data"] scene_data = { "fps": context.data.get("sceneFps"), @@ -42,7 +51,7 @@ class ValidateProjectSettings(pyblish.api.ContextPlugin): "expected_width": expected_data["resolutionWidth"], "expected_height": expected_data["resolutionHeight"], 
"current_width": scene_data["resolutionWidth"], - "current_height": scene_data["resolutionWidth"], + "current_height": scene_data["resolutionHeight"], "expected_pixel_ratio": expected_data["pixelAspect"], "current_pixel_ratio": scene_data["pixelAspect"] } diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index 066e54c670..229ccfcd18 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,5 +1,8 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin, +) from openpype.hosts.tvpaint.api.lib import execute_george @@ -14,7 +17,10 @@ class RepairStartFrame(pyblish.api.Action): execute_george("tv_startframe 0") -class ValidateStartFrame(pyblish.api.ContextPlugin): +class ValidateStartFrame( + OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin +): """Validate start frame being at frame 0.""" label = "Validate Start Frame" @@ -24,6 +30,9 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): optional = True def process(self, context): + if not self.is_active(context.data): + return + start_frame = execute_george("tv_startframe") if start_frame == 0: return diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py index d66ae50c60..b38231e208 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py @@ -1,5 +1,9 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError, registered_host +from openpype.pipeline import ( + PublishXmlValidationError, + PublishValidationError, + registered_host, +) class ValidateWorkfileMetadataRepair(pyblish.api.Action): @@ -27,13 +31,18 @@ class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): actions = [ValidateWorkfileMetadataRepair] - required_keys = {"project", "asset", "task"} + required_keys = {"project_name", "asset_name", "task_name"} def process(self, context): workfile_context = context.data["workfile_context"] if not workfile_context: - raise AssertionError( - "Current workfile is missing whole metadata about context." + raise PublishValidationError( + "Current workfile is missing whole metadata about context.", + "Missing context", + ( + "Current workfile is missing metadata about task." + " To fix this issue save the file using Workfiles tool." + ) ) missing_keys = [] diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py index 0f25f2f7be..2ed5afa11c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_project_name.py @@ -1,4 +1,3 @@ -import os import pyblish.api from openpype.pipeline import PublishXmlValidationError @@ -16,15 +15,15 @@ class ValidateWorkfileProjectName(pyblish.api.ContextPlugin): def process(self, context): workfile_context = context.data.get("workfile_context") # If workfile context is missing than project is matching to - # `AVALON_PROJECT` value for 100% + # global project if not workfile_context: self.log.info( "Workfile context (\"workfile_context\") is not filled." 
) return - workfile_project_name = workfile_context["project"] - env_project_name = os.environ["AVALON_PROJECT"] + workfile_project_name = workfile_context["project_name"] + env_project_name = context.data["projectName"] if workfile_project_name == env_project_name: self.log.info(( "Both workfile project and environment project are same. {}" diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp index bb67715cbd..88106bc770 100644 --- a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp +++ b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp @@ -302,8 +302,9 @@ private: std::string websocket_url; // Should be avalon plugin available? // - this may change during processing if websocketet url is not set or server is down - bool use_avalon; + bool server_available; public: + Communicator(std::string url); Communicator(); websocket_endpoint endpoint; bool is_connected(); @@ -314,43 +315,45 @@ public: void call_notification(std::string method_name, nlohmann::json params); }; -Communicator::Communicator() { + +Communicator::Communicator(std::string url) { // URL to websocket server - websocket_url = std::getenv("WEBSOCKET_URL"); + websocket_url = url; // Should be avalon plugin available? // - this may change during processing if websocketet url is not set or server is down - if (websocket_url == "") { - use_avalon = false; + if (url == "") { + server_available = false; } else { - use_avalon = true; + server_available = true; } } + bool Communicator::is_connected(){ return endpoint.connected(); } bool Communicator::is_usable(){ - return use_avalon; + return server_available; } void Communicator::connect() { - if (!use_avalon) { + if (!server_available) { return; } int con_result; con_result = endpoint.connect(websocket_url); if (con_result == -1) { - use_avalon = false; + server_available = false; } else { - use_avalon = true; + server_available = true; } } void Communicator::call_notification(std::string method_name, nlohmann::json params) { - if (!use_avalon || !is_connected()) {return;} + if (!server_available || !is_connected()) {return;} jsonrpcpp::Notification notification = {method_name, params}; endpoint.send_notification(¬ification); @@ -358,7 +361,7 @@ void Communicator::call_notification(std::string method_name, nlohmann::json par jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann::json params) { jsonrpcpp::Response response; - if (!use_avalon || !is_connected()) + if (!server_available || !is_connected()) { return response; } @@ -382,7 +385,7 @@ jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann: } void Communicator::process_requests() { - if (!use_avalon || !is_connected() || Data.messages.empty()) {return;} + if (!server_available || !is_connected() || Data.messages.empty()) {return;} std::string msg = Data.messages.front(); Data.messages.pop(); @@ -458,7 +461,7 @@ void register_callbacks(){ parser.register_request_callback("execute_george", execute_george); } -Communicator communication; +Communicator* communication = nullptr; //////////////////////////////////////////////////////////////////////////////////////// @@ -484,7 +487,7 @@ static char* GetLocalString( PIFilter* iFilter, int iNum, char* iDefault ) // in the localized file (or the localized file doesn't exist). 
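 
 // Helper resolving the plugin label; the AVALON_LABEL environment
 // variable, when set, overrides the default below.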
 std::string label_from_evn()
 {
-    std::string _plugin_label = "Avalon";
+    std::string _plugin_label = "OpenPype";
     if (std::getenv("AVALON_LABEL") && std::getenv("AVALON_LABEL") != "")
     {
         _plugin_label = std::getenv("AVALON_LABEL");
@@ -540,9 +543,12 @@ int FAR PASCAL PI_Open( PIFilter* iFilter )
     {
         PI_Parameters( iFilter, NULL );   // NULL as iArg means "open the requester"
     }
-
-    communication.connect();
-    register_callbacks();
+    char *env_value = std::getenv("WEBSOCKET_URL");
+    if (env_value != NULL) {
+        communication = new Communicator(env_value);
+        communication->connect();
+        register_callbacks();
+    }
     return  1;  // OK
 }
 
@@ -560,7 +566,10 @@ void FAR PASCAL PI_Close( PIFilter* iFilter )
     {
         TVCloseReq( iFilter, Data.mReq );
     }
-    communication.endpoint.close_connection();
+    if (communication != nullptr) {
+        communication->endpoint.close_connection();
+        delete communication;
+    }
 }
 
 
@@ -709,7 +718,7 @@ int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iA
             if (Data.menuItemsById.contains(button_up_item_id_str)) {
                 std::string callback_name = Data.menuItemsById[button_up_item_id_str].get();
-                communication.call_method(callback_name, nlohmann::json::array());
+                communication->call_method(callback_name, nlohmann::json::array());
             }
             TVExecute( iFilter );
             break;
@@ -737,7 +746,9 @@ int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iA
         {
             newMenuItemsProcess(iFilter);
         }
-        communication.process_requests();
+        if (communication != nullptr) {
+            communication->process_requests();
+        }
     }
 
     return  1;
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll
index f7f5119ef3..7081778bee 100644
Binary files a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll and b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll differ
diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll
index f35e3ffe86..0f2afec245 100644
Binary files a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll and b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll differ
diff --git a/openpype/hosts/unreal/README.md b/openpype/hosts/unreal/README.md
index 0a69b9e0cf..d131105659 100644
--- a/openpype/hosts/unreal/README.md
+++ b/openpype/hosts/unreal/README.md
@@ -4,6 +4,6 @@ Supported Unreal Engine version is 4.26+ (mainly because of major Python changes
 
 ### Project naming
 Unreal doesn't support project names starting with non-alphabetic character. So names like `123_myProject` are
-invalid. If OpenPype detects such name it automatically prepends letter **P** to make it valid name, so `123_myProject`
+invalid. If Ayon detects such a name, it automatically prepends the letter **P** to make it valid, so `123_myProject`
 will become `P123_myProject`. There is also soft-limit on project name length to be shorter than 20 characters.
-Longer names will issue warning in Unreal Editor that there might be possible side effects.
\ No newline at end of file
+Longer names will trigger a warning in the Unreal Editor about possible side effects.
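The automatic renaming described in this README section happens inside the integration and is not shown in this diff; a minimal illustrative sketch of the rule (the helper name `sanitize_project_name` is hypothetical):

```python
# Hypothetical sketch of the project naming rules described above.
import warnings


def sanitize_project_name(name: str) -> str:
    # Names must start with an alphabetic character; prepend "P" otherwise.
    if not name[:1].isalpha():
        name = "P{}".format(name)
    # Soft limit: longer names only produce a warning, not an error.
    if len(name) > 20:
        warnings.warn(
            "Project name '{}' is longer than 20 characters".format(name)
        )
    return name


print(sanitize_project_name("123_myProject"))  # -> "P123_myProject"
```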
diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py
index e2c8484651..ed23950b35 100644
--- a/openpype/hosts/unreal/addon.py
+++ b/openpype/hosts/unreal/addon.py
@@ -1,5 +1,7 @@
 import os
-from openpype.modules import OpenPypeModule, IHostAddon
+import re
+from openpype.modules import IHostAddon, OpenPypeModule
+from openpype.widgets.message_window import Window
 
 UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
 
@@ -13,14 +15,41 @@ class UnrealAddon(OpenPypeModule, IHostAddon):
 
     def add_implementation_envs(self, env, app):
         """Modify environments to contain all required for implementation."""
-        # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation
+        # Set AYON_UNREAL_PLUGIN required for Unreal implementation
+        # Imports are in this method for Python 2 compatibility of an addon
+        from pathlib import Path
 
-        ue_plugin = "UE_5.0" if app.name[:1] == "5" else "UE_4.7"
+        from .lib import get_compatible_integration
+
+        pattern = re.compile(r'^\d+-\d+$')
+
+        if not pattern.match(app.name):
+            msg = (
+                "Unreal application key in the settings must be in format"
+                " '5-0' or '5-1'"
+            )
+            Window(
+                parent=None,
+                title="Unreal application name format",
+                message=msg,
+                level="critical")
+            raise ValueError(msg)
+
+        ue_version = app.name.replace("-", ".")
         unreal_plugin_path = os.path.join(
-            UNREAL_ROOT_DIR, "integration", ue_plugin
+            UNREAL_ROOT_DIR, "integration", "UE_{}".format(ue_version), "Ayon"
         )
-        if not env.get("OPENPYPE_UNREAL_PLUGIN"):
-            env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path
+        if not Path(unreal_plugin_path).exists():
+            compatible_versions = get_compatible_integration(
+                ue_version, Path(UNREAL_ROOT_DIR) / "integration"
+            )
+            if compatible_versions:
+                unreal_plugin_path = compatible_versions[-1] / "Ayon"
+                unreal_plugin_path = unreal_plugin_path.as_posix()
+
+        if not env.get("AYON_UNREAL_PLUGIN") or \
+                env.get("AYON_UNREAL_PLUGIN") != unreal_plugin_path:
+            env["AYON_UNREAL_PLUGIN"] = unreal_plugin_path
 
         # Set default environments if are not set via settings
         defaults = {
diff --git a/openpype/hosts/unreal/api/__init__.py b/openpype/hosts/unreal/api/__init__.py
index ca9db259e6..ac6a91eae9 100644
--- a/openpype/hosts/unreal/api/__init__.py
+++ b/openpype/hosts/unreal/api/__init__.py
@@ -1,7 +1,11 @@
 # -*- coding: utf-8 -*-
-"""Unreal Editor OpenPype host API."""
+"""Unreal Editor Ayon host API."""
 
-from .plugin import Loader
+from .plugin import (
+    UnrealActorCreator,
+    UnrealAssetCreator,
+    Loader
+)
 
 from .pipeline import (
     install,
@@ -18,6 +22,8 @@ from .pipeline import (
     show_tools_popup,
     instantiate,
     UnrealHost,
+    set_sequence_hierarchy,
+    generate_sequence,
     maintained_selection
 )
 
@@ -37,5 +43,7 @@ __all__ = [
     "show_tools_popup",
     "instantiate",
     "UnrealHost",
+    "set_sequence_hierarchy",
+    "generate_sequence",
     "maintained_selection"
 ]
diff --git a/openpype/hosts/unreal/api/helpers.py b/openpype/hosts/unreal/api/helpers.py
index 0b6f07f52f..e9ab3fb4c5 100644
--- a/openpype/hosts/unreal/api/helpers.py
+++ b/openpype/hosts/unreal/api/helpers.py
@@ -2,15 +2,15 @@
 import unreal  # noqa
 
 
-class OpenPypeUnrealException(Exception):
+class AyonUnrealException(Exception):
     pass
 
 
 @unreal.uclass()
-class OpenPypeHelpers(unreal.OpenPypeLib):
-    """Class wrapping some useful functions for OpenPype.
+class AyonHelpers(unreal.AyonLib):
+    """Class wrapping some useful functions for Ayon.
 
-    This class is extending native BP class in OpenPype Integration Plugin.
+    This class is extending native BP class in Ayon Integration Plugin.
""" @@ -29,13 +29,13 @@ class OpenPypeHelpers(unreal.OpenPypeLib): Example: - OpenPypeHelpers().set_folder_color( + AyonHelpers().set_folder_color( "/Game/Path", unreal.LinearColor(a=1.0, r=1.0, g=0.5, b=0) ) Note: This will take effect only after Editor is restarted. I couldn't - find a way to refresh it. Also this saves the color definition + find a way to refresh it. Also, this saves the color definition into the project config, binding this path with color. So if you delete this path and later re-create, it will set this color again. diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 2081c8fd13..72816c9b81 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -1,30 +1,36 @@ # -*- coding: utf-8 -*- import os +import json import logging from typing import List from contextlib import contextmanager import semver +import time import pyblish.api +from openpype.client import get_asset_by_name, get_assets from openpype.pipeline import ( register_loader_plugin_path, register_creator_plugin_path, deregister_loader_plugin_path, deregister_creator_plugin_path, - AVALON_CONTAINER_ID, + AYON_CONTAINER_ID, + legacy_io, ) from openpype.tools.utils import host_tools import openpype.hosts.unreal -from openpype.host import HostBase, ILoadHost +from openpype.host import HostBase, ILoadHost, IPublishHost import unreal # noqa - +# Rename to Ayon once parent module renames logger = logging.getLogger("openpype.hosts.unreal") -OPENPYPE_CONTAINERS = "OpenPypeContainers" + +AYON_CONTAINERS = "AyonContainers" +CONTEXT_CONTAINER = "Ayon/context.json" UNREAL_VERSION = semver.VersionInfo( - *os.getenv("OPENPYPE_UNREAL_VERSION").split(".") + *os.getenv("AYON_UNREAL_VERSION").split(".") ) HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__)) @@ -35,7 +41,7 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") -class UnrealHost(HostBase, ILoadHost): +class UnrealHost(HostBase, ILoadHost, IPublishHost): """Unreal host implementation. For some time this class will re-use functions from module based @@ -50,35 +56,73 @@ class UnrealHost(HostBase, ILoadHost): def get_containers(self): return ls() - def show_tools_popup(self): + @staticmethod + def show_tools_popup(): """Show tools popup with actions leading to show other tools.""" - show_tools_popup() - def show_tools_dialog(self): + @staticmethod + def show_tools_dialog(): """Show tools dialog with actions leading to show other tools.""" - show_tools_dialog() + def update_context_data(self, data, changes): + content_path = unreal.Paths.project_content_dir() + op_ctx = content_path + CONTEXT_CONTAINER + attempts = 3 + for i in range(attempts): + try: + with open(op_ctx, "w+") as f: + json.dump(data, f) + break + except IOError as e: + if i == attempts - 1: + raise Exception( + "Failed to write context data. Aborting.") from e + unreal.log_warning("Failed to write context data. Retrying...") + i += 1 + time.sleep(3) + continue + + def get_context_data(self): + content_path = unreal.Paths.project_content_dir() + op_ctx = content_path + CONTEXT_CONTAINER + if not os.path.isfile(op_ctx): + return {} + with open(op_ctx, "r") as fp: + data = json.load(fp) + return data + def install(): """Install Unreal configuration for OpenPype.""" print("-=" * 40) logo = '''. . - ____________ - / \\ __ \\ - \\ \\ \\/_\\ \\ - \\ \\ _____/ ______ - \\ \\ \\___// \\ \\ - \\ \\____\\ \\ \\_____\\ - \\/_____/ \\/______/ PYPE Club . 
+                    ·
+                    │
+                   ·∙/
+                 ·-∙•∙-·
+              / \\  /∙·  / \\
+             ∙   \\  │  /   ∙
+              \\   \\ · /   /
+               \\\\   ∙ ∙  //
+                 \\\\/ \\//
+                    ___
+                   │   │
+                   │   │
+                   │   │
+                   │___│
+                     -·
+
+         ·-─═─-∙ A Y O N ∙-─═─-·
+         by  YNPUT .
 '''
     print(logo)
-    print("installing OpenPype for Unreal ...")
+    print("installing Ayon for Unreal ...")
     print("-=" * 40)
-    logger.info("installing OpenPype for Unreal")
+    logger.info("installing Ayon for Unreal")
     pyblish.api.register_host("unreal")
     pyblish.api.register_plugin_path(str(PUBLISH_PATH))
     register_loader_plugin_path(str(LOAD_PATH))
@@ -88,7 +132,7 @@ def install():
 
 
 def uninstall():
-    """Uninstall Unreal configuration for Avalon."""
+    """Uninstall Unreal configuration for Ayon."""
    pyblish.api.deregister_plugin_path(str(PUBLISH_PATH))
    deregister_loader_plugin_path(str(LOAD_PATH))
    deregister_creator_plugin_path(str(CREATE_PATH))
@@ -96,14 +140,14 @@ def uninstall():
 
 def _register_callbacks():
     """
-    TODO: Implement callbacks if supported by UE4
+    TODO: Implement callbacks if supported by UE
     """
     pass
 
 
 def _register_events():
     """
-    TODO: Implement callbacks if supported by UE4
+    TODO: Implement callbacks if supported by UE
     """
     pass
 
@@ -117,24 +161,45 @@ def ls():
     """
     ar = unreal.AssetRegistryHelpers.get_asset_registry()
     # UE 5.1 changed how class name is specified
-    class_name = ["/Script/OpenPype", "AssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AssetContainer"  # noqa
-    openpype_containers = ar.get_assets_by_class(class_name, True)
+    class_name = ["/Script/Ayon", "AyonAssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AyonAssetContainer"  # noqa
+    ayon_containers = ar.get_assets_by_class(class_name, True)
 
     # get_asset_by_class returns AssetData. To get all metadata we need to
     # load asset. get_tag_values() work only on metadata registered in
     # Asset Registry Project settings (and there is no way to set it with
     # python short of editing ini configuration file).
-    for asset_data in openpype_containers:
+    for asset_data in ayon_containers:
         asset = asset_data.get_asset()
         data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
         data["objectName"] = asset_data.asset_name
-        data = cast_map_to_str_dict(data)
+        yield cast_map_to_str_dict(data)
 
-        yield data
+
+def ls_inst():
+    ar = unreal.AssetRegistryHelpers.get_asset_registry()
+    # UE 5.1 changed how class name is specified
+    class_name = [
+        "/Script/Ayon",
+        "AyonPublishInstance"
+    ] if (
+        UNREAL_VERSION.major == 5
+        and UNREAL_VERSION.minor > 0
+    ) else "AyonPublishInstance"  # noqa
+    instances = ar.get_assets_by_class(class_name, True)
+
+    # get_asset_by_class returns AssetData. To get all metadata we need to
+    # load asset. get_tag_values() works only on metadata registered in
+    # Asset Registry Project settings (and there is no way to set it with
+    # python short of editing ini configuration file).
+    for asset_data in instances:
+        asset = asset_data.get_asset()
+        data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset)
+        data["objectName"] = asset_data.asset_name
+        yield cast_map_to_str_dict(data)
 
 
 def parse_container(container):
-    """To get data from container, AssetContainer must be loaded.
+    """To get data from container, AyonAssetContainer must be loaded.
 
     Args:
         container(str): path to container
@@ -163,7 +228,7 @@ def containerise(name, namespace, nodes, context, loader=None, suffix="_CON"):
 
     Unreal doesn't support *groups* of assets that you can add metadata to.
     But it does support folders that helps to organize asset.
Unfortunately those folders are just that - you cannot add any additional information - to them. OpenPype Integration Plugin is providing way out - Implementing + to them. Ayon Integration Plugin is providing way out - Implementing `AssetContainer` Blueprint class. This class when added to folder can handle metadata on it using standard :func:`unreal.EditorAssetLibrary.set_metadata_tag()` and @@ -172,30 +237,30 @@ def containerise(name, namespace, nodes, context, loader=None, suffix="_CON"): those assets is available as `assets` property. This is list of strings starting with asset type and ending with its path: - `Material /Game/OpenPype/Test/TestMaterial.TestMaterial` + `Material /Game/Ayon/Test/TestMaterial.TestMaterial` """ # 1 - create directory for container root = "/Game" - container_name = "{}{}".format(name, suffix) + container_name = f"{name}{suffix}" new_name = move_assets_to_path(root, container_name, nodes) # 2 - create Asset Container there - path = "{}/{}".format(root, new_name) + path = f"{root}/{new_name}" create_container(container=container_name, path=path) namespace = path data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "name": new_name, "namespace": namespace, "loader": str(loader), "representation": context["representation"]["_id"], } # 3 - imprint data - imprint("{}/{}".format(path, container_name), data) + imprint(f"{path}/{container_name}", data) return path @@ -203,7 +268,7 @@ def instantiate(root, name, data, assets=None, suffix="_INS"): """Bundles *nodes* into *container*. Marking it with metadata as publishable instance. If assets are provided, - they are moved to new path where `OpenPypePublishInstance` class asset is + they are moved to new path where `AyonPublishInstance` class asset is created and imprinted with metadata. This can then be collected for publishing by Pyblish for example. @@ -217,7 +282,7 @@ def instantiate(root, name, data, assets=None, suffix="_INS"): suffix (str): suffix string to append to instance name """ - container_name = "{}{}".format(name, suffix) + container_name = f"{name}{suffix}" # if we specify assets, create new folder and move them there. If not, # just create empty folder @@ -226,10 +291,10 @@ def instantiate(root, name, data, assets=None, suffix="_INS"): else: new_name = create_folder(root, name) - path = "{}/{}".format(root, new_name) + path = f"{root}/{new_name}" create_publish_instance(instance=container_name, path=path) - imprint("{}/{}".format(path, container_name), data) + imprint(f"{path}/{container_name}", data) def imprint(node, data): @@ -245,14 +310,14 @@ def imprint(node, data): loaded_asset, key, str(value) ) - with unreal.ScopedEditorTransaction("OpenPype containerising"): + with unreal.ScopedEditorTransaction("Ayon containerising"): unreal.EditorAssetLibrary.save_asset(node) def show_tools_popup(): """Show popup with tools. - Popup will disappear on click or loosing focus. + Popup will disappear on click or losing focus. 
""" from openpype.hosts.unreal.api import tools_ui @@ -312,11 +377,11 @@ def create_folder(root: str, name: str) -> str: eal = unreal.EditorAssetLibrary index = 1 while True: - if eal.does_directory_exist("{}/{}".format(root, name)): - name = "{}{}".format(name, index) + if eal.does_directory_exist(f"{root}/{name}"): + name = f"{name}{index}" index += 1 else: - eal.make_directory("{}/{}".format(root, name)) + eal.make_directory(f"{root}/{name}") break return name @@ -349,9 +414,7 @@ def move_assets_to_path(root: str, name: str, assets: List[str]) -> str: unreal.log(assets) for asset in assets: loaded = eal.load_asset(asset) - eal.rename_asset( - asset, "{}/{}/{}".format(root, name, loaded.get_name()) - ) + eal.rename_asset(asset, f"{root}/{name}/{loaded.get_name()}") return name @@ -378,17 +441,16 @@ def create_container(container: str, path: str) -> unreal.Object: ) """ - factory = unreal.AssetContainerFactory() + factory = unreal.AyonAssetContainerFactory() tools = unreal.AssetToolsHelpers().get_asset_tools() - asset = tools.create_asset(container, path, None, factory) - return asset + return tools.create_asset(container, path, None, factory) def create_publish_instance(instance: str, path: str) -> unreal.Object: - """Helper function to create OpenPype Publish Instance on given path. + """Helper function to create Ayon Publish Instance on given path. - This behaves similarly as :func:`create_openpype_container`. + This behaves similarly as :func:`create_ayon_container`. Args: path (str): Path where to create Publish Instance. @@ -406,10 +468,9 @@ def create_publish_instance(instance: str, path: str) -> unreal.Object: ) """ - factory = unreal.OpenPypePublishInstanceFactory() + factory = unreal.AyonPublishInstanceFactory() tools = unreal.AssetToolsHelpers().get_asset_tools() - asset = tools.create_asset(instance, path, None, factory) - return asset + return tools.create_asset(instance, path, None, factory) def cast_map_to_str_dict(umap) -> dict: @@ -440,16 +501,154 @@ def get_subsequences(sequence: unreal.LevelSequence): """ tracks = sequence.get_master_tracks() - subscene_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - break + subscene_track = next( + ( + t + for t in tracks + if t.get_class() == unreal.MovieSceneSubTrack.static_class() + ), + None, + ) if subscene_track is not None and subscene_track.get_sections(): return subscene_track.get_sections() return [] +def set_sequence_hierarchy( + seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths +): + # Get existing sequencer tracks or create them if they don't exist + tracks = seq_i.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if not subscene_track: + subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack) + if not visibility_track: + visibility_track = seq_i.add_master_track( + unreal.MovieSceneLevelVisibilityTrack) + + # Create the sub-scene section + subscenes = subscene_track.get_sections() + subscene = None + for s in subscenes: + if s.get_editor_property('sub_sequence') == seq_j: + subscene = s + break + if not subscene: + subscene = subscene_track.add_section() + subscene.set_row_index(len(subscene_track.get_sections())) + subscene.set_editor_property('sub_sequence', seq_j) + subscene.set_range( + min_frame_j, 
+ max_frame_j + 1) + + # Create the visibility section + ar = unreal.AssetRegistryHelpers.get_asset_registry() + maps = [] + for m in map_paths: + # Unreal requires to load the level to get the map name + unreal.EditorLevelLibrary.save_all_dirty_levels() + unreal.EditorLevelLibrary.load_level(m) + maps.append(str(ar.get_asset_by_object_path(m).asset_name)) + + vis_section = visibility_track.add_section() + index = len(visibility_track.get_sections()) + + vis_section.set_range( + min_frame_j, + max_frame_j + 1) + vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) + vis_section.set_row_index(index) + vis_section.set_level_names(maps) + + if min_frame_j > 1: + hid_section = visibility_track.add_section() + hid_section.set_range( + 1, + min_frame_j) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + if max_frame_j < max_frame_i: + hid_section = visibility_track.add_section() + hid_section.set_range( + max_frame_j + 1, + max_frame_i + 1) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + + +def generate_sequence(h, h_dir): + tools = unreal.AssetToolsHelpers().get_asset_tools() + + sequence = tools.create_asset( + asset_name=h, + package_path=h_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + project_name = legacy_io.active_project() + asset_data = get_asset_by_name( + project_name, + h_dir.split('/')[-1], + fields=["_id", "data.fps"] + ) + + start_frames = [] + end_frames = [] + + elements = list(get_assets( + project_name, + parent_ids=[asset_data["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(get_assets( + project_name, + parent_ids=[e["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + fps = asset_data.get('data').get("fps") + + sequence.set_display_rate( + unreal.FrameRate(fps, 1.0)) + sequence.set_playback_start(min_frame) + sequence.set_playback_end(max_frame) + + sequence.set_work_range_start(min_frame / fps) + sequence.set_work_range_end(max_frame / fps) + sequence.set_view_range_start(min_frame / fps) + sequence.set_view_range_end(max_frame / fps) + + tracks = sequence.get_master_tracks() + track = None + for t in tracks: + if (t.get_class() == + unreal.MovieSceneCameraCutTrack.static_class()): + track = t + break + if not track: + track = sequence.add_master_track( + unreal.MovieSceneCameraCutTrack) + + return sequence, (min_frame, max_frame) + + @contextmanager def maintained_selection(): """Stub to be either implemented or replaced. 
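Note: `generate_sequence` and `set_sequence_hierarchy` added above are exported through the host API (see the `api/__init__.py` hunk earlier in this diff). A minimal usage sketch of how the two compose, assuming it runs in the Unreal Editor's Python environment with an active project and that assets `ep01`/`sh010` exist in the database with `clipIn`/`clipOut` data (all names and paths are illustrative):

```python
# Build a parent sequence for an episode and nest a shot sequence into it.
from openpype.hosts.unreal.api import (
    generate_sequence,
    set_sequence_hierarchy,
)

# Frame ranges are read from the assets' clipIn/clipOut data in the database.
episode_seq, (ep_start, ep_end) = generate_sequence(
    "ep01", "/Game/Ayon/ep01"
)
shot_seq, (sh_start, sh_end) = generate_sequence(
    "sh010", "/Game/Ayon/ep01/sh010"
)

# Nest the shot under the episode; the visibility sections hide the shot's
# levels outside its frame range, which is why the parent's max frame is
# also required.
set_sequence_hierarchy(
    episode_seq,                          # parent sequence
    shot_seq,                             # child sequence
    ep_end,                               # parent mark out
    sh_start,                             # child mark in
    sh_end,                               # child mark out
    ["/Game/Ayon/ep01/sh010/sh010_map"],  # levels driven by visibility track
)
```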
diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index 6fc00cb71c..26ef69af86 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -1,9 +1,247 @@ # -*- coding: utf-8 -*- -from abc import ABC +import ast +import collections +import sys +import six +from abc import ( + ABC, + ABCMeta, +) -from openpype.pipeline import LoaderPlugin +import unreal + +from .pipeline import ( + create_publish_instance, + imprint, + ls_inst, + UNREAL_VERSION +) +from openpype.lib import ( + BoolDef, + UILabelDef +) +from openpype.pipeline import ( + Creator, + LoaderPlugin, + CreatorError, + CreatedInstance +) + + +@six.add_metaclass(ABCMeta) +class UnrealBaseCreator(Creator): + """Base class for Unreal creator plugins.""" + root = "/Game/Ayon/AyonPublishInstances" + suffix = "_INS" + + @staticmethod + def cache_subsets(shared_data): + """Cache instances for Creators to shared data. + + Create `unreal_cached_subsets` key when needed in shared data and + fill it with all collected instances from the scene under its + respective creator identifiers. + + If legacy instances are detected in the scene, create + `unreal_cached_legacy_subsets` there and fill it with + all legacy subsets under family as a key. + + Args: + Dict[str, Any]: Shared data. + + Return: + Dict[str, Any]: Shared data dictionary. + + """ + if shared_data.get("unreal_cached_subsets") is None: + unreal_cached_subsets = collections.defaultdict(list) + unreal_cached_legacy_subsets = collections.defaultdict(list) + for instance in ls_inst(): + creator_id = instance.get("creator_identifier") + if creator_id: + unreal_cached_subsets[creator_id].append(instance) + else: + family = instance.get("family") + unreal_cached_legacy_subsets[family].append(instance) + + shared_data["unreal_cached_subsets"] = unreal_cached_subsets + shared_data["unreal_cached_legacy_subsets"] = ( + unreal_cached_legacy_subsets + ) + return shared_data + + def create(self, subset_name, instance_data, pre_create_data): + try: + instance_name = f"{subset_name}{self.suffix}" + pub_instance = create_publish_instance(instance_name, self.root) + + instance_data["subset"] = subset_name + instance_data["instance_path"] = f"{self.root}/{instance_name}" + + instance = CreatedInstance( + self.family, + subset_name, + instance_data, + self) + self._add_instance_to_context(instance) + + pub_instance.set_editor_property('add_external_assets', True) + assets = pub_instance.get_editor_property('asset_data_external') + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for member in pre_create_data.get("members", []): + obj = ar.get_asset_by_object_path(member).get_asset() + assets.add(obj) + + imprint(f"{self.root}/{instance_name}", instance.data_to_store()) + + return instance + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def collect_instances(self): + # cache instances if missing + self.cache_subsets(self.collection_shared_data) + for instance in self.collection_shared_data[ + "unreal_cached_subsets"].get(self.identifier, []): + # Unreal saves metadata as string, so we need to convert it back + instance['creator_attributes'] = ast.literal_eval( + instance.get('creator_attributes', '{}')) + instance['publish_attributes'] = ast.literal_eval( + instance.get('publish_attributes', '{}')) + created_instance = CreatedInstance.from_existing(instance, self) + self._add_instance_to_context(created_instance) + + def update_instances(self, 
update_list): + for created_inst, changes in update_list: + instance_node = created_inst.get("instance_path", "") + + if not instance_node: + unreal.log_warning( + f"Instance node not found for {created_inst}") + continue + + new_values = { + key: changes[key].new_value + for key in changes.changed_keys + } + imprint( + instance_node, + new_values + ) + + def remove_instances(self, instances): + for instance in instances: + instance_node = instance.data.get("instance_path", "") + if instance_node: + unreal.EditorAssetLibrary.delete_asset(instance_node) + + self._remove_instance_from_context(instance) + + +@six.add_metaclass(ABCMeta) +class UnrealAssetCreator(UnrealBaseCreator): + """Base class for Unreal creator plugins based on assets.""" + + def create(self, subset_name, instance_data, pre_create_data): + """Create instance of the asset. + + Args: + subset_name (str): Name of the subset. + instance_data (dict): Data for the instance. + pre_create_data (dict): Data for the instance. + + Returns: + CreatedInstance: Created instance. + """ + try: + # Check if instance data has members, filled by the plugin. + # If not, use selection. + if not pre_create_data.get("members"): + pre_create_data["members"] = [] + + if pre_create_data.get("use_selection"): + utilib = unreal.EditorUtilityLibrary + sel_objects = utilib.get_selected_assets() + pre_create_data["members"] = [ + a.get_path_name() for a in sel_objects] + + super(UnrealAssetCreator, self).create( + subset_name, + instance_data, + pre_create_data) + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def get_pre_create_attr_defs(self): + return [ + BoolDef("use_selection", label="Use selection", default=True) + ] + + +@six.add_metaclass(ABCMeta) +class UnrealActorCreator(UnrealBaseCreator): + """Base class for Unreal creator plugins based on actors.""" + + def create(self, subset_name, instance_data, pre_create_data): + """Create instance of the asset. + + Args: + subset_name (str): Name of the subset. + instance_data (dict): Data for the instance. + pre_create_data (dict): Data for the instance. + + Returns: + CreatedInstance: Created instance. + """ + try: + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + + # Check if the level is saved + if world.get_path_name().startswith("/Temp/"): + raise CreatorError( + "Level must be saved before creating instances.") + + # Check if instance data has members, filled by the plugin. + # If not, use selection. 
+ if not instance_data.get("members"): + actor_subsystem = unreal.EditorActorSubsystem() + sel_actors = actor_subsystem.get_selected_level_actors() + selection = [a.get_path_name() for a in sel_actors] + + instance_data["members"] = selection + + instance_data["level"] = world.get_path_name() + + super(UnrealActorCreator, self).create( + subset_name, + instance_data, + pre_create_data) + + except Exception as er: + six.reraise( + CreatorError, + CreatorError(f"Creator error: {er}"), + sys.exc_info()[2]) + + def get_pre_create_attr_defs(self): + return [ + UILabelDef("Select actors to create instance from them.") + ] class Loader(LoaderPlugin, ABC): - """This serves as skeleton for future OpenPype specific functionality""" + """This serves as skeleton for future Ayon specific functionality""" pass diff --git a/openpype/hosts/unreal/api/rendering.py b/openpype/hosts/unreal/api/rendering.py index 29e4747f6e..efe6fc54ad 100644 --- a/openpype/hosts/unreal/api/rendering.py +++ b/openpype/hosts/unreal/api/rendering.py @@ -2,8 +2,10 @@ import os import unreal +from openpype.settings import get_project_settings from openpype.pipeline import Anatomy from openpype.hosts.unreal.api import pipeline +from openpype.widgets.message_window import Window queue = None @@ -32,15 +34,24 @@ def start_rendering(): """ Start the rendering process. """ - print("Starting rendering...") + unreal.log("Starting rendering...") # Get selected sequences assets = unreal.EditorUtilityLibrary.get_selected_assets() + if not assets: + Window( + parent=None, + title="No assets selected", + message="No assets selected. Select a render instance.", + level="warning") + raise RuntimeError( + "No assets selected. You need to select a render instance.") + # instances = pipeline.ls_inst() instances = [ a for a in assets - if a.get_class().get_name() == "OpenPypePublishInstance"] + if a.get_class().get_name() == "AyonPublishInstance"] inst_data = [] @@ -53,8 +64,9 @@ def start_rendering(): project = os.environ.get("AVALON_PROJECT") anatomy = Anatomy(project) root = anatomy.roots['renders'] - except Exception: - raise Exception("Could not find render root in anatomy settings.") + except Exception as e: + raise Exception( + "Could not find render root in anatomy settings.") from e render_dir = f"{root}/{project}" @@ -66,6 +78,13 @@ def start_rendering(): ar = unreal.AssetRegistryHelpers.get_asset_registry() + data = get_project_settings(project) + config = None + config_path = str(data.get("unreal").get("render_config_path")) + if config_path and unreal.EditorAssetLibrary.does_asset_exist(config_path): + unreal.log("Found saved render configuration") + config = ar.get_asset_by_object_path(config_path).get_asset() + for i in inst_data: sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset() @@ -81,55 +100,80 @@ def start_rendering(): # Get all the sequences to render. If there are subsequences, # add them and their frame ranges to the render list. We also # use the names for the output paths. 
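One Python detail carries the loop that the hunk below renames: appending to the list being iterated makes the for-loop pick up the new items, so nested subsequences are expanded as a simple breadth-first walk. A self-contained sketch of that traversal, with get_subsequences standing in for pipeline.get_subsequences():

# Sketch of the subsequence expansion performed below. Leaf sequences
# become render items; sequences named with "_camera" are skipped.
def collect_render_items(master_sequence, get_subsequences):
    sequences = [{
        "sequence": master_sequence,
        "output": master_sequence.get_name(),
    }]
    render_list = []
    for seq in sequences:
        subscenes = get_subsequences(seq["sequence"])
        if subscenes:
            for sub_seq in subscenes:
                # Appending here extends the ongoing iteration.
                sequences.append({
                    "sequence": sub_seq.get_sequence(),
                    "output": (f"{seq['output']}/"
                               f"{sub_seq.get_sequence().get_name()}"),
                    "frame_range": (
                        sub_seq.get_start_frame(), sub_seq.get_end_frame()),
                })
        elif "_camera" not in seq["sequence"].get_name():
            render_list.append(seq)
    return render_list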
- for s in sequences: - subscenes = pipeline.get_subsequences(s.get('sequence')) + for seq in sequences: + subscenes = pipeline.get_subsequences(seq.get('sequence')) if subscenes: - for ss in subscenes: + for sub_seq in subscenes: sequences.append({ - "sequence": ss.get_sequence(), - "output": (f"{s.get('output')}/" - f"{ss.get_sequence().get_name()}"), + "sequence": sub_seq.get_sequence(), + "output": (f"{seq.get('output')}/" + f"{sub_seq.get_sequence().get_name()}"), "frame_range": ( - ss.get_start_frame(), ss.get_end_frame()) + sub_seq.get_start_frame(), sub_seq.get_end_frame()) }) else: # Avoid rendering camera sequences - if "_camera" not in s.get('sequence').get_name(): - render_list.append(s) + if "_camera" not in seq.get('sequence').get_name(): + render_list.append(seq) # Create the rendering jobs and add them to the queue. - for r in render_list: + for render_setting in render_list: job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob) job.sequence = unreal.SoftObjectPath(i["master_sequence"]) job.map = unreal.SoftObjectPath(i["master_level"]) - job.author = "OpenPype" + job.author = "Ayon" + + # If we have a saved configuration, copy it to the job. + if config: + job.get_configuration().copy_from(config) # User data could be used to pass data to the job, that can be # read in the job's OnJobFinished callback. We could, - # for instance, pass the AvalonPublishInstance's path to the job. + # for instance, pass the AyonPublishInstance's path to the job. # job.user_data = "" + output_dir = render_setting.get('output') + shot_name = render_setting.get('sequence').get_name() + settings = job.get_configuration().find_or_add_setting_by_class( unreal.MoviePipelineOutputSetting) settings.output_resolution = unreal.IntPoint(1920, 1080) - settings.custom_start_frame = r.get("frame_range")[0] - settings.custom_end_frame = r.get("frame_range")[1] + settings.custom_start_frame = render_setting.get("frame_range")[0] + settings.custom_end_frame = render_setting.get("frame_range")[1] settings.use_custom_playback_range = True - settings.file_name_format = "{sequence_name}.{frame_number}" - settings.output_directory.path = f"{render_dir}/{r.get('output')}" - - renderPass = job.get_configuration().find_or_add_setting_by_class( - unreal.MoviePipelineDeferredPassBase) - renderPass.disable_multisample_effects = True + settings.file_name_format = f"{shot_name}" + ".{frame_number}" + settings.output_directory.path = f"{render_dir}/{output_dir}" job.get_configuration().find_or_add_setting_by_class( - unreal.MoviePipelineImageSequenceOutput_PNG) + unreal.MoviePipelineDeferredPassBase) + + render_format = data.get("unreal").get("render_format", "png") + + if render_format == "png": + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_PNG) + elif render_format == "exr": + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_EXR) + elif render_format == "jpg": + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_JPG) + elif render_format == "bmp": + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_BMP) # If there are jobs in the queue, start the rendering process. 
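Before the queue is started below, a design note on the render-format branching introduced above: the if/elif chain maps a settings string to one of four MoviePipeline output classes, and the same selection can be written as a lookup table that keeps the default in one place. A sketch reusing job and render_format from the surrounding loop, with PNG as the assumed fallback:

# Equivalent lookup-table form of the format if/elif chain above.
import unreal

OUTPUT_CLASS_BY_FORMAT = {
    "png": unreal.MoviePipelineImageSequenceOutput_PNG,
    "exr": unreal.MoviePipelineImageSequenceOutput_EXR,
    "jpg": unreal.MoviePipelineImageSequenceOutput_JPG,
    "bmp": unreal.MoviePipelineImageSequenceOutput_BMP,
}

output_class = OUTPUT_CLASS_BY_FORMAT.get(
    render_format, unreal.MoviePipelineImageSequenceOutput_PNG)
job.get_configuration().find_or_add_setting_by_class(output_class)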
if queue.get_jobs(): global executor executor = unreal.MoviePipelinePIEExecutor() + + preroll_frames = data.get("unreal").get("preroll_frames", 0) + + settings = unreal.MoviePipelinePIEExecutorSettings() + settings.set_editor_property( + "initial_delay_frame_count", preroll_frames) + executor.on_executor_finished_delegate.add_callable_unique( _queue_finish_callback) executor.on_individual_job_finished_delegate.add_callable_unique( diff --git a/openpype/hosts/unreal/api/tools_ui.py b/openpype/hosts/unreal/api/tools_ui.py index 708e167a65..5a4c689918 100644 --- a/openpype/hosts/unreal/api/tools_ui.py +++ b/openpype/hosts/unreal/api/tools_ui.py @@ -17,9 +17,8 @@ class ToolsBtnsWidget(QtWidgets.QWidget): def __init__(self, parent=None): super(ToolsBtnsWidget, self).__init__(parent) - create_btn = QtWidgets.QPushButton("Create...", self) load_btn = QtWidgets.QPushButton("Load...", self) - publish_btn = QtWidgets.QPushButton("Publish...", self) + publish_btn = QtWidgets.QPushButton("Publisher...", self) manage_btn = QtWidgets.QPushButton("Manage...", self) render_btn = QtWidgets.QPushButton("Render...", self) experimental_tools_btn = QtWidgets.QPushButton( @@ -28,7 +27,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget): layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(create_btn, 0) layout.addWidget(load_btn, 0) layout.addWidget(publish_btn, 0) layout.addWidget(manage_btn, 0) @@ -36,7 +34,6 @@ class ToolsBtnsWidget(QtWidgets.QWidget): layout.addWidget(experimental_tools_btn, 0) layout.addStretch(1) - create_btn.clicked.connect(self._on_create) load_btn.clicked.connect(self._on_load) publish_btn.clicked.connect(self._on_publish) manage_btn.clicked.connect(self._on_manage) @@ -50,7 +47,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): self.tool_required.emit("loader") def _on_publish(self): - self.tool_required.emit("publish") + self.tool_required.emit("publisher") def _on_manage(self): self.tool_required.emit("sceneinventory") @@ -67,7 +64,7 @@ class ToolsDialog(QtWidgets.QDialog): def __init__(self, *args, **kwargs): super(ToolsDialog, self).__init__(*args, **kwargs) - self.setWindowTitle("OpenPype tools") + self.setWindowTitle("Ayon tools") icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) self.setWindowIcon(icon) diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py index 2dc6fb9f42..f01609d314 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py @@ -3,7 +3,14 @@ import os import copy from pathlib import Path +from openpype.widgets.splash_screen import SplashScreen +from qtpy import QtCore +from openpype.hosts.unreal.ue_workers import ( + UEProjectGenerationWorker, + UEPluginInstallWorker +) +from openpype import resources from openpype.lib import ( PreLaunchHook, ApplicationLaunchFailed, @@ -17,11 +24,12 @@ class UnrealPrelaunchHook(PreLaunchHook): """Hook to handle launching Unreal. This hook will check if current workfile path has Unreal - project inside. IF not, it initialize it and finally it pass + project inside. IF not, it initializes it, and finally it pass path to the project by environment variable to Unreal launcher shell script. 
""" + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -53,14 +61,87 @@ class UnrealPrelaunchHook(PreLaunchHook): project_name=project_doc["name"] ) # Fill templates - filled_anatomy = anatomy.format(workdir_data) + template_obj = anatomy.templates_obj[workfile_template_key]["file"] # Return filename - return filled_anatomy[workfile_template_key]["file"] + return template_obj.format_strict(workdir_data) + + def exec_plugin_install(self, engine_path: Path, env: dict = None): + # set up the QThread and worker with necessary signals + env = env or os.environ + q_thread = QtCore.QThread() + ue_plugin_worker = UEPluginInstallWorker() + + q_thread.started.connect(ue_plugin_worker.run) + ue_plugin_worker.setup(engine_path, env) + ue_plugin_worker.moveToThread(q_thread) + + splash_screen = SplashScreen( + "Installing plugin", + resources.get_resource("app_icons", "ue4.png") + ) + + # set up the splash screen with necessary triggers + ue_plugin_worker.installing.connect( + splash_screen.update_top_label_text + ) + ue_plugin_worker.progress.connect(splash_screen.update_progress) + ue_plugin_worker.log.connect(splash_screen.append_log) + ue_plugin_worker.finished.connect(splash_screen.quit_and_close) + ue_plugin_worker.failed.connect(splash_screen.fail) + + splash_screen.start_thread(q_thread) + splash_screen.show_ui() + + if not splash_screen.was_proc_successful(): + raise ApplicationLaunchFailed("Couldn't run the application! " + "Plugin failed to install!") + + def exec_ue_project_gen(self, + engine_version: str, + unreal_project_name: str, + engine_path: Path, + project_dir: Path): + self.log.info(( + f"{self.signature} Creating unreal " + f"project [ {unreal_project_name} ]" + )) + + q_thread = QtCore.QThread() + ue_project_worker = UEProjectGenerationWorker() + ue_project_worker.setup( + engine_version, + unreal_project_name, + engine_path, + project_dir + ) + ue_project_worker.moveToThread(q_thread) + q_thread.started.connect(ue_project_worker.run) + + splash_screen = SplashScreen( + "Initializing UE project", + resources.get_resource("app_icons", "ue4.png") + ) + + ue_project_worker.stage_begin.connect( + splash_screen.update_top_label_text + ) + ue_project_worker.progress.connect(splash_screen.update_progress) + ue_project_worker.log.connect(splash_screen.append_log) + ue_project_worker.finished.connect(splash_screen.quit_and_close) + ue_project_worker.failed.connect(splash_screen.fail) + + splash_screen.start_thread(q_thread) + splash_screen.show_ui() + + if not splash_screen.was_proc_successful(): + raise ApplicationLaunchFailed("Couldn't run the application! " + "Failed to generate the project!") def execute(self): """Hook entry method.""" workdir = self.launch_context.env["AVALON_WORKDIR"] + executable = str(self.launch_context.executable) engine_version = self.app_name.split("/")[-1].replace("-", ".") try: if int(engine_version.split(".")[0]) < 4 and \ @@ -72,16 +153,16 @@ class UnrealPrelaunchHook(PreLaunchHook): # there can be string in minor version and in that case # int cast is failing. This probably happens only with # early access versions and is of no concert for this check - # so lets keep it quite. + # so let's keep it quiet. ... unreal_project_filename = self._get_work_filename() unreal_project_name = os.path.splitext(unreal_project_filename)[0] # Unreal is sensitive about project names longer then 20 chars if len(unreal_project_name) > 20: - self.log.warning(( - f"Project name exceed 20 characters ({unreal_project_name})!" 
- )) + raise ApplicationLaunchFailed( + f"Project name exceeds 20 characters ({unreal_project_name})!" + ) # Unreal doesn't accept non alphabet characters at the start # of the project name. This is because project name is then used @@ -103,54 +184,36 @@ class UnrealPrelaunchHook(PreLaunchHook): f"[ {engine_version} ]" )) - detected = unreal_lib.get_engine_versions(self.launch_context.env) - detected_str = ', '.join(detected.keys()) or 'none' - self.log.info(( - f"{self.signature} detected UE versions: " - f"[ {detected_str} ]" - )) - if not detected: - raise ApplicationNotFound("No Unreal Engines are found.") - - engine_version = ".".join(engine_version.split(".")[:2]) - if engine_version not in detected.keys(): - raise ApplicationLaunchFailed(( - f"{self.signature} requested version not " - f"detected [ {engine_version} ]" - )) - - ue_path = unreal_lib.get_editor_executable_path( - Path(detected[engine_version]), engine_version) - - self.launch_context.launch_args = [ue_path.as_posix()] project_path.mkdir(parents=True, exist_ok=True) - project_file = project_path / unreal_project_filename - if not project_file.is_file(): - engine_path = detected[engine_version] + # Set "AYON_UNREAL_PLUGIN" to current process environment for + # execution of `create_unreal_project` + + if self.launch_context.env.get("AYON_UNREAL_PLUGIN"): self.log.info(( - f"{self.signature} creating unreal " - f"project [ {unreal_project_name} ]" + f"{self.signature} using Ayon plugin from " + f"{self.launch_context.env.get('AYON_UNREAL_PLUGIN')}" )) - # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for - # execution of `create_unreal_project` - if self.launch_context.env.get("OPENPYPE_UNREAL_PLUGIN"): - self.log.info(( - f"{self.signature} using OpenPype plugin from " - f"{self.launch_context.env.get('OPENPYPE_UNREAL_PLUGIN')}" - )) - env_key = "OPENPYPE_UNREAL_PLUGIN" - if self.launch_context.env.get(env_key): - os.environ[env_key] = self.launch_context.env[env_key] + env_key = "AYON_UNREAL_PLUGIN" + if self.launch_context.env.get(env_key): + os.environ[env_key] = self.launch_context.env[env_key] - unreal_lib.create_unreal_project( - unreal_project_name, - engine_version, - project_path, - engine_path=Path(engine_path) - ) + # engine_path points to the specific Unreal Engine root + # so, we are going up from the executable itself 3 levels. 
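A concrete illustration of that three-level climb, assuming a default Windows install layout (the path is hypothetical; the actual location depends on how the launcher resolves the executable):

# parents[3] climbs from the editor binary to the engine version root:
# Win64 -> Binaries -> Engine -> UE_5.0
from pathlib import Path

executable = Path(
    "C:/Program Files/Epic Games/UE_5.0"
    "/Engine/Binaries/Win64/UnrealEditor.exe")
engine_path = executable.parents[3]
print(engine_path.as_posix())  # C:/Program Files/Epic Games/UE_5.0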
+ engine_path: Path = Path(executable).parents[3] - self.launch_context.env["OPENPYPE_UNREAL_VERSION"] = engine_version + if not unreal_lib.check_plugin_existence(engine_path): + self.exec_plugin_install(engine_path) + + project_file = project_path / unreal_project_filename + + if not project_file.is_file(): + self.exec_ue_project_gen(engine_version, + unreal_project_name, + engine_path, + project_path) + + self.launch_context.env["AYON_UNREAL_VERSION"] = engine_version # Append project file to launch arguments self.launch_context.launch_args.append( f"\"{project_file.as_posix()}\"") diff --git a/openpype/hosts/unreal/integration b/openpype/hosts/unreal/integration new file mode 160000 index 0000000000..ff15c70077 --- /dev/null +++ b/openpype/hosts/unreal/integration @@ -0,0 +1 @@ +Subproject commit ff15c700771e719cc5f3d561ac5d6f7590623986 diff --git a/openpype/hosts/unreal/integration/UE_4.7/.gitignore b/openpype/hosts/unreal/integration/UE_4.7/.gitignore deleted file mode 100644 index b32a6f55e5..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -/Binaries -/Intermediate diff --git a/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini b/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini deleted file mode 100644 index 8a883cf1db..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini +++ /dev/null @@ -1,2 +0,0 @@ -๏ปฟ[/Script/OpenPype.OpenPypeSettings] -FolderColor=(R=91,G=197,B=220,A=255) \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py deleted file mode 100644 index b85f970699..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py +++ /dev/null @@ -1,30 +0,0 @@ -import unreal - -openpype_detected = True -try: - from openpype.pipeline import install_host - from openpype.hosts.unreal.api import UnrealHost - - openpype_host = UnrealHost() -except ImportError as exc: - openpype_host = None - openpype_detected = False - unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) - -if openpype_detected: - install_host(openpype_host) - - -@unreal.uclass() -class OpenPypeIntegration(unreal.OpenPypePythonBridge): - @unreal.ufunction(override=True) - def RunInPython_Popup(self): - unreal.log_warning("OpenPype: showing tools popup") - if openpype_detected: - openpype_host.show_tools_popup() - - @unreal.ufunction(override=True) - def RunInPython_Dialog(self): - unreal.log_warning("OpenPype: showing tools dialog") - if openpype_detected: - openpype_host.show_tools_dialog() diff --git a/openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin deleted file mode 100644 index 4c7a74403c..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin +++ /dev/null @@ -1,24 +0,0 @@ -{ - "FileVersion": 3, - "Version": 1, - "VersionName": "1.0", - "FriendlyName": "OpenPype", - "Description": "OpenPype Integration", - "Category": "OpenPype.Integration", - "CreatedBy": "Ondrej Samohel", - "CreatedByURL": "https://openpype.io", 
- "DocsURL": "https://openpype.io/docs/artist_hosts_unreal", - "MarketplaceURL": "", - "SupportURL": "https://pype.club/", - "CanContainContent": true, - "IsBetaVersion": true, - "IsExperimentalVersion": false, - "Installed": false, - "Modules": [ - { - "Name": "OpenPype", - "Type": "Editor", - "LoadingPhase": "Default" - } - ] -} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/README.md b/openpype/hosts/unreal/integration/UE_4.7/README.md deleted file mode 100644 index a08c1ada39..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# OpenPype Unreal Integration plugin - UE 4.x - -This is plugin for Unreal Editor, creating menu for [OpenPype](https://github.com/getavalon) tools to run. - -## How does this work - -Plugin is creating basic menu items in **Window/OpenPype** section of Unreal Editor main menu and a button -on the main toolbar with associated menu. Clicking on those menu items is calling callbacks that are -declared in c++ but needs to be implemented during Unreal Editor -startup in `Plugins/OpenPype/Content/Python/init_unreal.py` - this should be executed by Unreal Editor -automatically. diff --git a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png deleted file mode 100644 index abe8a807ef..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png deleted file mode 100644 index f983e7a1f2..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png deleted file mode 100644 index 97c4d4326b..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs deleted file mode 100644 index 46e5dcb2df..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. - -using UnrealBuildTool; - -public class OpenPype : ModuleRules -{ - public OpenPype(ReadOnlyTargetRules Target) : base(Target) - { - PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs; - - PublicIncludePaths.AddRange( - new string[] { - // ... add public include paths required here ... - } - ); - - - PrivateIncludePaths.AddRange( - new string[] { - // ... add other private include paths required here ... - } - ); - - - PublicDependencyModuleNames.AddRange( - new string[] - { - "Core", - // ... add other public dependencies that you statically link with here ... - } - ); - - - PrivateDependencyModuleNames.AddRange( - new string[] - { - "Projects", - "InputCore", - "UnrealEd", - "LevelEditor", - "CoreUObject", - "Engine", - "Slate", - "SlateCore", - "AssetTools" - // ... add private dependencies that you statically link with here ... - } - ); - - - DynamicallyLoadedModuleNames.AddRange( - new string[] - { - // ... add any modules that your module loads dynamically here ... 
- } - ); - } -} diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp deleted file mode 100644 index c766f87a8e..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp +++ /dev/null @@ -1,115 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. - -#include "AssetContainer.h" -#include "AssetRegistryModule.h" -#include "Misc/PackageName.h" -#include "Engine.h" -#include "Containers/UnrealString.h" - -UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer) -: UAssetUserData(ObjectInitializer) -{ - FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry"); - FString path = UAssetContainer::GetPathName(); - UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path); - FARFilter Filter; - Filter.PackagePaths.Add(FName(*path)); - - AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded); - AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved); - AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed); -} - -void UAssetContainer::OnAssetAdded(const FAssetData& AssetData) -{ - TArray split; - - // get directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClass.ToString(); - - // split path - assetPath.ParseIntoArray(split, TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - - // take interest only in paths starting with path of current container - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - assets.Add(assetPath); - assetsData.Add(AssetData); - UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir); - } - } -} - -void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData) -{ - TArray split; - - // get directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClass.ToString(); - - // split path - assetPath.ParseIntoArray(split, TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - - // take interest only in paths starting with path of current container - FString path = UAssetContainer::GetPathName(); - FString lpp = FPackageName::GetLongPackagePath(*path); - - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp); - assets.Remove(assetPath); - assetsData.Remove(AssetData); - } - } -} - -void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str) -{ - TArray split; - - // get directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClass.ToString(); - - // split path - assetPath.ParseIntoArray(split, 
TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - - assets.Remove(str); - assets.Add(assetPath); - assetsData.Remove(AssetData); - // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str); - } - } -} - diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp deleted file mode 100644 index b943150bdd..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "AssetContainerFactory.h" -#include "AssetContainer.h" - -UAssetContainerFactory::UAssetContainerFactory(const FObjectInitializer& ObjectInitializer) - : UFactory(ObjectInitializer) -{ - SupportedClass = UAssetContainer::StaticClass(); - bCreateNew = false; - bEditorImport = true; -} - -UObject* UAssetContainerFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) -{ - UAssetContainer* AssetContainer = NewObject(InParent, Class, Name, Flags); - return AssetContainer; -} - -bool UAssetContainerFactory::ShouldShowInNewMenu() const { - return false; -} diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp deleted file mode 100644 index d06a08eb43..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp +++ /dev/null @@ -1,152 +0,0 @@ -#include "OpenPype.h" - -#include "ISettingsContainer.h" -#include "ISettingsModule.h" -#include "ISettingsSection.h" -#include "LevelEditor.h" -#include "OpenPypePythonBridge.h" -#include "OpenPypeSettings.h" -#include "OpenPypeStyle.h" - - -static const FName OpenPypeTabName("OpenPype"); - -#define LOCTEXT_NAMESPACE "FOpenPypeModule" - -// This function is triggered when the plugin is staring up -void FOpenPypeModule::StartupModule() -{ - FOpenPypeStyle::Initialize(); - FOpenPypeStyle::SetIcon("Logo", "openpype40"); - - // Create the Extender that will add content to the menu - FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked("LevelEditor"); - - TSharedPtr MenuExtender = MakeShareable(new FExtender()); - TSharedPtr ToolbarExtender = MakeShareable(new FExtender()); - - MenuExtender->AddMenuExtension( - "LevelEditor", - EExtensionHook::After, - NULL, - FMenuExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddMenuEntry) - ); - ToolbarExtender->AddToolBarExtension( - "Settings", - EExtensionHook::After, - NULL, - FToolBarExtensionDelegate::CreateRaw(this, &FOpenPypeModule::AddToobarEntry)); - - - LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender); - LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender); - - RegisterSettings(); -} - -void FOpenPypeModule::ShutdownModule() -{ - FOpenPypeStyle::Shutdown(); -} - - -void FOpenPypeModule::AddMenuEntry(FMenuBuilder& MenuBuilder) -{ - // Create Section - MenuBuilder.BeginSection("OpenPype", TAttribute(FText::FromString("OpenPype"))); - { - // Create a Submenu inside of the Section - MenuBuilder.AddMenuEntry( - FText::FromString("Tools..."), - FText::FromString("Pipeline tools"), - FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo"), - 
FUIAction(FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup)) - ); - - MenuBuilder.AddMenuEntry( - FText::FromString("Tools dialog..."), - FText::FromString("Pipeline tools dialog"), - FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo"), - FUIAction(FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog)) - ); - } - MenuBuilder.EndSection(); -} - -void FOpenPypeModule::AddToobarEntry(FToolBarBuilder& ToolbarBuilder) -{ - ToolbarBuilder.BeginSection(TEXT("OpenPype")); - { - ToolbarBuilder.AddToolBarButton( - FUIAction( - FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup), - NULL, - FIsActionChecked() - - ), - NAME_None, - LOCTEXT("OpenPype_label", "OpenPype"), - LOCTEXT("OpenPype_tooltip", "OpenPype Tools"), - FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo") - ); - } - ToolbarBuilder.EndSection(); -} - -void FOpenPypeModule::RegisterSettings() -{ - ISettingsModule& SettingsModule = FModuleManager::LoadModuleChecked("Settings"); - - // Create the new category - // TODO: After the movement of the plugin from the game to editor, it might be necessary to move this! - ISettingsContainerPtr SettingsContainer = SettingsModule.GetContainer("Project"); - - UOpenPypeSettings* Settings = GetMutableDefault(); - - // Register the settings - ISettingsSectionPtr SettingsSection = SettingsModule.RegisterSettings("Project", "OpenPype", "General", - LOCTEXT("RuntimeGeneralSettingsName", - "General"), - LOCTEXT("RuntimeGeneralSettingsDescription", - "Base configuration for Open Pype Module"), - Settings - ); - - // Register the save handler to your settings, you might want to use it to - // validate those or just act to settings changes. - if (SettingsSection.IsValid()) - { - SettingsSection->OnModified().BindRaw(this, &FOpenPypeModule::HandleSettingsSaved); - } -} - -bool FOpenPypeModule::HandleSettingsSaved() -{ - UOpenPypeSettings* Settings = GetMutableDefault(); - bool ResaveSettings = false; - - // You can put any validation code in here and resave the settings in case an invalid - // value has been entered - - if (ResaveSettings) - { - Settings->SaveConfig(); - } - - return true; -} - - -void FOpenPypeModule::MenuPopup() -{ - UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); - bridge->RunInPython_Popup(); -} - -void FOpenPypeModule::MenuDialog() -{ - UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); - bridge->RunInPython_Dialog(); -} - -IMPLEMENT_MODULE(FOpenPypeModule, OpenPype) diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp deleted file mode 100644 index a58e921288..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include "OpenPypeLib.h" - -#include "AssetViewUtils.h" -#include "Misc/Paths.h" -#include "Misc/ConfigCacheIni.h" -#include "UObject/UnrealType.h" - -/** - * Sets color on folder icon on given path - * @param InPath - path to folder - * @param InFolderColor - color of the folder - * @warning This color will appear only after Editor restart. Is there a better way? 
- */ - -bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor, const bool& bForceAdd) -{ - if (AssetViewUtils::DoesFolderExist(FolderPath)) - { - const TSharedPtr LinearColor = MakeShared(FolderColor); - - AssetViewUtils::SaveColor(FolderPath, LinearColor, true); - UE_LOG(LogAssetData, Display, TEXT("A color {%s} has been set to folder \"%s\""), *LinearColor->ToString(), - *FolderPath) - return true; - } - - UE_LOG(LogAssetData, Display, TEXT("Setting a color {%s} to folder \"%s\" has failed! Directory doesn't exist!"), - *FolderColor.ToString(), *FolderPath) - return false; -} - -/** - * Returns all poperties on given object - * @param cls - class - * @return TArray of properties - */ -TArray UOpenPypeLib::GetAllProperties(UClass* cls) -{ - TArray Ret; - if (cls != nullptr) - { - for (TFieldIterator It(cls); It; ++It) - { - FProperty* Property = *It; - if (Property->HasAnyPropertyFlags(EPropertyFlags::CPF_Edit)) - { - Ret.Add(Property->GetName()); - } - } - } - return Ret; -} diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp deleted file mode 100644 index 38740f1cbd..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp +++ /dev/null @@ -1,200 +0,0 @@ -#pragma once - -#include "OpenPypePublishInstance.h" -#include "AssetRegistryModule.h" -#include "NotificationManager.h" -#include "OpenPypeLib.h" -#include "OpenPypeSettings.h" -#include "SNotificationList.h" - -//Moves all the invalid pointers to the end to prepare them for the shrinking -#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \ - VAR.Shrink(); - -UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer) - : UPrimaryDataAsset(ObjectInitializer) -{ - const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked< - FAssetRegistryModule>("AssetRegistry"); - - const FPropertyEditorModule& PropertyEditorModule = FModuleManager::LoadModuleChecked( - "PropertyEditor"); - - FString Left, Right; - GetPathName().Split("/" + GetName(), &Left, &Right); - - FARFilter Filter; - Filter.PackagePaths.Emplace(FName(Left)); - - TArray FoundAssets; - AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets); - - for (const FAssetData& AssetData : FoundAssets) - OnAssetCreated(AssetData); - - REMOVE_INVALID_ENTRIES(AssetDataInternal) - REMOVE_INVALID_ENTRIES(AssetDataExternal) - - AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated); - AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved); - AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated); - -#ifdef WITH_EDITOR - ColorOpenPypeDirs(); -#endif - -} - -void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData) -{ - TArray split; - - UObject* Asset = InAssetData.GetAsset(); - - if (!IsValid(Asset)) - { - UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! 
Skipping the addition."), - *InAssetData.ObjectPath.ToString()); - return; - } - - const bool result = IsUnderSameDir(Asset) && Cast(Asset) == nullptr; - - if (result) - { - if (AssetDataInternal.Emplace(Asset).IsValidId()) - { - UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"), - *this->GetName(), *Asset->GetName()); - } - } -} - -void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData) -{ - if (Cast(InAssetData.GetAsset()) == nullptr) - { - if (AssetDataInternal.Contains(nullptr)) - { - AssetDataInternal.Remove(nullptr); - REMOVE_INVALID_ENTRIES(AssetDataInternal) - } - else - { - AssetDataExternal.Remove(nullptr); - REMOVE_INVALID_ENTRIES(AssetDataExternal) - } - } -} - -void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData) -{ - REMOVE_INVALID_ENTRIES(AssetDataInternal); - REMOVE_INVALID_ENTRIES(AssetDataExternal); -} - -bool UOpenPypePublishInstance::IsUnderSameDir(const UObject* InAsset) const -{ - FString ThisLeft, ThisRight; - this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight); - - return InAsset->GetPathName().StartsWith(ThisLeft); -} - -#ifdef WITH_EDITOR - -void UOpenPypePublishInstance::ColorOpenPypeDirs() -{ - FString PathName = this->GetPathName(); - - //Check whether the path contains the defined OpenPype folder - if (!PathName.Contains(TEXT("OpenPype"))) return; - - //Get the base path for open pype - FString PathLeft, PathRight; - PathName.Split(FString("OpenPype"), &PathLeft, &PathRight); - - if (PathLeft.IsEmpty() || PathRight.IsEmpty()) - { - UE_LOG(LogAssetData, Error, TEXT("Failed to retrieve the base OpenPype directory!")) - return; - } - - PathName.RemoveFromEnd(PathRight, ESearchCase::CaseSensitive); - - //Get the current settings - const UOpenPypeSettings* Settings = GetMutableDefault(); - - //Color the base folder - UOpenPypeLib::SetFolderColor(PathName, Settings->GetFolderFColor(), false); - - //Get Sub paths, iterate through them and color them according to the folder color in UOpenPypeSettings - const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked( - "AssetRegistry"); - - TArray PathList; - - AssetRegistryModule.Get().GetSubPaths(PathName, PathList, true); - - if (PathList.Num() > 0) - { - for (const FString& Path : PathList) - { - UOpenPypeLib::SetFolderColor(Path, Settings->GetFolderFColor(), false); - } - } -} - -void UOpenPypePublishInstance::SendNotification(const FString& Text) const -{ - FNotificationInfo Info{FText::FromString(Text)}; - - Info.bFireAndForget = true; - Info.bUseLargeFont = false; - Info.bUseThrobber = false; - Info.bUseSuccessFailIcons = false; - Info.ExpireDuration = 4.f; - Info.FadeOutDuration = 2.f; - - FSlateNotificationManager::Get().AddNotification(Info); - - UE_LOG(LogAssetData, Warning, - TEXT( - "Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!" 
- ), *GetName() - ) -} - - -void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) -{ - Super::PostEditChangeProperty(PropertyChangedEvent); - - if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet && - PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED( - UOpenPypePublishInstance, AssetDataExternal)) - { - // Check for duplicated assets - for (const auto& Asset : AssetDataInternal) - { - if (AssetDataExternal.Contains(Asset)) - { - AssetDataExternal.Remove(Asset); - return SendNotification( - "You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!"); - } - } - - // Check if no UOpenPypePublishInstance type assets are included - for (const auto& Asset : AssetDataExternal) - { - if (Cast(Asset.Get()) != nullptr) - { - AssetDataExternal.Remove(Asset); - return SendNotification("You are not allowed to add publish instances!"); - } - } - } -} - -#endif diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp deleted file mode 100644 index 9b26da7fa4..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "OpenPypePublishInstanceFactory.h" -#include "OpenPypePublishInstance.h" - -UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer) - : UFactory(ObjectInitializer) -{ - SupportedClass = UOpenPypePublishInstance::StaticClass(); - bCreateNew = false; - bEditorImport = true; -} - -UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) -{ - check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass())); - return NewObject(InParent, InClass, InName, Flags); -} - -bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const { - return false; -} diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp deleted file mode 100644 index 8113231503..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include "OpenPypePythonBridge.h" - -UOpenPypePythonBridge* UOpenPypePythonBridge::Get() -{ - TArray OpenPypePythonBridgeClasses; - GetDerivedClasses(UOpenPypePythonBridge::StaticClass(), OpenPypePythonBridgeClasses); - int32 NumClasses = OpenPypePythonBridgeClasses.Num(); - if (NumClasses > 0) - { - return Cast(OpenPypePythonBridgeClasses[NumClasses - 1]->GetDefaultObject()); - } - return nullptr; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp deleted file mode 100644 index 7134614d22..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp +++ /dev/null @@ -1,21 +0,0 @@ -๏ปฟ// Fill out your copyright notice in the Description page of Project Settings. 
- -#include "OpenPypeSettings.h" - -#include "IPluginManager.h" -#include "UObjectGlobals.h" - -/** - * Mainly is used for initializing default values if the DefaultOpenPypeSettings.ini file does not exist in the saved config - */ -UOpenPypeSettings::UOpenPypeSettings(const FObjectInitializer& ObjectInitializer) -{ - - const FString ConfigFilePath = OPENPYPE_SETTINGS_FILEPATH; - - // This has to be probably in the future set using the UE Reflection system - FColor Color; - GConfig->GetColor(TEXT("/Script/OpenPype.OpenPypeSettings"), TEXT("FolderColor"), Color, ConfigFilePath); - - FolderColor = Color; -} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp deleted file mode 100644 index a51c2d6aa5..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp +++ /dev/null @@ -1,70 +0,0 @@ -#include "OpenPypeStyle.h" -#include "Framework/Application/SlateApplication.h" -#include "Styling/SlateStyle.h" -#include "Styling/SlateStyleRegistry.h" - - -TUniquePtr< FSlateStyleSet > FOpenPypeStyle::OpenPypeStyleInstance = nullptr; - -void FOpenPypeStyle::Initialize() -{ - if (!OpenPypeStyleInstance.IsValid()) - { - OpenPypeStyleInstance = Create(); - FSlateStyleRegistry::RegisterSlateStyle(*OpenPypeStyleInstance); - } -} - -void FOpenPypeStyle::Shutdown() -{ - if (OpenPypeStyleInstance.IsValid()) - { - FSlateStyleRegistry::UnRegisterSlateStyle(*OpenPypeStyleInstance); - OpenPypeStyleInstance.Reset(); - } -} - -FName FOpenPypeStyle::GetStyleSetName() -{ - static FName StyleSetName(TEXT("OpenPypeStyle")); - return StyleSetName; -} - -FName FOpenPypeStyle::GetContextName() -{ - static FName ContextName(TEXT("OpenPype")); - return ContextName; -} - -#define IMAGE_BRUSH(RelativePath, ...) FSlateImageBrush( Style->RootToContentDir( RelativePath, TEXT(".png") ), __VA_ARGS__ ) - -const FVector2D Icon40x40(40.0f, 40.0f); - -TUniquePtr< FSlateStyleSet > FOpenPypeStyle::Create() -{ - TUniquePtr< FSlateStyleSet > Style = MakeUnique(GetStyleSetName()); - Style->SetContentRoot(FPaths::ProjectPluginsDir() / TEXT("OpenPype/Resources")); - - return Style; -} - -void FOpenPypeStyle::SetIcon(const FString& StyleName, const FString& ResourcePath) -{ - FSlateStyleSet* Style = OpenPypeStyleInstance.Get(); - - FString Name(GetContextName().ToString()); - Name = Name + "." + StyleName; - Style->Set(*Name, new FSlateImageBrush(Style->RootToContentDir(ResourcePath, TEXT(".png")), Icon40x40)); - - - FSlateApplication::Get().GetRenderer()->ReloadTextureResources(); -} - -#undef IMAGE_BRUSH - -const ISlateStyle& FOpenPypeStyle::Get() -{ - check(OpenPypeStyleInstance); - return *OpenPypeStyleInstance; - return *OpenPypeStyleInstance; -} diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h deleted file mode 100644 index 3c2a360c78..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h +++ /dev/null @@ -1,39 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. 
- -#pragma once - -#include "CoreMinimal.h" -#include "UObject/NoExportTypes.h" -#include "Engine/AssetUserData.h" -#include "AssetData.h" -#include "AssetContainer.generated.h" - -/** - * - */ -UCLASS(Blueprintable) -class OPENPYPE_API UAssetContainer : public UAssetUserData -{ - GENERATED_BODY() - -public: - - UAssetContainer(const FObjectInitializer& ObjectInitalizer); - // ~UAssetContainer(); - - UPROPERTY(EditAnywhere, BlueprintReadOnly) - TArray assets; - - // There seems to be no reflection option to expose array of FAssetData - /* - UPROPERTY(Transient, BlueprintReadOnly, Category = "Python", meta=(DisplayName="Assets Data")) - TArray assetsData; - */ -private: - TArray assetsData; - void OnAssetAdded(const FAssetData& AssetData); - void OnAssetRemoved(const FAssetData& AssetData); - void OnAssetRenamed(const FAssetData& AssetData, const FString& str); -}; - - diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h deleted file mode 100644 index 331ce6bb50..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h +++ /dev/null @@ -1,21 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. - -#pragma once - -#include "CoreMinimal.h" -#include "Factories/Factory.h" -#include "AssetContainerFactory.generated.h" - -/** - * - */ -UCLASS() -class OPENPYPE_API UAssetContainerFactory : public UFactory -{ - GENERATED_BODY() - -public: - UAssetContainerFactory(const FObjectInitializer& ObjectInitializer); - virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; - virtual bool ShouldShowInNewMenu() const override; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h deleted file mode 100644 index 9cfa60176c..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. 
- -#pragma once - -#include "Engine.h" - - -class FOpenPypeModule : public IModuleInterface -{ -public: - virtual void StartupModule() override; - virtual void ShutdownModule() override; - -private: - void RegisterSettings(); - bool HandleSettingsSaved(); - - void AddMenuEntry(FMenuBuilder& MenuBuilder); - void AddToobarEntry(FToolBarBuilder& ToolbarBuilder); - void MenuPopup(); - void MenuDialog(); -}; diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h deleted file mode 100644 index 06425c7c7d..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "Engine.h" -#include "OpenPypeLib.generated.h" - - -UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypeLib : public UBlueprintFunctionLibrary -{ - - GENERATED_BODY() - -public: - UFUNCTION(BlueprintCallable, Category = Python) - static bool SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor,const bool& bForceAdd); - - UFUNCTION(BlueprintCallable, Category = Python) - static TArray GetAllProperties(UClass* cls); -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h deleted file mode 100644 index cd414fe2cc..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h +++ /dev/null @@ -1,101 +0,0 @@ -#pragma once - -#include "Engine.h" -#include "OpenPypePublishInstance.generated.h" - - -UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset -{ - GENERATED_UCLASS_BODY() - -public: - /** - * Retrieves all the assets which are monitored by the Publish Instance (Monitors assets in the directory which is - * placed in) - * - * @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5 - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetInternalAssets() const - { - //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. - TSet ResultSet; - - for (const auto& Asset : AssetDataInternal) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - - /** - * Retrieves all the assets which have been added manually by the Publish Instance - * - * @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5 - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetExternalAssets() const - { - //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. - TSet ResultSet; - - for (const auto& Asset : AssetDataExternal) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - - /** - * Function for returning all the assets in the container combined. - * - * @return Returns all the internal and externally added assets into one set (TSet of UObjects). Careful! They are - * returning raw pointers. Seems like an issue in UE5 - * - * @attention If the bAddExternalAssets variable is false, external assets won't be included! - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetAllAssets() const - { - const TSet>& IteratedSet = bAddExternalAssets - ? AssetDataInternal.Union(AssetDataExternal) - : AssetDataInternal; - - //Create a new TSet only with raw pointers. 
- TSet ResultSet; - - for (auto& Asset : IteratedSet) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - -private: - UPROPERTY(VisibleAnywhere, Category="Assets") - TSet> AssetDataInternal; - - /** - * This property allows exposing the array to include other assets from any other directory than what it's currently - * monitoring. NOTE: that these assets have to be added manually! They are not automatically registered or added! - */ - UPROPERTY(EditAnywhere, Category = "Assets") - bool bAddExternalAssets = false; - - UPROPERTY(EditAnywhere, meta=(EditCondition="bAddExternalAssets"), Category="Assets") - TSet> AssetDataExternal; - - - void OnAssetCreated(const FAssetData& InAssetData); - void OnAssetRemoved(const FAssetData& InAssetData); - void OnAssetUpdated(const FAssetData& InAssetData); - - bool IsUnderSameDir(const UObject* InAsset) const; - -#ifdef WITH_EDITOR - - void ColorOpenPypeDirs(); - - void SendNotification(const FString& Text) const; - virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override; - -#endif -}; diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h deleted file mode 100644 index 7d2c77fe6e..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "CoreMinimal.h" -#include "Factories/Factory.h" -#include "OpenPypePublishInstanceFactory.generated.h" - -/** - * - */ -UCLASS() -class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory -{ - GENERATED_BODY() - -public: - UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer); - virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; - virtual bool ShouldShowInNewMenu() const override; -}; diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h deleted file mode 100644 index 692aab2e5e..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once -#include "Engine.h" -#include "OpenPypePythonBridge.generated.h" - -UCLASS(Blueprintable) -class UOpenPypePythonBridge : public UObject -{ - GENERATED_BODY() - -public: - UFUNCTION(BlueprintCallable, Category = Python) - static UOpenPypePythonBridge* Get(); - - UFUNCTION(BlueprintImplementableEvent, Category = Python) - void RunInPython_Popup() const; - - UFUNCTION(BlueprintImplementableEvent, Category = Python) - void RunInPython_Dialog() const; - -}; diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h deleted file mode 100644 index 2df6c887cf..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h +++ /dev/null @@ -1,32 +0,0 @@ -๏ปฟ// Fill out your copyright notice in the Description page of Project Settings. 
- -#pragma once - -#include "CoreMinimal.h" -#include "Object.h" -#include "OpenPypeSettings.generated.h" - -#define OPENPYPE_SETTINGS_FILEPATH IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Config") / TEXT("DefaultOpenPypeSettings.ini") - -UCLASS(Config=OpenPypeSettings, DefaultConfig) -class OPENPYPE_API UOpenPypeSettings : public UObject -{ - GENERATED_UCLASS_BODY() - - UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) - FColor GetFolderFColor() const - { - return FolderColor; - } - - UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) - FLinearColor GetFolderFLinearColor() const - { - return FLinearColor(FolderColor); - } - -protected: - - UPROPERTY(config, EditAnywhere, Category = Folders) - FColor FolderColor = FColor(25,45,223); -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h deleted file mode 100644 index fbc8bcdd5b..0000000000 --- a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once -#include "CoreMinimal.h" - -class FSlateStyleSet; -class ISlateStyle; - - -class FOpenPypeStyle -{ -public: - static void Initialize(); - static void Shutdown(); - static const ISlateStyle& Get(); - static FName GetStyleSetName(); - static FName GetContextName(); - - static void SetIcon(const FString& StyleName, const FString& ResourcePath); - -private: - static TUniquePtr< FSlateStyleSet > Create(); - static TUniquePtr< FSlateStyleSet > OpenPypeStyleInstance; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/.gitignore b/openpype/hosts/unreal/integration/UE_5.0/.gitignore deleted file mode 100644 index b32a6f55e5..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -/Binaries -/Intermediate diff --git a/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini b/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini deleted file mode 100644 index 8a883cf1db..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini +++ /dev/null @@ -1,2 +0,0 @@ -๏ปฟ[/Script/OpenPype.OpenPypeSettings] -FolderColor=(R=91,G=197,B=220,A=255) \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py deleted file mode 100644 index b85f970699..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py +++ /dev/null @@ -1,30 +0,0 @@ -import unreal - -openpype_detected = True -try: - from openpype.pipeline import install_host - from openpype.hosts.unreal.api import UnrealHost - - openpype_host = UnrealHost() -except ImportError as exc: - openpype_host = None - openpype_detected = False - unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) - -if openpype_detected: - install_host(openpype_host) - - -@unreal.uclass() -class OpenPypeIntegration(unreal.OpenPypePythonBridge): - @unreal.ufunction(override=True) - def 
RunInPython_Popup(self): - unreal.log_warning("OpenPype: showing tools popup") - if openpype_detected: - openpype_host.show_tools_popup() - - @unreal.ufunction(override=True) - def RunInPython_Dialog(self): - unreal.log_warning("OpenPype: showing tools dialog") - if openpype_detected: - openpype_host.show_tools_dialog() diff --git a/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin deleted file mode 100644 index 4c7a74403c..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin +++ /dev/null @@ -1,24 +0,0 @@ -{ - "FileVersion": 3, - "Version": 1, - "VersionName": "1.0", - "FriendlyName": "OpenPype", - "Description": "OpenPype Integration", - "Category": "OpenPype.Integration", - "CreatedBy": "Ondrej Samohel", - "CreatedByURL": "https://openpype.io", - "DocsURL": "https://openpype.io/docs/artist_hosts_unreal", - "MarketplaceURL": "", - "SupportURL": "https://pype.club/", - "CanContainContent": true, - "IsBetaVersion": true, - "IsExperimentalVersion": false, - "Installed": false, - "Modules": [ - { - "Name": "OpenPype", - "Type": "Editor", - "LoadingPhase": "Default" - } - ] -} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/README.md b/openpype/hosts/unreal/integration/UE_5.0/README.md deleted file mode 100644 index cf0aa622c2..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# OpenPype Unreal Integration plugin - UE 5.x - -This is a plugin for Unreal Editor that creates a menu for launching [OpenPype](https://github.com/getavalon) tools. - -## How does this work - -The plugin creates basic menu items in the **Window/OpenPype** section of the Unreal Editor main menu and a button -on the main toolbar with an associated menu. Clicking those menu items invokes callbacks that are -declared in C++ but need to be implemented during Unreal Editor -startup in `Plugins/OpenPype/Content/Python/init_unreal.py` - this should be executed by Unreal Editor -automatically. diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png deleted file mode 100644 index abe8a807ef..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png deleted file mode 100644 index f983e7a1f2..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png deleted file mode 100644 index 97c4d4326b..0000000000 Binary files a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png and /dev/null differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs deleted file mode 100644 index d853ec028f..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
- -using UnrealBuildTool; - -public class OpenPype : ModuleRules -{ - public OpenPype(ReadOnlyTargetRules Target) : base(Target) - { - DefaultBuildSettings = BuildSettingsVersion.V2; - bLegacyPublicIncludePaths = false; - ShadowVariableWarningLevel = WarningLevel.Error; - PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs; - IncludeOrderVersion = EngineIncludeOrderVersion.Unreal5_0; - - PublicIncludePaths.AddRange( - new string[] { - // ... add public include paths required here ... - } - ); - - - PrivateIncludePaths.AddRange( - new string[] { - // ... add other private include paths required here ... - } - ); - - - PublicDependencyModuleNames.AddRange( - new string[] - { - "Core", - // ... add other public dependencies that you statically link with here ... - } - ); - - - PrivateDependencyModuleNames.AddRange( - new string[] - { - "Projects", - "InputCore", - "EditorFramework", - "UnrealEd", - "ToolMenus", - "LevelEditor", - "CoreUObject", - "Engine", - "Slate", - "SlateCore", - "AssetTools" - // ... add private dependencies that you statically link with here ... - } - ); - - - DynamicallyLoadedModuleNames.AddRange( - new string[] - { - // ... add any modules that your module loads dynamically here ... - } - ); - } -} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp deleted file mode 100644 index 61e563f729..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp +++ /dev/null @@ -1,115 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. - -#include "AssetContainer.h" -#include "AssetRegistry/AssetRegistryModule.h" -#include "Misc/PackageName.h" -#include "Engine.h" -#include "Containers/UnrealString.h" - -UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer) -: UAssetUserData(ObjectInitializer) -{ - FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked("AssetRegistry"); - FString path = UAssetContainer::GetPathName(); - UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path); - FARFilter Filter; - Filter.PackagePaths.Add(FName(*path)); - - AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded); - AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved); - AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed); -} - -void UAssetContainer::OnAssetAdded(const FAssetData& AssetData) -{ - TArray split; - - // get directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClassPath.ToString(); - UE_LOG(LogTemp, Log, TEXT("asset name %s"), *assetFName); - // split path - assetPath.ParseIntoArray(split, TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - - // take interest only in paths starting with path of current container - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - assets.Add(assetPath); - assetsData.Add(AssetData); - UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir); - } - } -} - -void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData) -{ - TArray split; - - // get 
directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClassPath.ToString(); - - // split path - assetPath.ParseIntoArray(split, TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - - // take interest only in paths starting with path of current container - FString path = UAssetContainer::GetPathName(); - FString lpp = FPackageName::GetLongPackagePath(*path); - - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - // UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp); - assets.Remove(assetPath); - assetsData.Remove(AssetData); - } - } -} - -void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str) -{ - TArray split; - - // get directory of current container - FString selfFullPath = UAssetContainer::GetPathName(); - FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath); - - // get asset path and class - FString assetPath = AssetData.GetFullName(); - FString assetFName = AssetData.AssetClassPath.ToString(); - - // split path - assetPath.ParseIntoArray(split, TEXT(" "), true); - - FString assetDir = FPackageName::GetLongPackagePath(*split[1]); - if (assetDir.StartsWith(*selfDir)) - { - // exclude self - if (assetFName != "AssetContainer") - { - - assets.Remove(str); - assets.Add(assetPath); - assetsData.Remove(AssetData); - // UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str); - } - } -} - diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp deleted file mode 100644 index b943150bdd..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "AssetContainerFactory.h" -#include "AssetContainer.h" - -UAssetContainerFactory::UAssetContainerFactory(const FObjectInitializer& ObjectInitializer) - : UFactory(ObjectInitializer) -{ - SupportedClass = UAssetContainer::StaticClass(); - bCreateNew = false; - bEditorImport = true; -} - -UObject* UAssetContainerFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) -{ - UAssetContainer* AssetContainer = NewObject(InParent, Class, Name, Flags); - return AssetContainer; -} - -bool UAssetContainerFactory::ShouldShowInNewMenu() const { - return false; -} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp deleted file mode 100644 index d23de61102..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp +++ /dev/null @@ -1,139 +0,0 @@ -#include "OpenPype.h" - -#include "ISettingsContainer.h" -#include "ISettingsModule.h" -#include "ISettingsSection.h" -#include "OpenPypeStyle.h" -#include "OpenPypeCommands.h" -#include "OpenPypePythonBridge.h" -#include "OpenPypeSettings.h" -#include "Misc/MessageDialog.h" -#include "ToolMenus.h" - - -static const FName OpenPypeTabName("OpenPype"); - -#define LOCTEXT_NAMESPACE "FOpenPypeModule" - -// This function is triggered when the plugin is staring up -void FOpenPypeModule::StartupModule() -{ - 
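// Startup sequence, as implemented below: initialize the Slate style set and
// reload its textures, register the plugin commands, map them to the Python
// popup/dialog callbacks, defer menu creation until UToolMenus is ready, and
// finally register the plugin's settings section.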
FOpenPypeStyle::Initialize(); - FOpenPypeStyle::ReloadTextures(); - FOpenPypeCommands::Register(); - - PluginCommands = MakeShareable(new FUICommandList); - - PluginCommands->MapAction( - FOpenPypeCommands::Get().OpenPypeTools, - FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup), - FCanExecuteAction()); - PluginCommands->MapAction( - FOpenPypeCommands::Get().OpenPypeToolsDialog, - FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog), - FCanExecuteAction()); - - UToolMenus::RegisterStartupCallback( - FSimpleMulticastDelegate::FDelegate::CreateRaw(this, &FOpenPypeModule::RegisterMenus)); - - RegisterSettings(); -} - -void FOpenPypeModule::ShutdownModule() -{ - UToolMenus::UnRegisterStartupCallback(this); - - UToolMenus::UnregisterOwner(this); - - FOpenPypeStyle::Shutdown(); - - FOpenPypeCommands::Unregister(); -} - - -void FOpenPypeModule::RegisterSettings() -{ - ISettingsModule& SettingsModule = FModuleManager::LoadModuleChecked("Settings"); - - // Create the new category - // TODO: After the movement of the plugin from the game to editor, it might be necessary to move this! - ISettingsContainerPtr SettingsContainer = SettingsModule.GetContainer("Project"); - - UOpenPypeSettings* Settings = GetMutableDefault(); - - // Register the settings - ISettingsSectionPtr SettingsSection = SettingsModule.RegisterSettings("Project", "OpenPype", "General", - LOCTEXT("RuntimeGeneralSettingsName", - "General"), - LOCTEXT("RuntimeGeneralSettingsDescription", - "Base configuration for Open Pype Module"), - Settings - ); - - // Register the save handler to your settings, you might want to use it to - // validate those or just act to settings changes. - if (SettingsSection.IsValid()) - { - SettingsSection->OnModified().BindRaw(this, &FOpenPypeModule::HandleSettingsSaved); - } -} - -bool FOpenPypeModule::HandleSettingsSaved() -{ - UOpenPypeSettings* Settings = GetMutableDefault(); - bool ResaveSettings = false; - - // You can put any validation code in here and resave the settings in case an invalid - // value has been entered - - if (ResaveSettings) - { - Settings->SaveConfig(); - } - - return true; -} - -void FOpenPypeModule::RegisterMenus() -{ - // Owner will be used for cleanup in call to UToolMenus::UnregisterOwner - FToolMenuOwnerScoped OwnerScoped(this); - - { - UToolMenu* Menu = UToolMenus::Get()->ExtendMenu("LevelEditor.MainMenu.Tools"); - { - // FToolMenuSection& Section = Menu->FindOrAddSection("OpenPype"); - FToolMenuSection& Section = Menu->AddSection( - "OpenPype", - TAttribute(FText::FromString("OpenPype")), - FToolMenuInsert("Programming", EToolMenuInsertType::Before) - ); - Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeTools, PluginCommands); - Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeToolsDialog, PluginCommands); - } - UToolMenu* ToolbarMenu = UToolMenus::Get()->ExtendMenu("LevelEditor.LevelEditorToolBar.PlayToolBar"); - { - FToolMenuSection& Section = ToolbarMenu->FindOrAddSection("PluginTools"); - { - FToolMenuEntry& Entry = Section.AddEntry( - FToolMenuEntry::InitToolBarButton(FOpenPypeCommands::Get().OpenPypeTools)); - Entry.SetCommandList(PluginCommands); - } - } - } -} - - -void FOpenPypeModule::MenuPopup() -{ - UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); - bridge->RunInPython_Popup(); -} - -void FOpenPypeModule::MenuDialog() -{ - UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); - bridge->RunInPython_Dialog(); -} - -IMPLEMENT_MODULE(FOpenPypeModule, OpenPype) diff --git 
a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp deleted file mode 100644 index 6187bd7c7e..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright Epic Games, Inc. All Rights Reserved. - -#include "OpenPypeCommands.h" - -#define LOCTEXT_NAMESPACE "FOpenPypeModule" - -void FOpenPypeCommands::RegisterCommands() -{ - UI_COMMAND(OpenPypeTools, "OpenPype Tools", "Pipeline tools", EUserInterfaceActionType::Button, FInputChord()); - UI_COMMAND(OpenPypeToolsDialog, "OpenPype Tools Dialog", "Pipeline tools dialog", EUserInterfaceActionType::Button, FInputChord()); -} - -#undef LOCTEXT_NAMESPACE diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp deleted file mode 100644 index a58e921288..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp +++ /dev/null @@ -1,52 +0,0 @@ -#include "OpenPypeLib.h" - -#include "AssetViewUtils.h" -#include "Misc/Paths.h" -#include "Misc/ConfigCacheIni.h" -#include "UObject/UnrealType.h" - -/** - * Sets color on folder icon on given path - * @param InPath - path to folder - * @param InFolderColor - color of the folder - * @warning This color will appear only after Editor restart. Is there a better way? - */ - -bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor, const bool& bForceAdd) -{ - if (AssetViewUtils::DoesFolderExist(FolderPath)) - { - const TSharedPtr LinearColor = MakeShared(FolderColor); - - AssetViewUtils::SaveColor(FolderPath, LinearColor, true); - UE_LOG(LogAssetData, Display, TEXT("A color {%s} has been set to folder \"%s\""), *LinearColor->ToString(), - *FolderPath) - return true; - } - - UE_LOG(LogAssetData, Display, TEXT("Setting a color {%s} to folder \"%s\" has failed! 
Directory doesn't exist!"), - *FolderColor.ToString(), *FolderPath) - return false; -} - -/** - * Returns all poperties on given object - * @param cls - class - * @return TArray of properties - */ -TArray UOpenPypeLib::GetAllProperties(UClass* cls) -{ - TArray Ret; - if (cls != nullptr) - { - for (TFieldIterator It(cls); It; ++It) - { - FProperty* Property = *It; - if (Property->HasAnyPropertyFlags(EPropertyFlags::CPF_Edit)) - { - Ret.Add(Property->GetName()); - } - } - } - return Ret; -} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp deleted file mode 100644 index 0b56111a49..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp +++ /dev/null @@ -1,201 +0,0 @@ -#pragma once - -#include "OpenPypePublishInstance.h" -#include "AssetRegistry/AssetRegistryModule.h" -#include "AssetToolsModule.h" -#include "Framework/Notifications/NotificationManager.h" -#include "OpenPypeLib.h" -#include "OpenPypeSettings.h" -#include "Widgets/Notifications/SNotificationList.h" - - -//Moves all the invalid pointers to the end to prepare them for the shrinking -#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \ - VAR.Shrink(); - -UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer) - : UPrimaryDataAsset(ObjectInitializer) -{ - const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked< - FAssetRegistryModule>("AssetRegistry"); - - const FPropertyEditorModule& PropertyEditorModule = FModuleManager::LoadModuleChecked( - "PropertyEditor"); - - FString Left, Right; - GetPathName().Split("/" + GetName(), &Left, &Right); - - FARFilter Filter; - Filter.PackagePaths.Emplace(FName(Left)); - - TArray FoundAssets; - AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets); - - for (const FAssetData& AssetData : FoundAssets) - OnAssetCreated(AssetData); - - REMOVE_INVALID_ENTRIES(AssetDataInternal) - REMOVE_INVALID_ENTRIES(AssetDataExternal) - - AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated); - AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved); - AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated); - -#ifdef WITH_EDITOR - ColorOpenPypeDirs(); -#endif -} - -void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData) -{ - TArray split; - - UObject* Asset = InAssetData.GetAsset(); - - if (!IsValid(Asset)) - { - UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! 
Skipping the addition."), - *InAssetData.GetObjectPathString()); - return; - } - - const bool result = IsUnderSameDir(Asset) && Cast(Asset) == nullptr; - - if (result) - { - if (AssetDataInternal.Emplace(Asset).IsValidId()) - { - UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"), - *this->GetName(), *Asset->GetName()); - } - } -} - -void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData) -{ - if (Cast(InAssetData.GetAsset()) == nullptr) - { - if (AssetDataInternal.Contains(nullptr)) - { - AssetDataInternal.Remove(nullptr); - REMOVE_INVALID_ENTRIES(AssetDataInternal) - } - else - { - AssetDataExternal.Remove(nullptr); - REMOVE_INVALID_ENTRIES(AssetDataExternal) - } - } -} - -void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData) -{ - REMOVE_INVALID_ENTRIES(AssetDataInternal); - REMOVE_INVALID_ENTRIES(AssetDataExternal); -} - -bool UOpenPypePublishInstance::IsUnderSameDir(const UObject* InAsset) const -{ - FString ThisLeft, ThisRight; - this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight); - - return InAsset->GetPathName().StartsWith(ThisLeft); -} - -#ifdef WITH_EDITOR - -void UOpenPypePublishInstance::ColorOpenPypeDirs() -{ - FString PathName = this->GetPathName(); - - //Check whether the path contains the defined OpenPype folder - if (!PathName.Contains(TEXT("OpenPype"))) return; - - //Get the base path for open pype - FString PathLeft, PathRight; - PathName.Split(FString("OpenPype"), &PathLeft, &PathRight); - - if (PathLeft.IsEmpty() || PathRight.IsEmpty()) - { - UE_LOG(LogAssetData, Error, TEXT("Failed to retrieve the base OpenPype directory!")) - return; - } - - PathName.RemoveFromEnd(PathRight, ESearchCase::CaseSensitive); - - //Get the current settings - const UOpenPypeSettings* Settings = GetMutableDefault(); - - //Color the base folder - UOpenPypeLib::SetFolderColor(PathName, Settings->GetFolderFColor(), false); - - //Get Sub paths, iterate through them and color them according to the folder color in UOpenPypeSettings - const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked( - "AssetRegistry"); - - TArray PathList; - - AssetRegistryModule.Get().GetSubPaths(PathName, PathList, true); - - if (PathList.Num() > 0) - { - for (const FString& Path : PathList) - { - UOpenPypeLib::SetFolderColor(Path, Settings->GetFolderFColor(), false); - } - } -} - -void UOpenPypePublishInstance::SendNotification(const FString& Text) const -{ - FNotificationInfo Info{FText::FromString(Text)}; - - Info.bFireAndForget = true; - Info.bUseLargeFont = false; - Info.bUseThrobber = false; - Info.bUseSuccessFailIcons = false; - Info.ExpireDuration = 4.f; - Info.FadeOutDuration = 2.f; - - FSlateNotificationManager::Get().AddNotification(Info); - - UE_LOG(LogAssetData, Warning, - TEXT( - "Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!" 
- ), *GetName() - ) -} - - -void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) -{ - Super::PostEditChangeProperty(PropertyChangedEvent); - - if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet && - PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED( - UOpenPypePublishInstance, AssetDataExternal)) - { - // Check for duplicated assets - for (const auto& Asset : AssetDataInternal) - { - if (AssetDataExternal.Contains(Asset)) - { - AssetDataExternal.Remove(Asset); - return SendNotification( - "You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!"); - } - } - - // Check if no UOpenPypePublishInstance type assets are included - for (const auto& Asset : AssetDataExternal) - { - if (Cast(Asset.Get()) != nullptr) - { - AssetDataExternal.Remove(Asset); - return SendNotification("You are not allowed to add publish instances!"); - } - } - } -} - -#endif diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp deleted file mode 100644 index 9b26da7fa4..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include "OpenPypePublishInstanceFactory.h" -#include "OpenPypePublishInstance.h" - -UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer) - : UFactory(ObjectInitializer) -{ - SupportedClass = UOpenPypePublishInstance::StaticClass(); - bCreateNew = false; - bEditorImport = true; -} - -UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) -{ - check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass())); - return NewObject(InParent, InClass, InName, Flags); -} - -bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const { - return false; -} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp deleted file mode 100644 index 8113231503..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include "OpenPypePythonBridge.h" - -UOpenPypePythonBridge* UOpenPypePythonBridge::Get() -{ - TArray OpenPypePythonBridgeClasses; - GetDerivedClasses(UOpenPypePythonBridge::StaticClass(), OpenPypePythonBridgeClasses); - int32 NumClasses = OpenPypePythonBridgeClasses.Num(); - if (NumClasses > 0) - { - return Cast(OpenPypePythonBridgeClasses[NumClasses - 1]->GetDefaultObject()); - } - return nullptr; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp deleted file mode 100644 index a6b9eba749..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp +++ /dev/null @@ -1,21 +0,0 @@ -๏ปฟ// Fill out your copyright notice in the Description page of Project Settings. 
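// The constructor below seeds FolderColor from DefaultOpenPypeSettings.ini
// (resolved via OPENPYPE_SETTINGS_FILEPATH) using GConfig, so a sensible
// default exists even before any saved project config has been written.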
- -#include "OpenPypeSettings.h" - -#include "Interfaces/IPluginManager.h" -#include "UObject/UObjectGlobals.h" - -/** - * Mainly is used for initializing default values if the DefaultOpenPypeSettings.ini file does not exist in the saved config - */ -UOpenPypeSettings::UOpenPypeSettings(const FObjectInitializer& ObjectInitializer) -{ - - const FString ConfigFilePath = OPENPYPE_SETTINGS_FILEPATH; - - // This has to be probably in the future set using the UE Reflection system - FColor Color; - GConfig->GetColor(TEXT("/Script/OpenPype.OpenPypeSettings"), TEXT("FolderColor"), Color, ConfigFilePath); - - FolderColor = Color; -} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp deleted file mode 100644 index 49e805da4d..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#include "OpenPypeStyle.h" -#include "OpenPype.h" -#include "Framework/Application/SlateApplication.h" -#include "Styling/SlateStyleRegistry.h" -#include "Slate/SlateGameResources.h" -#include "Interfaces/IPluginManager.h" -#include "Styling/SlateStyleMacros.h" - -#define RootToContentDir Style->RootToContentDir - -TSharedPtr FOpenPypeStyle::OpenPypeStyleInstance = nullptr; - -void FOpenPypeStyle::Initialize() -{ - if (!OpenPypeStyleInstance.IsValid()) - { - OpenPypeStyleInstance = Create(); - FSlateStyleRegistry::RegisterSlateStyle(*OpenPypeStyleInstance); - } -} - -void FOpenPypeStyle::Shutdown() -{ - FSlateStyleRegistry::UnRegisterSlateStyle(*OpenPypeStyleInstance); - ensure(OpenPypeStyleInstance.IsUnique()); - OpenPypeStyleInstance.Reset(); -} - -FName FOpenPypeStyle::GetStyleSetName() -{ - static FName StyleSetName(TEXT("OpenPypeStyle")); - return StyleSetName; -} - -const FVector2D Icon16x16(16.0f, 16.0f); -const FVector2D Icon20x20(20.0f, 20.0f); -const FVector2D Icon40x40(40.0f, 40.0f); - -TSharedRef< FSlateStyleSet > FOpenPypeStyle::Create() -{ - TSharedRef< FSlateStyleSet > Style = MakeShareable(new FSlateStyleSet("OpenPypeStyle")); - Style->SetContentRoot(IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Resources")); - - Style->Set("OpenPype.OpenPypeTools", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); - Style->Set("OpenPype.OpenPypeToolsDialog", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); - - return Style; -} - -void FOpenPypeStyle::ReloadTextures() -{ - if (FSlateApplication::IsInitialized()) - { - FSlateApplication::Get().GetRenderer()->ReloadTextureResources(); - } -} - -const ISlateStyle& FOpenPypeStyle::Get() -{ - return *OpenPypeStyleInstance; -} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h deleted file mode 100644 index 2c06e59d6f..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h +++ /dev/null @@ -1,39 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. 
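// UAssetContainer tracks the assets living under its own directory by
// subscribing to the Asset Registry's OnAssetAdded, OnAssetRemoved and
// OnAssetRenamed delegates in its constructor.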
- -#pragma once - -#include "CoreMinimal.h" -#include "UObject/NoExportTypes.h" -#include "Engine/AssetUserData.h" -#include "AssetRegistry/AssetData.h" -#include "AssetContainer.generated.h" - -/** - * - */ -UCLASS(Blueprintable) -class OPENPYPE_API UAssetContainer : public UAssetUserData -{ - GENERATED_BODY() - -public: - - UAssetContainer(const FObjectInitializer& ObjectInitalizer); - // ~UAssetContainer(); - - UPROPERTY(EditAnywhere, BlueprintReadOnly) - TArray assets; - - // There seems to be no reflection option to expose array of FAssetData - /* - UPROPERTY(Transient, BlueprintReadOnly, Category = "Python", meta=(DisplayName="Assets Data")) - TArray assetsData; - */ -private: - TArray assetsData; - void OnAssetAdded(const FAssetData& AssetData); - void OnAssetRemoved(const FAssetData& AssetData); - void OnAssetRenamed(const FAssetData& AssetData, const FString& str); -}; - - diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h deleted file mode 100644 index 331ce6bb50..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h +++ /dev/null @@ -1,21 +0,0 @@ -// Fill out your copyright notice in the Description page of Project Settings. - -#pragma once - -#include "CoreMinimal.h" -#include "Factories/Factory.h" -#include "AssetContainerFactory.generated.h" - -/** - * - */ -UCLASS() -class OPENPYPE_API UAssetContainerFactory : public UFactory -{ - GENERATED_BODY() - -public: - UAssetContainerFactory(const FObjectInitializer& ObjectInitializer); - virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; - virtual bool ShouldShowInNewMenu() const override; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h deleted file mode 100644 index 4261476da8..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. - -#pragma once - -#include "CoreMinimal.h" -#include "Modules/ModuleManager.h" - - -class FOpenPypeModule : public IModuleInterface -{ -public: - virtual void StartupModule() override; - virtual void ShutdownModule() override; - -private: - void RegisterMenus(); - void RegisterSettings(); - bool HandleSettingsSaved(); - - void MenuPopup(); - void MenuDialog(); - -private: - TSharedPtr PluginCommands; -}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h deleted file mode 100644 index 62ffb8de33..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright Epic Games, Inc. All Rights Reserved. 
- -#pragma once - -#include "CoreMinimal.h" -#include "Framework/Commands/Commands.h" -#include "OpenPypeStyle.h" - -class FOpenPypeCommands : public TCommands -{ -public: - - FOpenPypeCommands() - : TCommands(TEXT("OpenPype"), NSLOCTEXT("Contexts", "OpenPype", "OpenPype Tools"), NAME_None, FOpenPypeStyle::GetStyleSetName()) - { - } - - // TCommands<> interface - virtual void RegisterCommands() override; - -public: - TSharedPtr< FUICommandInfo > OpenPypeTools; - TSharedPtr< FUICommandInfo > OpenPypeToolsDialog; -}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h deleted file mode 100644 index 06425c7c7d..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "Engine.h" -#include "OpenPypeLib.generated.h" - - -UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypeLib : public UBlueprintFunctionLibrary -{ - - GENERATED_BODY() - -public: - UFUNCTION(BlueprintCallable, Category = Python) - static bool SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor,const bool& bForceAdd); - - UFUNCTION(BlueprintCallable, Category = Python) - static TArray GetAllProperties(UClass* cls); -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h deleted file mode 100644 index 146025bd6d..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h +++ /dev/null @@ -1,102 +0,0 @@ -#pragma once - -#include "Engine.h" -#include "OpenPypePublishInstance.generated.h" - - -UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset -{ - GENERATED_UCLASS_BODY() - -public: - /** - /** - * Retrieves all the assets which are monitored by the Publish Instance (Monitors assets in the directory which is - * placed in) - * - * @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5 - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetInternalAssets() const - { - //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. - TSet ResultSet; - - for (const auto& Asset : AssetDataInternal) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - - /** - * Retrieves all the assets which have been added manually by the Publish Instance - * - * @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5 - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetExternalAssets() const - { - //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. - TSet ResultSet; - - for (const auto& Asset : AssetDataExternal) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - - /** - * Function for returning all the assets in the container combined. - * - * @return Returns all the internal and externally added assets into one set (TSet of UObjects). Careful! They are - * returning raw pointers. Seems like an issue in UE5 - * - * @attention If the bAddExternalAssets variable is false, external assets won't be included! - */ - UFUNCTION(BlueprintCallable, BlueprintPure) - TSet GetAllAssets() const - { - const TSet>& IteratedSet = bAddExternalAssets - ? 
AssetDataInternal.Union(AssetDataExternal) - : AssetDataInternal; - - //Create a new TSet only with raw pointers. - TSet ResultSet; - - for (auto& Asset : IteratedSet) - ResultSet.Add(Asset.LoadSynchronous()); - - return ResultSet; - } - -private: - UPROPERTY(VisibleAnywhere, Category="Assets") - TSet> AssetDataInternal; - - /** - * This property allows exposing the array to include other assets from any other directory than what it's currently - * monitoring. NOTE: that these assets have to be added manually! They are not automatically registered or added! - */ - UPROPERTY(EditAnywhere, Category = "Assets") - bool bAddExternalAssets = false; - - UPROPERTY(EditAnywhere, meta=(EditCondition="bAddExternalAssets"), Category="Assets") - TSet> AssetDataExternal; - - - void OnAssetCreated(const FAssetData& InAssetData); - void OnAssetRemoved(const FAssetData& InAssetData); - void OnAssetUpdated(const FAssetData& InAssetData); - - bool IsUnderSameDir(const UObject* InAsset) const; - -#ifdef WITH_EDITOR - - void ColorOpenPypeDirs(); - - void SendNotification(const FString& Text) const; - virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override; - -#endif -}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h deleted file mode 100644 index 7d2c77fe6e..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "CoreMinimal.h" -#include "Factories/Factory.h" -#include "OpenPypePublishInstanceFactory.generated.h" - -/** - * - */ -UCLASS() -class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory -{ - GENERATED_BODY() - -public: - UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer); - virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; - virtual bool ShouldShowInNewMenu() const override; -}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h deleted file mode 100644 index 692aab2e5e..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h +++ /dev/null @@ -1,20 +0,0 @@ -#pragma once -#include "Engine.h" -#include "OpenPypePythonBridge.generated.h" - -UCLASS(Blueprintable) -class UOpenPypePythonBridge : public UObject -{ - GENERATED_BODY() - -public: - UFUNCTION(BlueprintCallable, Category = Python) - static UOpenPypePythonBridge* Get(); - - UFUNCTION(BlueprintImplementableEvent, Category = Python) - void RunInPython_Popup() const; - - UFUNCTION(BlueprintImplementableEvent, Category = Python) - void RunInPython_Dialog() const; - -}; diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h deleted file mode 100644 index aca80946bb..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h +++ /dev/null @@ -1,32 +0,0 @@ -๏ปฟ// Fill out your copyright notice in the Description page of Project Settings. 
- -#pragma once - -#include "CoreMinimal.h" -#include "UObject/Object.h" -#include "OpenPypeSettings.generated.h" - -#define OPENPYPE_SETTINGS_FILEPATH IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Config") / TEXT("DefaultOpenPypeSettings.ini") - -UCLASS(Config=OpenPypeSettings, DefaultConfig) -class OPENPYPE_API UOpenPypeSettings : public UObject -{ - GENERATED_UCLASS_BODY() - - UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) - FColor GetFolderFColor() const - { - return FolderColor; - } - - UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) - FLinearColor GetFolderFLinearColor() const - { - return FLinearColor(FolderColor); - } - -protected: - - UPROPERTY(config, EditAnywhere, Category = Folders) - FColor FolderColor = FColor(25,45,223); -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h deleted file mode 100644 index ae704251e1..0000000000 --- a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once -#include "CoreMinimal.h" -#include "Styling/SlateStyle.h" - -class FOpenPypeStyle -{ -public: - static void Initialize(); - static void Shutdown(); - static void ReloadTextures(); - static const ISlateStyle& Get(); - static FName GetStyleSetName(); - - -private: - static TSharedRef< class FSlateStyleSet > Create(); - static TSharedPtr< class FSlateStyleSet > OpenPypeStyleInstance; -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/lib.py b/openpype/hosts/unreal/lib.py index 095f5e414b..97771472cf 100644 --- a/openpype/hosts/unreal/lib.py +++ b/openpype/hosts/unreal/lib.py @@ -4,6 +4,9 @@ import os import platform import json + +from typing import List + from distutils import dir_util import subprocess import re @@ -19,6 +22,8 @@ def get_engine_versions(env=None): Location can be overridden by `UNREAL_ENGINE_LOCATION` environment variable. + .. deprecated:: 3.15.4 + Args: env (dict, optional): Environment to use. @@ -73,7 +78,7 @@ def get_engine_versions(env=None): return OrderedDict() -def get_editor_executable_path(engine_path: Path, engine_version: str) -> Path: +def get_editor_exe_path(engine_path: Path, engine_version: str) -> Path: """Get UE Editor executable path.""" ue_path = engine_path / "Engine/Binaries" if platform.system().lower() == "windows": @@ -99,6 +104,8 @@ def _win_get_engine_versions(): This file is JSON file listing installed stuff, Unreal engines are marked with `"AppName" = "UE_X.XX"`` like `UE_4.24` + .. deprecated:: 3.15.4 + Returns: dict: version as a key and path as a value. @@ -118,6 +125,8 @@ def _darwin_get_engine_version() -> dict: It works the same as on Windows, just JSON file location is different. + .. deprecated:: 3.15.4 + Returns: dict: version as a key and path as a value. @@ -140,6 +149,8 @@ def _darwin_get_engine_version() -> dict: def _parse_launcher_locations(install_json_path: str) -> dict: """This will parse locations from json file. + .. deprecated:: 3.15.4 + Args: install_json_path (str): Path to `LauncherInstalled.dat`. @@ -177,7 +188,7 @@ def create_unreal_project(project_name: str, As there is no way I know to create a project via command line, this is easiest option. Unreal project file is basically a JSON file. 
If we find - the `OPENPYPE_UNREAL_PLUGIN` environment variable we assume this is the + the `AYON_UNREAL_PLUGIN` environment variable we assume this is the location of the Integration Plugin and we copy its content to the project folder and enable this plugin. @@ -191,8 +202,7 @@ def create_unreal_project(project_name: str, sources. This will trigger automatically if `Binaries` directory is not found in plugin folders as this indicates this is only source distribution of the plugin. Dev mode - is also set by preset file `unreal/project_setup.json` in - **OPENPYPE_CONFIG**. + is also set in Settings. env (dict, optional): Environment to use. If not set, `os.environ`. Throws: @@ -214,77 +224,71 @@ def create_unreal_project(project_name: str, # created in different UE4 version. When user convert such project # to his UE4 version, Engine ID is replaced in uproject file. If some # other user tries to open it, it will present him with similar error. - ue_modules = Path() - if platform.system().lower() == "windows": - ue_modules_path = engine_path / "Engine/Binaries/Win64" - if ue_version.split(".")[0] == "4": - ue_modules_path /= "UE4Editor.modules" - elif ue_version.split(".")[0] == "5": - ue_modules_path /= "UnrealEditor.modules" - ue_modules = Path(ue_modules_path) - if platform.system().lower() == "linux": - ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Linux", "UE4Editor.modules")) + # engine_path should be the location of UE_X.X folder - if platform.system().lower() == "darwin": - ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Mac", "UE4Editor.modules")) + ue_editor_exe: Path = get_editor_exe_path(engine_path, ue_version) + cmdlet_project: Path = get_path_to_cmdlet_project(ue_version) - if ue_modules.exists(): - print("--- Loading Engine ID from modules file ...") - with open(ue_modules, "r") as mp: - loaded_modules = json.load(mp) + project_file = pr_dir / f"{project_name}.uproject" - if loaded_modules.get("BuildId"): - ue_id = "{" + loaded_modules.get("BuildId") + "}" - - plugins_path = None - if os.path.isdir(env.get("OPENPYPE_UNREAL_PLUGIN", "")): - # copy plugin to correct path under project - plugins_path = pr_dir / "Plugins" - openpype_plugin_path = plugins_path / "OpenPype" - if not openpype_plugin_path.is_dir(): - openpype_plugin_path.mkdir(parents=True, exist_ok=True) - dir_util._path_created = {} - dir_util.copy_tree(os.environ.get("OPENPYPE_UNREAL_PLUGIN"), - openpype_plugin_path.as_posix()) - - if not (openpype_plugin_path / "Binaries").is_dir() \ - or not (openpype_plugin_path / "Intermediate").is_dir(): - dev_mode = True - - # data for project file - data = { - "FileVersion": 3, - "EngineAssociation": ue_id, - "Category": "", - "Description": "", - "Plugins": [ - {"Name": "PythonScriptPlugin", "Enabled": True}, - {"Name": "EditorScriptingUtilities", "Enabled": True}, - {"Name": "SequencerScripting", "Enabled": True}, - {"Name": "MovieRenderPipeline", "Enabled": True}, - {"Name": "OpenPype", "Enabled": True} - ] - } + print("--- Generating a new project ...") + commandlet_cmd = [f'{ue_editor_exe.as_posix()}', + f'{cmdlet_project.as_posix()}', + f'-run=AyonGenerateProject', + f'{project_file.resolve().as_posix()}'] if dev_mode or preset["dev_mode"]: - # this will add the project module and necessary source file to - # make it a C++ project and to (hopefully) make Unreal Editor to - # compile all # sources at start + commandlet_cmd.append('-GenerateCode') - data["Modules"] = [{ - "Name": project_name, - "Type": "Runtime", - 
"LoadingPhase": "Default", - "AdditionalDependencies": ["Engine"], - }] + gen_process = subprocess.Popen(commandlet_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) - # write project file - project_file = pr_dir / f"{project_name}.uproject" - with open(project_file, mode="w") as pf: - json.dump(data, pf, indent=4) + for line in gen_process.stdout: + print(line.decode(), end='') + gen_process.stdout.close() + return_code = gen_process.wait() + + if return_code and return_code != 0: + raise RuntimeError(f'Failed to generate \'{project_name}\' project! ' + f'Exited with return code {return_code}') + + print("--- Project has been generated successfully.") + + with open(project_file.as_posix(), mode="r+") as pf: + pf_json = json.load(pf) + pf_json["EngineAssociation"] = get_build_id(engine_path, ue_version) + pf.seek(0) + json.dump(pf_json, pf, indent=4) + pf.truncate() + print(f'--- Engine ID has been written into the project file') + + if dev_mode or preset["dev_mode"]: + u_build_tool = get_path_to_ubt(engine_path, ue_version) + + arch = "Win64" + if platform.system().lower() == "windows": + arch = "Win64" + elif platform.system().lower() == "linux": + arch = "Linux" + elif platform.system().lower() == "darwin": + # we need to test this out + arch = "Mac" + + command1 = [u_build_tool.as_posix(), "-projectfiles", + f"-project={project_file}", "-progress"] + + subprocess.run(command1) + + command2 = [u_build_tool.as_posix(), + f"-ModuleWithSuffix={project_name},3555", arch, + "Development", "-TargetType=Editor", + f'-Project={project_file}', + f'{project_file}', + "-IgnoreJunk"] + + subprocess.run(command2) # ensure we have PySide2 installed in engine python_path = None @@ -307,176 +311,193 @@ def create_unreal_project(project_name: str, subprocess.check_call( [python_path.as_posix(), "-m", "pip", "install", "pyside2"]) - if dev_mode or preset["dev_mode"]: - _prepare_cpp_project(project_file, engine_path, ue_version) + +def get_path_to_uat(engine_path: Path) -> Path: + if platform.system().lower() == "windows": + return engine_path / "Engine/Build/BatchFiles/RunUAT.bat" + + if platform.system().lower() in ["linux", "darwin"]: + return engine_path / "Engine/Build/BatchFiles/RunUAT.sh" -def _prepare_cpp_project( - project_file: Path, engine_path: Path, ue_version: str) -> None: - """Prepare CPP Unreal Project. - - This function will add source files needed for project to be - rebuild along with the OpenPype integration plugin. - - There seems not to be automated way to do it from command line. - But there might be way to create at least those target and build files - by some generator. This needs more research as manually writing - those files is rather hackish. :skull_and_crossbones: +def get_compatible_integration( + ue_version: str, integration_root: Path) -> List[Path]: + """Get path to compatible version of integration plugin. + This will try to get the closest compatible versions to the one + specified in sorted list. Args: - project_file (str): Path to .uproject file. - engine_path (str): Path to unreal engine associated with project. + ue_version (str): version of the current Unreal Engine. + integration_root (Path): path to built-in integration plugins. + + Returns: + list of Path: Sorted list of paths closest to the specified + version. 
""" - project_name = project_file.stem - project_dir = project_file.parent - targets_dir = project_dir / "Source" - sources_dir = targets_dir / project_name + major, minor = ue_version.split(".") + integration_paths = [p for p in integration_root.iterdir() + if p.is_dir()] - sources_dir.mkdir(parents=True, exist_ok=True) - (project_dir / "Content").mkdir(parents=True, exist_ok=True) + compatible_versions = [] + for i in integration_paths: + # parse version from path + try: + i_major, i_minor = re.search( + r"(?P\d+).(?P\d+)$", i.name).groups() + except AttributeError: + # in case there is no match, just skip to next + continue - module_target = ''' -using UnrealBuildTool; -using System.Collections.Generic; + # consider versions with different major so different that they + # are incompatible + if int(major) != int(i_major): + continue -public class {0}Target : TargetRules -{{ - public {0}Target( TargetInfo Target) : base(Target) - {{ - Type = TargetType.Game; - ExtraModuleNames.AddRange( new string[] {{ "{0}" }} ); - }} -}} -'''.format(project_name) + compatible_versions.append(i) - editor_module_target = ''' -using UnrealBuildTool; -using System.Collections.Generic; + sorted(set(compatible_versions)) + return compatible_versions -public class {0}EditorTarget : TargetRules -{{ - public {0}EditorTarget( TargetInfo Target) : base(Target) - {{ - Type = TargetType.Editor; - ExtraModuleNames.AddRange( new string[] {{ "{0}" }} ); - }} -}} -'''.format(project_name) +def get_path_to_cmdlet_project(ue_version: str) -> Path: + cmd_project = Path( + os.path.abspath(os.getenv("OPENPYPE_ROOT"))) - module_build = ''' -using UnrealBuildTool; -public class {0} : ModuleRules -{{ - public {0}(ReadOnlyTargetRules Target) : base(Target) - {{ - PCHUsage = PCHUsageMode.UseExplicitOrSharedPCHs; - PublicDependencyModuleNames.AddRange(new string[] {{ "Core", - "CoreUObject", "Engine", "InputCore" }}); - PrivateDependencyModuleNames.AddRange(new string[] {{ }}); - }} -}} -'''.format(project_name) + # For now, only tested on Windows (For Linux and Mac + # it has to be implemented) + cmd_project /= f"openpype/hosts/unreal/integration/UE_{ue_version}" - module_cpp = ''' -#include "{0}.h" -#include "Modules/ModuleManager.h" + # if the integration doesn't exist for current engine version + # try to find the closest to it. 
+ if cmd_project.exists(): + return cmd_project / "CommandletProject/CommandletProject.uproject" -IMPLEMENT_PRIMARY_GAME_MODULE( FDefaultGameModuleImpl, {0}, "{0}" ); -'''.format(project_name) + if compatible_versions := get_compatible_integration( + ue_version, cmd_project.parent + ): + return compatible_versions[-1] / "CommandletProject/CommandletProject.uproject" # noqa: E501 + else: + raise RuntimeError( + ("There are no compatible versions of Unreal " + "integration plugin compatible with running version " + f"of Unreal Engine {ue_version}")) - module_header = ''' -#pragma once -#include "CoreMinimal.h" -''' - - game_mode_cpp = ''' -#include "{0}GameModeBase.h" -'''.format(project_name) - - game_mode_h = ''' -#pragma once - -#include "CoreMinimal.h" -#include "GameFramework/GameModeBase.h" -#include "{0}GameModeBase.generated.h" - -UCLASS() -class {1}_API A{0}GameModeBase : public AGameModeBase -{{ - GENERATED_BODY() -}}; -'''.format(project_name, project_name.upper()) - - with open(targets_dir / f"{project_name}.Target.cs", mode="w") as f: - f.write(module_target) - - with open(targets_dir / f"{project_name}Editor.Target.cs", mode="w") as f: - f.write(editor_module_target) - - with open(sources_dir / f"{project_name}.Build.cs", mode="w") as f: - f.write(module_build) - - with open(sources_dir / f"{project_name}.cpp", mode="w") as f: - f.write(module_cpp) - - with open(sources_dir / f"{project_name}.h", mode="w") as f: - f.write(module_header) - - with open(sources_dir / f"{project_name}GameModeBase.cpp", mode="w") as f: - f.write(game_mode_cpp) - - with open(sources_dir / f"{project_name}GameModeBase.h", mode="w") as f: - f.write(game_mode_h) +def get_path_to_ubt(engine_path: Path, ue_version: str) -> Path: u_build_tool_path = engine_path / "Engine/Binaries/DotNET" + if ue_version.split(".")[0] == "4": u_build_tool_path /= "UnrealBuildTool.exe" elif ue_version.split(".")[0] == "5": u_build_tool_path /= "UnrealBuildTool/UnrealBuildTool.exe" - u_build_tool = Path(u_build_tool_path) - u_header_tool = None - arch = "Win64" + return Path(u_build_tool_path) + + +def get_build_id(engine_path: Path, ue_version: str) -> str: + ue_modules = Path() if platform.system().lower() == "windows": - arch = "Win64" - u_header_tool = Path( - engine_path / "Engine/Binaries/Win64/UnrealHeaderTool.exe") - elif platform.system().lower() == "linux": - arch = "Linux" - u_header_tool = Path( - engine_path / "Engine/Binaries/Linux/UnrealHeaderTool") - elif platform.system().lower() == "darwin": - # we need to test this out - arch = "Mac" - u_header_tool = Path( - engine_path / "Engine/Binaries/Mac/UnrealHeaderTool") + ue_modules_path = engine_path / "Engine/Binaries/Win64" + if ue_version.split(".")[0] == "4": + ue_modules_path /= "UE4Editor.modules" + elif ue_version.split(".")[0] == "5": + ue_modules_path /= "UnrealEditor.modules" + ue_modules = Path(ue_modules_path) - if not u_header_tool: - raise NotImplementedError("Unsupported platform") + if platform.system().lower() == "linux": + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + "Linux", "UE4Editor.modules")) - command1 = [u_build_tool.as_posix(), "-projectfiles", - f"-project={project_file}", "-progress"] + if platform.system().lower() == "darwin": + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + "Mac", "UE4Editor.modules")) - subprocess.run(command1) + if ue_modules.exists(): + print("--- Loading Engine ID from modules file ...") + with open(ue_modules, "r") as mp: + loaded_modules = json.load(mp) - command2 = 
[u_build_tool.as_posix(), - f"-ModuleWithSuffix={project_name},3555", arch, - "Development", "-TargetType=Editor", - f'-Project={project_file}', - f'{project_file}', - "-IgnoreJunk"] + if loaded_modules.get("BuildId"): + return "{" + loaded_modules.get("BuildId") + "}" - subprocess.run(command2) - """ - uhtmanifest = os.path.join(os.path.dirname(project_file), - f"{project_name}.uhtmanifest") +def check_plugin_existence(engine_path: Path, env: dict = None) -> bool: + env = env or os.environ + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) - command3 = [u_header_tool, f'"{project_file}"', f'"{uhtmanifest}"', - "-Unattended", "-WarningsAsErrors", "-installed"] + if not os.path.isdir(integration_plugin_path): + raise RuntimeError("Path to the integration plugin is null!") - subprocess.run(command3) - """ + # Create a path to the plugin in the engine + op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" + + if not op_plugin_path.is_dir(): + return False + + if not (op_plugin_path / "Binaries").is_dir() \ + or not (op_plugin_path / "Intermediate").is_dir(): + return False + + return True + + +def try_installing_plugin(engine_path: Path, env: dict = None) -> None: + env = env or os.environ + + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) + + if not os.path.isdir(integration_plugin_path): + raise RuntimeError("Path to the integration plugin is null!") + + # Create a path to the plugin in the engine + op_plugin_path: Path = engine_path / "Engine/Plugins/Marketplace/Ayon" + + if not op_plugin_path.is_dir(): + op_plugin_path.mkdir(parents=True, exist_ok=True) + + engine_plugin_config_path: Path = op_plugin_path / "Config" + engine_plugin_config_path.mkdir(exist_ok=True) + + dir_util._path_created = {} + + if not (op_plugin_path / "Binaries").is_dir() \ + or not (op_plugin_path / "Intermediate").is_dir(): + _build_and_move_plugin(engine_path, op_plugin_path, env) + + +def _build_and_move_plugin(engine_path: Path, + plugin_build_path: Path, + env: dict = None) -> None: + uat_path: Path = get_path_to_uat(engine_path) + + env = env or os.environ + integration_plugin_path: Path = Path(env.get("AYON_UNREAL_PLUGIN", "")) + + if uat_path.is_file(): + temp_dir: Path = integration_plugin_path.parent / "Temp" + temp_dir.mkdir(exist_ok=True) + uplugin_path: Path = integration_plugin_path / "Ayon.uplugin" + + # in order to successfully build the plugin, + # It must be built outside the Engine directory and then moved + build_plugin_cmd: List[str] = [f'{uat_path.as_posix()}', + 'BuildPlugin', + f'-Plugin={uplugin_path.as_posix()}', + f'-Package={temp_dir.as_posix()}'] + subprocess.run(build_plugin_cmd) + + # Copy the contents of the 'Temp' dir into the + # 'Ayon' directory in the engine + dir_util.copy_tree(temp_dir.as_posix(), plugin_build_path.as_posix()) + + # We need to also copy the config folder. 
+ # The UAT doesn't include the Config folder in the build + plugin_install_config_path: Path = plugin_build_path / "Config" + integration_plugin_config_path = integration_plugin_path / "Config" + + dir_util.copy_tree(integration_plugin_config_path.as_posix(), + plugin_install_config_path.as_posix()) + + dir_util.remove_tree(temp_dir.as_posix()) diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py index bf1489d688..73afb6cefd 100644 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ b/openpype/hosts/unreal/plugins/create/create_camera.py @@ -1,41 +1,38 @@ +# -*- coding: utf-8 -*- import unreal -from unreal import EditorAssetLibrary as eal -from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.pipeline import instantiate -from openpype.pipeline import LegacyCreator +from openpype.pipeline import CreatorError +from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION +from openpype.hosts.unreal.api.plugin import ( + UnrealAssetCreator, +) -class CreateCamera(LegacyCreator): - """Layout output for character rigs""" +class CreateCamera(UnrealAssetCreator): + """Create Camera.""" - name = "layoutMain" + identifier = "io.ayon.creators.unreal.camera" label = "Camera" family = "camera" - icon = "cubes" + icon = "fa.camera" - root = "/Game/OpenPype/Instances" - suffix = "_INS" + def create(self, subset_name, instance_data, pre_create_data): + if pre_create_data.get("use_selection"): + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [a.get_path_name() for a in sel_objects] - def __init__(self, *args, **kwargs): - super(CreateCamera, self).__init__(*args, **kwargs) + if len(selection) != 1: + raise CreatorError("Please select only one object.") - def process(self): - data = self.data + # Add the current level path to the metadata + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() - name = data["subset"] + instance_data["level"] = world.get_path_name() - data["level"] = ell.get_editor_world().get_path_name() - - if not eal.does_directory_exist(self.root): - eal.make_directory(self.root) - - factory = unreal.LevelSequenceFactoryNew() - tools = unreal.AssetToolsHelpers().get_asset_tools() - tools.create_asset(name, f"{self.root}/{name}", None, factory) - - asset_name = f"{self.root}/{name}/{name}.{name}" - - data["members"] = [asset_name] - - instantiate(f"{self.root}", name, data, None, self.suffix) + super(CreateCamera, self).create( + subset_name, + instance_data, + pre_create_data) diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index c1067b00d9..e5c7b8ee19 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,42 +1,13 @@ # -*- coding: utf-8 -*- -from unreal import EditorLevelLibrary - -from openpype.pipeline import LegacyCreator -from openpype.hosts.unreal.api.pipeline import instantiate +from openpype.hosts.unreal.api.plugin import ( + UnrealActorCreator, +) -class CreateLayout(LegacyCreator): +class CreateLayout(UnrealActorCreator): """Layout output for character rigs.""" - name = "layoutMain" + identifier = "io.ayon.creators.unreal.layout" label = "Layout" family = "layout" icon = "cubes" - - root = "/Game" - suffix = "_INS" - - def __init__(self, *args, **kwargs): - super(CreateLayout, 
self).__init__(*args, **kwargs) - - def process(self): - data = self.data - - name = data["subset"] - - selection = [] - # if (self.options or {}).get("useSelection"): - # sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - # selection = [a.get_path_name() for a in sel_objects] - - data["level"] = EditorLevelLibrary.get_editor_world().get_path_name() - - data["members"] = [] - - if (self.options or {}).get("useSelection"): - # Set as members the selected actors - for actor in EditorLevelLibrary.get_selected_level_actors(): - data["members"].append("{}.{}".format( - actor.get_outer().get_name(), actor.get_name())) - - instantiate(self.root, name, data, selection, self.suffix) diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py index 4abf3f6095..e15b57b2ee 100644 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ b/openpype/hosts/unreal/plugins/create/create_look.py @@ -1,56 +1,57 @@ # -*- coding: utf-8 -*- -"""Create look in Unreal.""" -import unreal # noqa -from openpype.hosts.unreal.api import pipeline, plugin -from openpype.pipeline import LegacyCreator +import unreal + +from openpype.pipeline import CreatorError +from openpype.hosts.unreal.api.pipeline import ( + create_folder +) +from openpype.hosts.unreal.api.plugin import ( + UnrealAssetCreator +) +from openpype.lib import UILabelDef -class CreateLook(LegacyCreator): +class CreateLook(UnrealAssetCreator): """Shader connections defining shape look.""" - name = "unrealLook" - label = "Unreal - Look" + identifier = "io.ayon.creators.unreal.look" + label = "Look" family = "look" icon = "paint-brush" - root = "/Game/Avalon/Assets" - suffix = "_INS" + def create(self, subset_name, instance_data, pre_create_data): + # We need to set this to True for the parent class to work + pre_create_data["use_selection"] = True + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [a.get_path_name() for a in sel_objects] - def __init__(self, *args, **kwargs): - super(CreateLook, self).__init__(*args, **kwargs) + if len(selection) != 1: + raise CreatorError("Please select only one asset.") - def process(self): - name = self.data["subset"] + selected_asset = selection[0] - selection = [] - if (self.options or {}).get("useSelection"): - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [a.get_path_name() for a in sel_objects] + look_directory = "/Game/Ayon/Looks" # Create the folder - path = f"{self.root}/{self.data['asset']}" - new_name = pipeline.create_folder(path, name) - full_path = f"{path}/{new_name}" + folder_name = create_folder(look_directory, subset_name) + path = f"{look_directory}/{folder_name}" + + instance_data["look"] = path # Create a new cube static mesh ar = unreal.AssetRegistryHelpers.get_asset_registry() cube = ar.get_asset_by_object_path("/Engine/BasicShapes/Cube.Cube") - # Create the avalon publish instance object - container_name = f"{name}{self.suffix}" - pipeline.create_publish_instance( - instance=container_name, path=full_path) - # Get the mesh of the selected object - original_mesh = ar.get_asset_by_object_path(selection[0]).get_asset() - materials = original_mesh.get_editor_property('materials') + original_mesh = ar.get_asset_by_object_path(selected_asset).get_asset() + materials = original_mesh.get_editor_property('static_materials') - self.data["members"] = [] + pre_create_data["members"] = [] # Add the materials to the cube for material in materials: - name = 
material.get_editor_property('material_slot_name') - object_path = f"{full_path}/{name}.{name}" + mat_name = material.get_editor_property('material_slot_name') + object_path = f"{path}/{mat_name}.{mat_name}" unreal_object = unreal.EditorAssetLibrary.duplicate_loaded_asset( cube.get_asset(), object_path ) @@ -61,8 +62,16 @@ class CreateLook(LegacyCreator): unreal_object.add_material( material.get_editor_property('material_interface')) - self.data["members"].append(object_path) + pre_create_data["members"].append(object_path) unreal.EditorAssetLibrary.save_asset(object_path) - pipeline.imprint(f"{full_path}/{container_name}", self.data) + super(CreateLook, self).create( + subset_name, + instance_data, + pre_create_data) + + def get_pre_create_attr_defs(self): + return [ + UILabelDef("Select the asset from which to create the look.") + ] diff --git a/openpype/hosts/unreal/plugins/create/create_render.py b/openpype/hosts/unreal/plugins/create/create_render.py index a85d17421b..5f561e68ad 100644 --- a/openpype/hosts/unreal/plugins/create/create_render.py +++ b/openpype/hosts/unreal/plugins/create/create_render.py @@ -1,117 +1,276 @@ +# -*- coding: utf-8 -*- +from pathlib import Path + import unreal -from openpype.hosts.unreal.api import pipeline -from openpype.pipeline import LegacyCreator +from openpype.hosts.unreal.api.pipeline import ( + UNREAL_VERSION, + create_folder, + get_subsequences, +) +from openpype.hosts.unreal.api.plugin import ( + UnrealAssetCreator +) +from openpype.lib import ( + UILabelDef, + UISeparatorDef, + BoolDef, + NumberDef +) -class CreateRender(LegacyCreator): +class CreateRender(UnrealAssetCreator): """Create instance for sequence for rendering""" - name = "unrealRender" - label = "Unreal - Render" + identifier = "io.ayon.creators.unreal.render" + label = "Render" family = "render" - icon = "cube" - asset_types = ["LevelSequence"] + icon = "eye" - root = "/Game/OpenPype/PublishInstances" - suffix = "_INS" + def create_instance( + self, instance_data, subset_name, pre_create_data, + selected_asset_path, master_seq, master_lvl, seq_data + ): + instance_data["members"] = [selected_asset_path] + instance_data["sequence"] = selected_asset_path + instance_data["master_sequence"] = master_seq + instance_data["master_level"] = master_lvl + instance_data["output"] = seq_data.get('output') + instance_data["frameStart"] = seq_data.get('frame_range')[0] + instance_data["frameEnd"] = seq_data.get('frame_range')[1] - def process(self): - subset = self.data["subset"] + super(CreateRender, self).create( + subset_name, + instance_data, + pre_create_data) - ar = unreal.AssetRegistryHelpers.get_asset_registry() + def create_with_new_sequence( + self, subset_name, instance_data, pre_create_data + ): + # If the option to create a new level sequence is selected, + # create a new level sequence and a master level. - # The asset name is the the third element of the path which contains - # the map. - # The index of the split path is 3 because the first element is an - # empty string, as the path begins with "/Content". - a = unreal.EditorUtilityLibrary.get_selected_assets()[0] - asset_name = a.get_path_name().split("/")[3] + root = f"/Game/Ayon/Sequences" - # Get the master sequence and the master level. - # There should be only one sequence and one level in the directory. 
- filter = unreal.ARFilter( - class_names=["LevelSequence"], - package_paths=[f"/Game/OpenPype/{asset_name}"], - recursive_paths=False) - sequences = ar.get_assets(filter) - ms = sequences[0].get_editor_property('object_path') - filter = unreal.ARFilter( - class_names=["World"], - package_paths=[f"/Game/OpenPype/{asset_name}"], - recursive_paths=False) - levels = ar.get_assets(filter) - ml = levels[0].get_editor_property('object_path') + # Create a new folder for the sequence in root + sequence_dir_name = create_folder(root, subset_name) + sequence_dir = f"{root}/{sequence_dir_name}" - selection = [] - if (self.options or {}).get("useSelection"): - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [ - a.get_path_name() for a in sel_objects - if a.get_class().get_name() in self.asset_types] + unreal.log_warning(f"sequence_dir: {sequence_dir}") + + # Create the level sequence + asset_tools = unreal.AssetToolsHelpers.get_asset_tools() + seq = asset_tools.create_asset( + asset_name=subset_name, + package_path=sequence_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew()) + + seq.set_playback_start(pre_create_data.get("start_frame")) + seq.set_playback_end(pre_create_data.get("end_frame")) + + pre_create_data["members"] = [seq.get_path_name()] + + unreal.EditorAssetLibrary.save_asset(seq.get_path_name()) + + # Create the master level + if UNREAL_VERSION.major >= 5: + curr_level = unreal.LevelEditorSubsystem().get_current_level() else: - selection.append(self.data['sequence']) + world = unreal.EditorLevelLibrary.get_editor_world() + levels = unreal.EditorLevelUtils.get_levels(world) + curr_level = levels[0] if len(levels) else None + if not curr_level: + raise RuntimeError("No level loaded.") + curr_level_path = curr_level.get_outer().get_path_name() - unreal.log(f"selection: {selection}") + # If the level path does not start with "/Game/", the current + # level is a temporary, unsaved level. 
+        if curr_level_path.startswith("/Game/"):
+            if UNREAL_VERSION.major >= 5:
+                unreal.LevelEditorSubsystem().save_current_level()
+            else:
+                unreal.EditorLevelLibrary.save_current_level()
-        path = f"{self.root}"
-        unreal.EditorAssetLibrary.make_directory(path)
+        ml_path = f"{sequence_dir}/{subset_name}_MasterLevel"
+        if UNREAL_VERSION.major >= 5:
+            unreal.LevelEditorSubsystem().new_level(ml_path)
+        else:
+            unreal.EditorLevelLibrary.new_level(ml_path)
+
+        seq_data = {
+            "sequence": seq,
+            "output": f"{seq.get_name()}",
+            "frame_range": (
+                seq.get_playback_start(),
+                seq.get_playback_end())}
+
+        self.create_instance(
+            instance_data, subset_name, pre_create_data,
+            seq.get_path_name(), seq.get_path_name(), ml_path, seq_data)
+
+    def create_from_existing_sequence(
+        self, subset_name, instance_data, pre_create_data
+    ):
         ar = unreal.AssetRegistryHelpers.get_asset_registry()
-        for a in selection:
-            ms_obj = ar.get_asset_by_object_path(ms).get_asset()
+        sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
+        selection = [
+            a.get_path_name() for a in sel_objects
+            if a.get_class().get_name() == "LevelSequence"]
-            seq_data = None
+        if len(selection) == 0:
+            raise RuntimeError("Please select at least one Level Sequence.")
-            if a == ms:
-                seq_data = {
-                    "sequence": ms_obj,
-                    "output": f"{ms_obj.get_name()}",
-                    "frame_range": (
-                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
-                }
+        seq_data = None
+
+        for sel in selection:
+            selected_asset = ar.get_asset_by_object_path(sel).get_asset()
+            selected_asset_path = selected_asset.get_path_name()
+
+            # Check if the selected asset is a level sequence asset.
+            if selected_asset.get_class().get_name() != "LevelSequence":
+                unreal.log_warning(
+                    f"Skipping {selected_asset.get_name()}. It isn't a Level "
+                    "Sequence.")
+                continue
+
+            if pre_create_data.get("use_hierarchy"):
+                # The asset name is the third element of the path which
+                # contains the map.
+                # To get the asset name, we strip the "/Game/Ayon/" prefix
+                # from the path and then split the path by "/".
+                sel_path = selected_asset_path
+                asset_name = sel_path.replace(
+                    "/Game/Ayon/", "").split("/")[0]
+
+                search_path = f"/Game/Ayon/{asset_name}"
             else:
-                seq_data_list = [{
-                    "sequence": ms_obj,
-                    "output": f"{ms_obj.get_name()}",
-                    "frame_range": (
-                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
-                }]
+                search_path = Path(selected_asset_path).parent.as_posix()
-                for s in seq_data_list:
-                    subscenes = pipeline.get_subsequences(s.get('sequence'))
+            # Get the master sequence and the master level.
+            # There should be only one sequence and one level in the directory.
+            try:
+                ar_filter = unreal.ARFilter(
+                    class_names=["LevelSequence"],
+                    package_paths=[search_path],
+                    recursive_paths=False)
+                sequences = ar.get_assets(ar_filter)
+                master_seq = sequences[0].get_asset().get_path_name()
+                master_seq_obj = sequences[0].get_asset()
+                ar_filter = unreal.ARFilter(
+                    class_names=["World"],
+                    package_paths=[search_path],
+                    recursive_paths=False)
+                levels = ar.get_assets(ar_filter)
+                master_lvl = levels[0].get_asset().get_path_name()
+            except IndexError:
+                raise RuntimeError(
+                    "Could not find the hierarchy for the selected sequence.")
-                    for ss in subscenes:
+            # If the selected asset is the master sequence, we get its data
+            # and then we create the instance for the master sequence.
+            # Otherwise, we cycle from the master sequence to find the selected
+            # sequence and we get its data. This data will be used to create
+            # the instance for the selected sequence. In particular,
+            # we get the frame range of the selected sequence and its final
+            # output path.
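
[Illustrative aside, not part of the commit: the shape `seq_data` ends up with for a nested shot, using hypothetical names. "output" accumulates the master sequence name plus each sub-sequence name, and the section end frame is exclusive, hence the "- 1" in the code below.]

    seq_data = {
        "sequence": sh0020_sequence,   # an unreal.LevelSequence instance
        "output": "ep01/sh0020",       # master name / sub-sequence name
        "frame_range": (1001, 1096),   # section start, section end - 1
    }
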
+            master_seq_data = {
+                "sequence": master_seq_obj,
+                "output": f"{master_seq_obj.get_name()}",
+                "frame_range": (
+                    master_seq_obj.get_playback_start(),
+                    master_seq_obj.get_playback_end())}
+
+            if (selected_asset_path == master_seq or
+                    pre_create_data.get("use_hierarchy")):
+                seq_data = master_seq_data
+            else:
+                seq_data_list = [master_seq_data]
+
+                for seq in seq_data_list:
+                    subscenes = get_subsequences(seq.get('sequence'))
+
+                    for sub_seq in subscenes:
+                        sub_seq_obj = sub_seq.get_sequence()
                         curr_data = {
-                            "sequence": ss.get_sequence(),
-                            "output": (f"{s.get('output')}/"
-                                       f"{ss.get_sequence().get_name()}"),
+                            "sequence": sub_seq_obj,
+                            "output": (f"{seq.get('output')}/"
+                                       f"{sub_seq_obj.get_name()}"),
                             "frame_range": (
-                                ss.get_start_frame(), ss.get_end_frame() - 1)
-                        }
+                                sub_seq.get_start_frame(),
+                                sub_seq.get_end_frame() - 1)}
-                        if ss.get_sequence().get_path_name() == a:
+                        # If the selected asset is the current sub-sequence,
+                        # we get its data and we break the loop.
+                        # Otherwise, we add the current sub-sequence data to
+                        # the list of sequences to check.
+                        if sub_seq_obj.get_path_name() == selected_asset_path:
                             seq_data = curr_data
                             break
+                        seq_data_list.append(curr_data)
+                    # If we found the selected asset, we break the loop.
                     if seq_data is not None:
                         break
+            # If we didn't find the selected asset, we don't create the
+            # instance.
             if not seq_data:
+                unreal.log_warning(
+                    f"Skipping {selected_asset.get_name()}. It isn't a "
+                    "sub-sequence of the master sequence.")
                 continue
-            d = self.data.copy()
-            d["members"] = [a]
-            d["sequence"] = a
-            d["master_sequence"] = ms
-            d["master_level"] = ml
-            d["output"] = seq_data.get('output')
-            d["frameStart"] = seq_data.get('frame_range')[0]
-            d["frameEnd"] = seq_data.get('frame_range')[1]
+            self.create_instance(
+                instance_data, subset_name, pre_create_data,
+                selected_asset_path, master_seq, master_lvl, seq_data)
-            container_name = f"{subset}{self.suffix}"
-            pipeline.create_publish_instance(
-                instance=container_name, path=path)
-            pipeline.imprint(f"{path}/{container_name}", d)
+    def create(self, subset_name, instance_data, pre_create_data):
+        if pre_create_data.get("create_seq"):
+            self.create_with_new_sequence(
+                subset_name, instance_data, pre_create_data)
+        else:
+            self.create_from_existing_sequence(
+                subset_name, instance_data, pre_create_data)
+
+    def get_pre_create_attr_defs(self):
+        return [
+            UILabelDef(
+                "Select a Level Sequence to render or create a new one."
+            ),
+            BoolDef(
+                "create_seq",
+                label="Create a new Level Sequence",
+                default=False
+            ),
+            UILabelDef(
+                "WARNING: If you create a new Level Sequence, the current\n"
+                "level will be saved and a new Master Level will be created."
+            ),
+            NumberDef(
+                "start_frame",
+                label="Start Frame",
+                default=0,
+                minimum=-999999,
+                maximum=999999
+            ),
+            NumberDef(
+                "end_frame",
+                label="End Frame",
+                default=150,
+                minimum=-999999,
+                maximum=999999
+            ),
+            UISeparatorDef(),
+            UILabelDef(
+                "The following settings are valid only if you are not\n"
+                "creating a new sequence."
+ ), + BoolDef( + "use_hierarchy", + label="Use Hierarchy", + default=False + ), + ] diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py index 45d517d27d..80816d8386 100644 --- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,35 +1,13 @@ # -*- coding: utf-8 -*- -"""Create Static Meshes as FBX geometry.""" -import unreal # noqa -from openpype.hosts.unreal.api.pipeline import ( - instantiate, +from openpype.hosts.unreal.api.plugin import ( + UnrealAssetCreator, ) -from openpype.pipeline import LegacyCreator -class CreateStaticMeshFBX(LegacyCreator): - """Static FBX geometry.""" +class CreateStaticMeshFBX(UnrealAssetCreator): + """Create Static Meshes as FBX geometry.""" - name = "unrealStaticMeshMain" - label = "Unreal - Static Mesh" + identifier = "io.ayon.creators.unreal.staticmeshfbx" + label = "Static Mesh (FBX)" family = "unrealStaticMesh" icon = "cube" - asset_types = ["StaticMesh"] - - root = "/Game" - suffix = "_INS" - - def __init__(self, *args, **kwargs): - super(CreateStaticMeshFBX, self).__init__(*args, **kwargs) - - def process(self): - - name = self.data["subset"] - - selection = [] - if (self.options or {}).get("useSelection"): - sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() - selection = [a.get_path_name() for a in sel_objects] - - unreal.log("selection: {}".format(selection)) - instantiate(self.root, name, self.data, selection, self.suffix) diff --git a/openpype/hosts/unreal/plugins/create/create_uasset.py b/openpype/hosts/unreal/plugins/create/create_uasset.py index ee584ac00c..f70ecc55b3 100644 --- a/openpype/hosts/unreal/plugins/create/create_uasset.py +++ b/openpype/hosts/unreal/plugins/create/create_uasset.py @@ -1,41 +1,33 @@ -"""Create UAsset.""" +# -*- coding: utf-8 -*- from pathlib import Path import unreal -from openpype.hosts.unreal.api import pipeline -from openpype.pipeline import LegacyCreator +from openpype.pipeline import CreatorError +from openpype.hosts.unreal.api.plugin import ( + UnrealAssetCreator, +) -class CreateUAsset(LegacyCreator): - """UAsset.""" +class CreateUAsset(UnrealAssetCreator): + """Create UAsset.""" - name = "UAsset" + identifier = "io.ayon.creators.unreal.uasset" label = "UAsset" family = "uasset" icon = "cube" - root = "/Game/OpenPype" - suffix = "_INS" + extension = ".uasset" - def __init__(self, *args, **kwargs): - super(CreateUAsset, self).__init__(*args, **kwargs) + def create(self, subset_name, instance_data, pre_create_data): + if pre_create_data.get("use_selection"): + ar = unreal.AssetRegistryHelpers.get_asset_registry() - def process(self): - ar = unreal.AssetRegistryHelpers.get_asset_registry() - - subset = self.data["subset"] - path = f"{self.root}/PublishInstances/" - - unreal.EditorAssetLibrary.make_directory(path) - - selection = [] - if (self.options or {}).get("useSelection"): sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() selection = [a.get_path_name() for a in sel_objects] if len(selection) != 1: - raise RuntimeError("Please select only one object.") + raise CreatorError("Please select only one object.") obj = selection[0] @@ -43,19 +35,32 @@ class CreateUAsset(LegacyCreator): sys_path = unreal.SystemLibrary.get_system_path(asset) if not sys_path: - raise RuntimeError( + raise CreatorError( f"{Path(obj).name} is not on the disk. 
Likely it needs to "
+                "be saved first.")
-        if Path(sys_path).suffix != ".uasset":
-            raise RuntimeError(f"{Path(sys_path).name} is not a UAsset.")
+        if Path(sys_path).suffix != self.extension:
+            raise CreatorError(
+                f"{Path(sys_path).name} is not a {self.label}.")
-        unreal.log("selection: {}".format(selection))
-        container_name = f"{subset}{self.suffix}"
-        pipeline.create_publish_instance(
-            instance=container_name, path=path)
+        super(CreateUAsset, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
-        data = self.data.copy()
-        data["members"] = selection
-        pipeline.imprint(f"{path}/{container_name}", data)
+
+class CreateUMap(CreateUAsset):
+    """Create Level."""
+
+    identifier = "io.ayon.creators.unreal.umap"
+    label = "Level"
+    family = "uasset"
+    extension = ".umap"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        instance_data["families"] = ["umap"]
+
+        super(CreateUMap, self).create(
+            subset_name,
+            instance_data,
+            pre_create_data)
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_animation.py b/openpype/hosts/unreal/plugins/load/load_alembic_animation.py
index 496b6056ea..52eea4122a 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_animation.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_animation.py
@@ -4,7 +4,7 @@ import os
 
 from openpype.pipeline import (
     get_representation_path,
-    AVALON_CONTAINER_ID
+    AYON_CONTAINER_ID
 )
 from openpype.hosts.unreal.api import plugin
 from openpype.hosts.unreal.api import pipeline as unreal_pipeline
@@ -68,8 +68,8 @@ class AnimationAlembicLoader(plugin.Loader):
             list(str): list of container content
         """
 
-        # Create directory for asset and openpype container
-        root = "/Game/OpenPype/Assets"
+        # Create directory for asset and Ayon container
+        root = "/Game/Ayon/Assets"
         asset = context.get('asset').get('name')
         suffix = "_CON"
         if asset:
@@ -97,8 +97,8 @@
             container=container_name, path=asset_dir)
 
         data = {
-            "schema": "openpype:container-2.0",
-            "id": AVALON_CONTAINER_ID,
+            "schema": "ayon:container-2.0",
+            "id": AYON_CONTAINER_ID,
             "asset": asset,
             "namespace": asset_dir,
             "container_name": container_name,
@@ -109,7 +109,7 @@
             "family": context["representation"]["context"]["family"]
         }
         unreal_pipeline.imprint(
-            "{}/{}".format(asset_dir, container_name), data)
+            f"{asset_dir}/{container_name}", data)
 
         asset_content = unreal.EditorAssetLibrary.list_assets(
             asset_dir, recursive=True, include_folder=True
diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py
index 1fe0bef462..a5ecb677e8 100644
--- a/openpype/hosts/unreal/plugins/load/load_animation.py
+++ b/openpype/hosts/unreal/plugins/load/load_animation.py
@@ -11,7 +11,7 @@ from unreal import MovieSceneSkeletalAnimationSection
 from openpype.pipeline.context_tools import get_current_project_asset
 from openpype.pipeline import (
     get_representation_path,
-    AVALON_CONTAINER_ID
+    AYON_CONTAINER_ID
 )
 from openpype.hosts.unreal.api import plugin
 from openpype.hosts.unreal.api import pipeline as unreal_pipeline
@@ -139,9 +139,9 @@ class AnimationFBXLoader(plugin.Loader):
         Returns:
             list(str): list of container content
         """
-        # Create directory for asset and avalon container
+        # Create directory for asset and Ayon container
         hierarchy = context.get('asset').get('data').get('parents')
-        root = "/Game/OpenPype"
+        root = "/Game/Ayon"
         asset = context.get('asset').get('name')
         suffix = "_CON"
         asset_name = 
f"{asset}_{name}" if asset else f"{name}" @@ -156,7 +156,7 @@ class AnimationFBXLoader(plugin.Loader): package_paths=[f"{root}/{hierarchy[0]}"], recursive_paths=False) levels = ar.get_assets(_filter) - master_level = levels[0].get_editor_property('object_path') + master_level = levels[0].get_asset().get_path_name() hierarchy_dir = root for h in hierarchy: @@ -168,7 +168,7 @@ class AnimationFBXLoader(plugin.Loader): package_paths=[f"{hierarchy_dir}/"], recursive_paths=True) levels = ar.get_assets(_filter) - level = levels[0].get_editor_property('object_path') + level = levels[0].get_asset().get_path_name() unreal.EditorLevelLibrary.save_all_dirty_levels() unreal.EditorLevelLibrary.load_level(level) @@ -223,8 +223,8 @@ class AnimationFBXLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index ca6b0ce736..59ea14697d 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -3,16 +3,24 @@ from pathlib import Path import unreal -from unreal import EditorAssetLibrary -from unreal import EditorLevelLibrary -from unreal import EditorLevelUtils -from openpype.client import get_assets, get_asset_by_name +from unreal import ( + EditorAssetLibrary, + EditorLevelLibrary, + EditorLevelUtils, + LevelSequenceEditorBlueprintLibrary as LevelSequenceLib, +) +from openpype.client import get_asset_by_name from openpype.pipeline import ( - AVALON_CONTAINER_ID, + AYON_CONTAINER_ID, legacy_io, ) from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as unreal_pipeline +from openpype.hosts.unreal.api.pipeline import ( + generate_sequence, + set_sequence_hierarchy, + create_container, + imprint, +) class CameraLoader(plugin.Loader): @@ -24,32 +32,6 @@ class CameraLoader(plugin.Loader): icon = "cube" color = "orange" - def _set_sequence_hierarchy( - self, seq_i, seq_j, min_frame_j, max_frame_j - ): - tracks = seq_i.get_master_tracks() - track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - track = t - break - if not track: - track = seq_i.add_master_track(unreal.MovieSceneSubTrack) - - subscenes = track.get_sections() - subscene = None - for s in subscenes: - if s.get_editor_property('sub_sequence') == seq_j: - subscene = s - break - if not subscene: - subscene = track.add_section() - subscene.set_row_index(len(track.get_sections())) - subscene.set_editor_property('sub_sequence', seq_j) - subscene.set_range( - min_frame_j, - max_frame_j + 1) - def _import_camera( self, world, sequence, bindings, import_fbx_settings, import_filename ): @@ -100,9 +82,9 @@ class CameraLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and avalon container + # Create directory for asset and Ayon container hierarchy = context.get('asset').get('data').get('parents') - root = "/Game/OpenPype" + root = "/Game/Ayon" hierarchy_dir = root hierarchy_dir_list = [] for h in hierarchy: @@ -110,10 +92,7 @@ class CameraLoader(plugin.Loader): hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = 
"{}".format(name) + asset_name = f"{asset}_{name}" if asset else f"{name}" tools = unreal.AssetToolsHelpers().get_asset_tools() @@ -127,23 +106,15 @@ class CameraLoader(plugin.Loader): # Get highest number to make a unique name folders = [a for a in asset_content if a[-1] == "/" and f"{name}_" in a] - f_numbers = [] - for f in folders: - # Get number from folder name. Splits the string by "_" and - # removes the last element (which is a "/"). - f_numbers.append(int(f.split("_")[-1][:-1])) + # Get number from folder name. Splits the string by "_" and + # removes the last element (which is a "/"). + f_numbers = [int(f.split("_")[-1][:-1]) for f in folders] f_numbers.sort() - if not f_numbers: - unique_number = 1 - else: - unique_number = f_numbers[-1] + 1 + unique_number = f_numbers[-1] + 1 if f_numbers else 1 asset_dir, container_name = tools.create_unique_asset_name( f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") - asset_path = Path(asset_dir) - asset_path_parent = str(asset_path.parent.as_posix()) - container_name += suffix EditorAssetLibrary.make_directory(asset_dir) @@ -156,9 +127,9 @@ class CameraLoader(plugin.Loader): if not EditorAssetLibrary.does_asset_exist(master_level): EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") - level = f"{asset_path_parent}/{asset}_map.{asset}_map" + level = f"{asset_dir}/{asset}_map_camera.{asset}_map_camera" if not EditorAssetLibrary.does_asset_exist(level): - EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map") + EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map_camera") EditorLevelLibrary.load_level(master_level) EditorLevelUtils.add_level_to_world( @@ -169,27 +140,13 @@ class CameraLoader(plugin.Loader): EditorLevelLibrary.save_all_dirty_levels() EditorLevelLibrary.load_level(level) - project_name = legacy_io.active_project() - # TODO refactor - # - Creationg of hierarchy should be a function in unreal integration - # - it's used in multiple loaders but must not be loader's logic - # - hard to say what is purpose of the loop - # - variables does not match their meaning - # - why scene is stored to sequences? - # - asset documents vs. elements - # - cleanup variable names in whole function - # - e.g. 'asset', 'asset_name', 'asset_data', 'asset_doc' - # - really inefficient queries of asset documents - # - existing asset in scene is considered as "with correct values" - # - variable 'elements' is modified during it's loop # Get all the sequences in the hierarchy. It will create them, if # they don't exist. 
- sequences = [] frame_ranges = [] - i = 0 - for h in hierarchy_dir_list: + sequences = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): root_content = EditorAssetLibrary.list_assets( - h, recursive=False, include_folder=False) + h_dir, recursive=False, include_folder=False) existing_sequences = [ EditorAssetLibrary.find_asset_data(asset) @@ -198,57 +155,17 @@ class CameraLoader(plugin.Loader): asset).get_class().get_name() == 'LevelSequence' ] - if not existing_sequences: - scene = tools.create_asset( - asset_name=hierarchy[i], - package_path=h, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) - - asset_data = get_asset_by_name( - project_name, - h.split('/')[-1], - fields=["_id", "data.fps"] - ) - - start_frames = [] - end_frames = [] - - elements = list(get_assets( - project_name, - parent_ids=[asset_data["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - - for e in elements: - start_frames.append(e.get('data').get('clipIn')) - end_frames.append(e.get('data').get('clipOut')) - - elements.extend(get_assets( - project_name, - parent_ids=[e["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - - min_frame = min(start_frames) - max_frame = max(end_frames) - - scene.set_display_rate( - unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) - scene.set_playback_start(min_frame) - scene.set_playback_end(max_frame) - - sequences.append(scene) - frame_ranges.append((min_frame, max_frame)) - else: - for e in existing_sequences: - sequences.append(e.get_asset()) + if existing_sequences: + for seq in existing_sequences: + sequences.append(seq.get_asset()) frame_ranges.append(( - e.get_asset().get_playback_start(), - e.get_asset().get_playback_end())) + seq.get_asset().get_playback_start(), + seq.get_asset().get_playback_end())) + else: + sequence, frame_range = generate_sequence(h, h_dir) - i += 1 + sequences.append(sequence) + frame_ranges.append(frame_range) EditorAssetLibrary.make_directory(asset_dir) @@ -260,19 +177,24 @@ class CameraLoader(plugin.Loader): ) # Add sequences data to hierarchy - for i in range(0, len(sequences) - 1): - self._set_sequence_hierarchy( + for i in range(len(sequences) - 1): + set_sequence_hierarchy( sequences[i], sequences[i + 1], - frame_ranges[i + 1][0], frame_ranges[i + 1][1]) + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) + project_name = legacy_io.active_project() data = get_asset_by_name(project_name, asset)["data"] cam_seq.set_display_rate( unreal.FrameRate(data.get("fps"), 1.0)) - cam_seq.set_playback_start(0) - cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) - self._set_sequence_hierarchy( + cam_seq.set_playback_start(data.get('clipIn')) + cam_seq.set_playback_end(data.get('clipOut') + 1) + set_sequence_hierarchy( sequences[-1], cam_seq, - data.get('clipIn'), data.get('clipOut')) + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) @@ -286,13 +208,33 @@ class CameraLoader(plugin.Loader): self.fname ) + # Set range of all sections + # Changing the range of the section is not enough. We need to change + # the frame of all the keys in the section. 
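
[Illustrative aside, not part of the commit: the re-keying loop below applies a constant offset to every key. With hypothetical values clipIn=1101 and frameStart=1001, each key shifts by 100 frames.]

    offset = data.get('clipIn') - data.get('frameStart')   # 1101 - 1001 = 100
    new_time = old_time_value + offset                     # a key at 1005 lands on 1105
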
+ for possessable in cam_seq.get_possessables(): + for tracks in possessable.get_tracks(): + for section in tracks.get_sections(): + section.set_range( + data.get('clipIn'), + data.get('clipOut') + 1) + for channel in section.get_all_channels(): + for key in channel.get_keys(): + old_time = key.get_time().get_editor_property( + 'frame_number') + old_time_value = old_time.get_editor_property( + 'value') + new_time = old_time_value + ( + data.get('clipIn') - data.get('frameStart') + ) + key.set_time(unreal.FrameNumber(value=new_time)) + # Create Asset Container - unreal_pipeline.create_container( + create_container( container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -302,14 +244,14 @@ class CameraLoader(plugin.Loader): "parent": context["representation"]["parent"], "family": context["representation"]["context"]["family"] } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + imprint(f"{asset_dir}/{container_name}", data) EditorLevelLibrary.save_all_dirty_levels() EditorLevelLibrary.load_level(master_level) + # Save all assets in the hierarchy asset_content = EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=True + hierarchy_dir_list[0], recursive=True, include_folder=False ) for a in asset_content: @@ -320,32 +262,30 @@ class CameraLoader(plugin.Loader): def update(self, container, representation): ar = unreal.AssetRegistryHelpers.get_asset_registry() - root = "/Game/OpenPype" + curr_level_sequence = LevelSequenceLib.get_current_level_sequence() + curr_time = LevelSequenceLib.get_current_time() + is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport() + + editor_subsystem = unreal.UnrealEditorSubsystem() + vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info() asset_dir = container.get('namespace') - context = representation.get("context") - - hierarchy = context.get('hierarchy').split("/") - h_dir = f"{root}/{hierarchy[0]}" - h_asset = hierarchy[0] - master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" - EditorLevelLibrary.save_current_level() - filter = unreal.ARFilter( + _filter = unreal.ARFilter( class_names=["LevelSequence"], package_paths=[asset_dir], recursive_paths=False) - sequences = ar.get_assets(filter) - filter = unreal.ARFilter( + sequences = ar.get_assets(_filter) + _filter = unreal.ARFilter( class_names=["World"], - package_paths=[str(Path(asset_dir).parent.as_posix())], + package_paths=[asset_dir], recursive_paths=True) - maps = ar.get_assets(filter) + maps = ar.get_assets(_filter) # There should be only one map in the list - EditorLevelLibrary.load_level(maps[0].get_full_name()) + EditorLevelLibrary.load_level(maps[0].get_asset().get_path_name()) level_sequence = sequences[0].get_asset() @@ -378,15 +318,21 @@ class CameraLoader(plugin.Loader): # Remove the Level Sequence from the parent. # We need to traverse the hierarchy from the master sequence to find # the level sequence. 
- root = "/Game/OpenPype" + root = "/Game/Ayon" namespace = container.get('namespace').replace(f"{root}/", "") ms_asset = namespace.split('/')[0] - filter = unreal.ARFilter( + _filter = unreal.ARFilter( class_names=["LevelSequence"], package_paths=[f"{root}/{ms_asset}"], recursive_paths=False) - sequences = ar.get_assets(filter) + sequences = ar.get_assets(_filter) master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_asset().get_path_name() sequences = [master_sequence] @@ -398,26 +344,20 @@ class CameraLoader(plugin.Loader): for t in tracks: if t.get_class() == unreal.MovieSceneSubTrack.static_class(): subscene_track = t - break if subscene_track: sections = subscene_track.get_sections() for ss in sections: if ss.get_sequence().get_name() == sequence_name: parent = s sub_scene = ss - # subscene_track.remove_section(ss) break sequences.append(ss.get_sequence()) - # Update subscenes indexes. - i = 0 - for ss in sections: + for i, ss in enumerate(sections): ss.set_row_index(i) - i += 1 - if parent: break - assert parent, "Could not find the parent sequence" + assert parent, "Could not find the parent sequence" EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) @@ -446,33 +386,63 @@ class CameraLoader(plugin.Loader): str(representation["data"]["path"]) ) + # Set range of all sections + # Changing the range of the section is not enough. We need to change + # the frame of all the keys in the section. + project_name = legacy_io.active_project() + asset = container.get('asset') + data = get_asset_by_name(project_name, asset)["data"] + + for possessable in new_sequence.get_possessables(): + for tracks in possessable.get_tracks(): + for section in tracks.get_sections(): + section.set_range( + data.get('clipIn'), + data.get('clipOut') + 1) + for channel in section.get_all_channels(): + for key in channel.get_keys(): + old_time = key.get_time().get_editor_property( + 'frame_number') + old_time_value = old_time.get_editor_property( + 'value') + new_time = old_time_value + ( + data.get('clipIn') - data.get('frameStart') + ) + key.set_time(unreal.FrameNumber(value=new_time)) + data = { "representation": str(representation["_id"]), "parent": str(representation["parent"]) } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container.get('container_name')), data) + imprint(f"{asset_dir}/{container.get('container_name')}", data) EditorLevelLibrary.save_current_level() asset_content = EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=False) + f"{root}/{ms_asset}", recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) EditorLevelLibrary.load_level(master_level) + if curr_level_sequence: + LevelSequenceLib.open_level_sequence(curr_level_sequence) + LevelSequenceLib.set_current_time(curr_time) + LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock) + + editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot) + def remove(self, container): - path = Path(container.get("namespace")) - parent_path = str(path.parent.as_posix()) + asset_dir = container.get('namespace') + path = Path(asset_dir) ar = unreal.AssetRegistryHelpers.get_asset_registry() - filter = unreal.ARFilter( + _filter = unreal.ARFilter( class_names=["LevelSequence"], - package_paths=[f"{str(path.as_posix())}"], + package_paths=[asset_dir], recursive_paths=False) - sequences = 
ar.get_assets(filter) + sequences = ar.get_assets(_filter) if not sequences: raise Exception("Could not find sequence.") @@ -480,11 +450,11 @@ class CameraLoader(plugin.Loader): world = ar.get_asset_by_object_path( EditorLevelLibrary.get_editor_world().get_path_name()) - filter = unreal.ARFilter( + _filter = unreal.ARFilter( class_names=["World"], - package_paths=[f"{parent_path}"], + package_paths=[asset_dir], recursive_paths=True) - maps = ar.get_assets(filter) + maps = ar.get_assets(_filter) # There should be only one map in the list if not maps: @@ -493,7 +463,7 @@ class CameraLoader(plugin.Loader): map = maps[0] EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(map.get_full_name()) + EditorLevelLibrary.load_level(map.get_asset().get_path_name()) # Remove the camera from the level. actors = EditorLevelLibrary.get_all_level_actors() @@ -503,7 +473,7 @@ class CameraLoader(plugin.Loader): EditorLevelLibrary.destroy_actor(a) EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(world.get_full_name()) + EditorLevelLibrary.load_level(world.get_asset().get_path_name()) # There should be only one sequence in the path. sequence_name = sequences[0].asset_name @@ -511,15 +481,21 @@ class CameraLoader(plugin.Loader): # Remove the Level Sequence from the parent. # We need to traverse the hierarchy from the master sequence to find # the level sequence. - root = "/Game/OpenPype" + root = "/Game/Ayon" namespace = container.get('namespace').replace(f"{root}/", "") ms_asset = namespace.split('/')[0] - filter = unreal.ARFilter( + _filter = unreal.ARFilter( class_names=["LevelSequence"], package_paths=[f"{root}/{ms_asset}"], recursive_paths=False) - sequences = ar.get_assets(filter) + sequences = ar.get_assets(_filter) master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_full_name() sequences = [master_sequence] @@ -527,10 +503,13 @@ class CameraLoader(plugin.Loader): for s in sequences: tracks = s.get_master_tracks() subscene_track = None + visibility_track = None for t in tracks: if t.get_class() == unreal.MovieSceneSubTrack.static_class(): subscene_track = t - break + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t if subscene_track: sections = subscene_track.get_sections() for ss in sections: @@ -540,23 +519,48 @@ class CameraLoader(plugin.Loader): break sequences.append(ss.get_sequence()) # Update subscenes indexes. - i = 0 - for ss in sections: + for i, ss in enumerate(sections): ss.set_row_index(i) - i += 1 + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map_camera") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() if parent: break assert parent, "Could not find the parent sequence" - EditorAssetLibrary.delete_directory(str(path.as_posix())) + # Create a temporary level to delete the layout level. 
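
[Illustrative aside, not part of the commit: the temporary map below is needed because the editor will not delete a level, or the folder containing it, while that level is loaded. The loader therefore switches to a throwaway map first, deletes the layout directory, reloads the master level, and finally removes the temp folder again.]
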
+ EditorLevelLibrary.save_all_dirty_levels() + EditorAssetLibrary.make_directory(f"{root}/tmp") + tmp_level = f"{root}/tmp/temp_map" + if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + EditorLevelLibrary.new_level(tmp_level) + else: + EditorLevelLibrary.load_level(tmp_level) + + # Delete the layout directory. + EditorAssetLibrary.delete_directory(asset_dir) + + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") # Check if there isn't any more assets in the parent folder, and # delete it if not. asset_content = EditorAssetLibrary.list_assets( - parent_path, recursive=False, include_folder=True + path.parent.as_posix(), recursive=False, include_folder=True ) if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(path.parent.as_posix()) diff --git a/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py b/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py index 6ac3531b40..3a292fdbd1 100644 --- a/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py +++ b/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py @@ -4,7 +4,7 @@ import os from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -22,7 +22,8 @@ class PointCacheAlembicLoader(plugin.Loader): color = "orange" def get_task( - self, filename, asset_dir, asset_name, replace, frame_start, frame_end + self, filename, asset_dir, asset_name, replace, + frame_start=None, frame_end=None ): task = unreal.AssetImportTask() options = unreal.AbcImportSettings() @@ -51,8 +52,10 @@ class PointCacheAlembicLoader(plugin.Loader): conversion_settings.set_editor_property( 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0)) - sampling_settings.set_editor_property('frame_start', frame_start) - sampling_settings.set_editor_property('frame_end', frame_end) + if frame_start is not None: + sampling_settings.set_editor_property('frame_start', frame_start) + if frame_end is not None: + sampling_settings.set_editor_property('frame_end', frame_end) options.geometry_cache_settings = gc_settings options.conversion_settings = conversion_settings @@ -83,8 +86,8 @@ class PointCacheAlembicLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and Ayon container + root = "/Game/Ayon/Assets" asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -118,8 +121,8 @@ class PointCacheAlembicLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -145,9 +148,9 @@ class PointCacheAlembicLoader(plugin.Loader): name = container["asset_name"] source_path = get_representation_path(representation) destination_path = container["namespace"] + representation["context"] - task = self.get_task(source_path, destination_path, name, True) - + task = self.get_task(source_path, destination_path, name, False) # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py 
b/openpype/hosts/unreal/plugins/load/load_layout.py index c1d66ddf2a..86b2e1456c 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -1,32 +1,40 @@ # -*- coding: utf-8 -*- """Loader for layouts.""" import json +import collections from pathlib import Path import unreal -from unreal import EditorAssetLibrary -from unreal import EditorLevelLibrary -from unreal import EditorLevelUtils -from unreal import AssetToolsHelpers -from unreal import FBXImportType -from unreal import MovieSceneLevelVisibilityTrack -from unreal import MovieSceneSubTrack +from unreal import ( + EditorAssetLibrary, + EditorLevelLibrary, + EditorLevelUtils, + AssetToolsHelpers, + FBXImportType, + MovieSceneLevelVisibilityTrack, + MovieSceneSubTrack, + LevelSequenceEditorBlueprintLibrary as LevelSequenceLib, +) -from bson.objectid import ObjectId - -from openpype.client import get_asset_by_name, get_assets +from openpype.client import get_asset_by_name, get_representations from openpype.pipeline import ( discover_loader_plugins, loaders_from_representation, load_container, get_representation_path, - AVALON_CONTAINER_ID, + AYON_CONTAINER_ID, legacy_io, ) from openpype.pipeline.context_tools import get_current_project_asset from openpype.settings import get_current_project_settings from openpype.hosts.unreal.api import plugin -from openpype.hosts.unreal.api import pipeline as unreal_pipeline +from openpype.hosts.unreal.api.pipeline import ( + generate_sequence, + set_sequence_hierarchy, + create_container, + imprint, + ls, +) class LayoutLoader(plugin.Loader): @@ -38,7 +46,7 @@ class LayoutLoader(plugin.Loader): label = "Load Layout" icon = "code-fork" color = "orange" - ASSET_ROOT = "/Game/OpenPype" + ASSET_ROOT = "/Game/Ayon" def _get_asset_containers(self, path): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -51,7 +59,7 @@ class LayoutLoader(plugin.Loader): # Get all the asset containers for a in asset_content: obj = ar.get_asset_by_object_path(a) - if obj.get_asset().get_class().get_name() == 'AssetContainer': + if obj.get_asset().get_class().get_name() == 'AyonAssetContainer': asset_containers.append(obj) return asset_containers @@ -92,77 +100,6 @@ class LayoutLoader(plugin.Loader): return None - @staticmethod - def _set_sequence_hierarchy( - seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths - ): - # Get existing sequencer tracks or create them if they don't exist - tracks = seq_i.get_master_tracks() - subscene_track = None - visibility_track = None - for t in tracks: - if t.get_class() == unreal.MovieSceneSubTrack.static_class(): - subscene_track = t - if (t.get_class() == - unreal.MovieSceneLevelVisibilityTrack.static_class()): - visibility_track = t - if not subscene_track: - subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack) - if not visibility_track: - visibility_track = seq_i.add_master_track( - unreal.MovieSceneLevelVisibilityTrack) - - # Create the sub-scene section - subscenes = subscene_track.get_sections() - subscene = None - for s in subscenes: - if s.get_editor_property('sub_sequence') == seq_j: - subscene = s - break - if not subscene: - subscene = subscene_track.add_section() - subscene.set_row_index(len(subscene_track.get_sections())) - subscene.set_editor_property('sub_sequence', seq_j) - subscene.set_range( - min_frame_j, - max_frame_j + 1) - - # Create the visibility section - ar = unreal.AssetRegistryHelpers.get_asset_registry() - maps = [] - for m in map_paths: - # Unreal requires to load the 
level to get the map name - EditorLevelLibrary.save_all_dirty_levels() - EditorLevelLibrary.load_level(m) - maps.append(str(ar.get_asset_by_object_path(m).asset_name)) - - vis_section = visibility_track.add_section() - index = len(visibility_track.get_sections()) - - vis_section.set_range( - min_frame_j, - max_frame_j + 1) - vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) - vis_section.set_row_index(index) - vis_section.set_level_names(maps) - - if min_frame_j > 1: - hid_section = visibility_track.add_section() - hid_section.set_range( - 1, - min_frame_j) - hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) - hid_section.set_row_index(index) - hid_section.set_level_names(maps) - if max_frame_j < max_frame_i: - hid_section = visibility_track.add_section() - hid_section.set_range( - max_frame_j + 1, - max_frame_i + 1) - hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) - hid_section.set_row_index(index) - hid_section.set_level_names(maps) - def _transform_from_basis(self, transform, basis): """Transform a transform from a basis to a new basis.""" # Get the basis matrix @@ -339,7 +276,7 @@ class LayoutLoader(plugin.Loader): ).replace('\\', '/') _filter = unreal.ARFilter( - class_names=["AssetContainer"], + class_names=["AyonAssetContainer"], package_paths=[anim_path], recursive_paths=False) containers = ar.get_assets(_filter) @@ -353,62 +290,29 @@ class LayoutLoader(plugin.Loader): sec_params = section.get_editor_property('params') sec_params.set_editor_property('animation', animation) - @staticmethod - def _generate_sequence(h, h_dir): - tools = unreal.AssetToolsHelpers().get_asset_tools() + def _get_repre_docs_by_version_id(self, data): + version_ids = { + element.get("version") + for element in data + if element.get("representation") + } + version_ids.discard(None) - sequence = tools.create_asset( - asset_name=h, - package_path=h_dir, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() - ) + output = collections.defaultdict(list) + if not version_ids: + return output project_name = legacy_io.active_project() - asset_data = get_asset_by_name( + repre_docs = get_representations( project_name, - h_dir.split('/')[-1], - fields=["_id", "data.fps"] + representation_names=["fbx", "abc"], + version_ids=version_ids, + fields=["_id", "parent", "name"] ) - - start_frames = [] - end_frames = [] - - elements = list(get_assets( - project_name, - parent_ids=[asset_data["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - for e in elements: - start_frames.append(e.get('data').get('clipIn')) - end_frames.append(e.get('data').get('clipOut')) - - elements.extend(get_assets( - project_name, - parent_ids=[e["_id"]], - fields=["_id", "data.clipIn", "data.clipOut"] - )) - - min_frame = min(start_frames) - max_frame = max(end_frames) - - sequence.set_display_rate( - unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) - sequence.set_playback_start(min_frame) - sequence.set_playback_end(max_frame) - - tracks = sequence.get_master_tracks() - track = None - for t in tracks: - if (t.get_class() == - unreal.MovieSceneCameraCutTrack.static_class()): - track = t - break - if not track: - track = sequence.add_master_track( - unreal.MovieSceneCameraCutTrack) - - return sequence, (min_frame, max_frame) + for repre_doc in repre_docs: + version_id = str(repre_doc["parent"]) + output[version_id].append(repre_doc) + return output def _process(self, lib_path, asset_dir, sequence, repr_loaded=None): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ 
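# A minimal sketch of the batching pattern behind _get_repre_docs_by_version_id
# above: gather the version ids once, run a single bulk query, then bucket the
# returned documents by their "parent" (version) id instead of issuing one
# find_one() per element.
import collections

def group_docs_by_parent(repre_docs):
    grouped = collections.defaultdict(list)
    for doc in repre_docs:
        grouped[str(doc["parent"])].append(doc)
    return grouped

# group_docs_by_parent([{"_id": "a", "parent": 1, "name": "fbx"},
#                       {"_id": "b", "parent": 1, "name": "abc"}])
# -> {"1": [<fbx doc>, <abc doc>]}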
-429,31 +333,21 @@ class LayoutLoader(plugin.Loader): loaded_assets = [] + repre_docs_by_version_id = self._get_repre_docs_by_version_id(data) for element in data: representation = None repr_format = None if element.get('representation'): - # representation = element.get('representation') - - self.log.info(element.get("version")) - - valid_formats = ['fbx', 'abc'] - - repr_data = legacy_io.find_one({ - "type": "representation", - "parent": ObjectId(element.get("version")), - "name": {"$in": valid_formats} - }) - repr_format = repr_data.get('name') - - if not repr_data: + repre_docs = repre_docs_by_version_id[element.get("version")] + if not repre_docs: self.log.error( f"No valid representation found for version " f"{element.get('version')}") continue + repre_doc = repre_docs[0] + representation = str(repre_doc["_id"]) + repr_format = repre_doc["name"] - representation = str(repr_data.get('_id')) - print(representation) # This is to keep compatibility with old versions of the # json format. elif element.get('reference_fbx'): @@ -506,7 +400,7 @@ class LayoutLoader(plugin.Loader): for asset in assets: obj = ar.get_asset_by_object_path(asset).get_asset() - if obj.get_class().get_name() == 'AssetContainer': + if obj.get_class().get_name() == 'AyonAssetContainer': container = obj if obj.get_class().get_name() == 'Skeleton': skeleton = obj @@ -621,7 +515,7 @@ class LayoutLoader(plugin.Loader): data = get_current_project_settings() create_sequences = data["unreal"]["level_sequences_for_layouts"] - # Create directory for asset and avalon container + # Create directory for asset and Ayon container hierarchy = context.get('asset').get('data').get('parents') root = self.ASSET_ROOT hierarchy_dir = root @@ -683,7 +577,7 @@ class LayoutLoader(plugin.Loader): ] if not existing_sequences: - sequence, frame_range = self._generate_sequence(h, h_dir) + sequence, frame_range = generate_sequence(h, h_dir) sequences.append(sequence) frame_ranges.append(frame_range) @@ -703,7 +597,7 @@ class LayoutLoader(plugin.Loader): # sequences and frame_ranges have the same length for i in range(0, len(sequences) - 1): - self._set_sequence_hierarchy( + set_sequence_hierarchy( sequences[i], sequences[i + 1], frame_ranges[i][1], frame_ranges[i + 1][0], frame_ranges[i + 1][1], @@ -716,7 +610,7 @@ class LayoutLoader(plugin.Loader): shot.set_playback_start(0) shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) if sequences: - self._set_sequence_hierarchy( + set_sequence_hierarchy( sequences[-1], shot, frame_ranges[-1][1], data.get('clipIn'), data.get('clipOut'), @@ -727,17 +621,17 @@ class LayoutLoader(plugin.Loader): loaded_assets = self._process(self.fname, asset_dir, shot) for s in sequences: - EditorAssetLibrary.save_asset(s.get_full_name()) + EditorAssetLibrary.save_asset(s.get_path_name()) EditorLevelLibrary.save_current_level() # Create Asset Container - unreal_pipeline.create_container( + create_container( container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -748,11 +642,13 @@ class LayoutLoader(plugin.Loader): "family": context["representation"]["context"]["family"], "loaded_assets": loaded_assets } - unreal_pipeline.imprint( + imprint( "{}/{}".format(asset_dir, container_name), data) + save_dir = hierarchy_dir_list[0] if create_sequences else asset_dir + asset_content = EditorAssetLibrary.list_assets( - 
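# Why the save fix above switches to get_path_name(): get_full_name() also
# prefixes the class name (e.g. "LevelSequence /Game/Seq.Seq") while
# save_asset() expects the bare object path. This is an assumption about the
# Unreal API, illustrated with hypothetical strings:
full_name = "LevelSequence /Game/Ayon/ep01/ep01.ep01"
path_name = full_name.split(" ", 1)[1]
assert path_name == "/Game/Ayon/ep01/ep01.ep01"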
asset_dir, recursive=True, include_folder=False) + save_dir, recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) @@ -768,16 +664,24 @@ class LayoutLoader(plugin.Loader): ar = unreal.AssetRegistryHelpers.get_asset_registry() - root = "/Game/OpenPype" + curr_level_sequence = LevelSequenceLib.get_current_level_sequence() + curr_time = LevelSequenceLib.get_current_time() + is_cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport() + + editor_subsystem = unreal.UnrealEditorSubsystem() + vp_loc, vp_rot = editor_subsystem.get_level_viewport_camera_info() + + root = "/Game/Ayon" asset_dir = container.get('namespace') context = representation.get("context") + hierarchy = context.get('hierarchy').split("/") + sequence = None master_level = None if create_sequences: - hierarchy = context.get('hierarchy').split("/") h_dir = f"{root}/{hierarchy[0]}" h_asset = hierarchy[0] master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" @@ -806,7 +710,7 @@ class LayoutLoader(plugin.Loader): recursive_paths=False) levels = ar.get_assets(filter) - layout_level = levels[0].get_editor_property('object_path') + layout_level = levels[0].get_asset().get_path_name() EditorLevelLibrary.save_all_dirty_levels() EditorLevelLibrary.load_level(layout_level) @@ -830,13 +734,15 @@ class LayoutLoader(plugin.Loader): "parent": str(representation["parent"]), "loaded_assets": loaded_assets } - unreal_pipeline.imprint( + imprint( "{}/{}".format(asset_dir, container.get('container_name')), data) EditorLevelLibrary.save_current_level() + save_dir = f"{root}/{hierarchy[0]}" if create_sequences else asset_dir + asset_content = EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=False) + save_dir, recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) @@ -846,6 +752,13 @@ class LayoutLoader(plugin.Loader): elif prev_level: EditorLevelLibrary.load_level(prev_level) + if curr_level_sequence: + LevelSequenceLib.open_level_sequence(curr_level_sequence) + LevelSequenceLib.set_current_time(curr_time) + LevelSequenceLib.set_lock_camera_cut_to_viewport(is_cam_lock) + + editor_subsystem.set_level_viewport_camera_info(vp_loc, vp_rot) + def remove(self, container): """ Delete the layout. 
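# The snapshot/restore around update() above, rewritten as a reusable context
# manager. A sketch using only the LevelSequenceLib calls that appear in this
# file; it runs only inside the Unreal editor.
from contextlib import contextmanager

from unreal import LevelSequenceEditorBlueprintLibrary as LevelSequenceLib

@contextmanager
def preserved_sequencer_state():
    seq = LevelSequenceLib.get_current_level_sequence()
    time = LevelSequenceLib.get_current_time()
    cam_lock = LevelSequenceLib.is_camera_cut_locked_to_viewport()
    try:
        yield
    finally:
        if seq:
            LevelSequenceLib.open_level_sequence(seq)
            LevelSequenceLib.set_current_time(time)
            LevelSequenceLib.set_lock_camera_cut_to_viewport(cam_lock)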
First, check if the assets loaded with the layout @@ -854,10 +767,10 @@ class LayoutLoader(plugin.Loader): data = get_current_project_settings() create_sequences = data["unreal"]["level_sequences_for_layouts"] - root = "/Game/OpenPype" + root = "/Game/Ayon" path = Path(container.get("namespace")) - containers = unreal_pipeline.ls() + containers = ls() layout_containers = [ c for c in containers if (c.get('asset_name') != container.get('asset_name') and @@ -906,7 +819,7 @@ class LayoutLoader(plugin.Loader): package_paths=[f"{root}/{ms_asset}"], recursive_paths=False) levels = ar.get_assets(_filter) - master_level = levels[0].get_editor_property('object_path') + master_level = levels[0].get_asset().get_path_name() sequences = [master_sequence] diff --git a/openpype/hosts/unreal/plugins/load/load_layout_existing.py b/openpype/hosts/unreal/plugins/load/load_layout_existing.py index 3ce99f8ef6..929a9a1399 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout_existing.py +++ b/openpype/hosts/unreal/plugins/load/load_layout_existing.py @@ -4,18 +4,15 @@ from pathlib import Path import unreal from unreal import EditorLevelLibrary -from bson.objectid import ObjectId - -from openpype import pipeline +from openpype.client import get_representations from openpype.pipeline import ( discover_loader_plugins, loaders_from_representation, load_container, get_representation_path, - AVALON_CONTAINER_ID, + AYON_CONTAINER_ID, legacy_io, ) -from openpype.api import get_current_project_settings from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as upipeline @@ -31,7 +28,18 @@ class ExistingLayoutLoader(plugin.Loader): label = "Load Layout on Existing Scene" icon = "code-fork" color = "orange" - ASSET_ROOT = "/Game/OpenPype" + ASSET_ROOT = "/Game/Ayon" + + delete_unmatched_assets = True + + @classmethod + def apply_settings(cls, project_settings, *args, **kwargs): + super(ExistingLayoutLoader, cls).apply_settings( + project_settings, *args, **kwargs + ) + cls.delete_unmatched_assets = ( + project_settings["unreal"]["delete_unmatched_assets"] + ) @staticmethod def _create_container( @@ -51,8 +59,8 @@ class ExistingLayoutLoader(plugin.Loader): container = obj.get_asset() data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -81,50 +89,26 @@ class ExistingLayoutLoader(plugin.Loader): raise NotImplementedError( f"Unreal version {ue_major} not supported") - def _get_transform(self, ext, import_data, lasset): - conversion = unreal.Matrix.IDENTITY.transform() - fbx_tuning = unreal.Matrix.IDENTITY.transform() + def _transform_from_basis(self, transform, basis): + """Transform a transform from a basis to a new basis.""" + # Get the basis matrix + basis_matrix = unreal.Matrix( + basis[0], + basis[1], + basis[2], + basis[3] + ) + transform_matrix = unreal.Matrix( + transform[0], + transform[1], + transform[2], + transform[3] + ) - basis = unreal.Matrix( - lasset.get('basis')[0], - lasset.get('basis')[1], - lasset.get('basis')[2], - lasset.get('basis')[3] - ).transform() - transform = unreal.Matrix( - lasset.get('transform_matrix')[0], - lasset.get('transform_matrix')[1], - lasset.get('transform_matrix')[2], - lasset.get('transform_matrix')[3] - ).transform() + new_transform = ( + basis_matrix.get_inverse() * transform_matrix * basis_matrix) - # Check for the conversion settings. 
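# _transform_from_basis computes B^-1 * T * B, i.e. it re-expresses the stored
# transform T in the coordinate basis B shipped with the layout. A tiny
# pure-Python 2x2 demo of the same identity (B here flips the Y axis and is
# its own inverse, like a handedness conversion):
def matmul(a, b):
    return [[sum(a[i][k] * b[k][j] for k in range(len(b)))
             for j in range(len(b[0]))] for i in range(len(a))]

B = [[1, 0], [0, -1]]            # flip Y; B == B^-1
T = [[0, -1], [1, 0]]            # rotate +90 degrees
print(matmul(matmul(B, T), B))   # -> [[0, 1], [-1, 0]], i.e. -90 degrees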
We cannot access - # the alembic conversion settings, so we assume that - # the maya ones have been applied. - if ext == '.fbx': - loc = import_data.import_translation - rot = import_data.import_rotation.to_vector() - scale = import_data.import_uniform_scale - conversion = unreal.Transform( - location=[loc.x, loc.y, loc.z], - rotation=[rot.x, rot.y, rot.z], - scale=[-scale, scale, scale] - ) - fbx_tuning = unreal.Transform( - rotation=[180.0, 0.0, 90.0], - scale=[1.0, 1.0, 1.0] - ) - elif ext == '.abc': - # This is the standard conversion settings for - # alembic files from Maya. - conversion = unreal.Transform( - location=[0.0, 0.0, 0.0], - rotation=[0.0, 0.0, 0.0], - scale=[1.0, -1.0, 1.0] - ) - - new_transform = (basis.inverse() * transform * basis) - return fbx_tuning * conversion.inverse() * new_transform + return new_transform.transform() def _spawn_actor(self, obj, lasset): actor = EditorLevelLibrary.spawn_actor_from_object( @@ -132,16 +116,13 @@ class ExistingLayoutLoader(plugin.Loader): ) actor.set_actor_label(lasset.get('instance_name')) - smc = actor.get_editor_property('static_mesh_component') - mesh = smc.get_editor_property('static_mesh') - import_data = mesh.get_editor_property('asset_import_data') - filename = import_data.get_first_filename() - path = Path(filename) - transform = self._get_transform( - path.suffix, import_data, lasset) + transform = lasset.get('transform_matrix') + basis = lasset.get('basis') - actor.set_actor_transform(transform, False, True) + computed_transform = self._transform_from_basis(transform, basis) + + actor.set_actor_transform(computed_transform, False, True) @staticmethod def _get_fbx_loader(loaders, family): @@ -179,14 +160,7 @@ class ExistingLayoutLoader(plugin.Loader): return None - def _load_asset(self, representation, version, instance_name, family): - valid_formats = ['fbx', 'abc'] - - repr_data = legacy_io.find_one({ - "type": "representation", - "parent": ObjectId(version), - "name": {"$in": valid_formats} - }) + def _load_asset(self, repr_data, representation, instance_name, family): repr_format = repr_data.get('name') all_loaders = discover_loader_plugins() @@ -221,10 +195,21 @@ class ExistingLayoutLoader(plugin.Loader): return assets - def _process(self, lib_path): - data = get_current_project_settings() - delete_unmatched = data["unreal"]["delete_unmatched_assets"] + def _get_valid_repre_docs(self, project_name, version_ids): + valid_formats = ['fbx', 'abc'] + repre_docs = list(get_representations( + project_name, + representation_names=valid_formats, + version_ids=version_ids + )) + repre_doc_by_version_id = {} + for repre_doc in repre_docs: + version_id = str(repre_doc["parent"]) + repre_doc_by_version_id[version_id] = repre_doc + return repre_doc_by_version_id + + def _process(self, lib_path, project_name): ar = unreal.AssetRegistryHelpers.get_asset_registry() actors = EditorLevelLibrary.get_all_level_actors() @@ -232,30 +217,44 @@ class ExistingLayoutLoader(plugin.Loader): with open(lib_path, "r") as fp: data = json.load(fp) - layout_data = [] - + elements = [] + repre_ids = set() # Get all the representations in the JSON from the database. 
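# The shape each layout element is assumed to have, inferred only from the
# keys this loader reads; all values below are hypothetical placeholders:
layout_element = {
    "instance_name": "chairMain_01",
    "representation": "<representation id>",
    "version": "<version id>",
    "family": "model",
    "transform_matrix": [[1, 0, 0, 0], [0, 1, 0, 0],
                         [0, 0, 1, 0], [0, 0, 0, 1]],
    "basis": [[1, 0, 0, 0], [0, 1, 0, 0],
              [0, 0, 1, 0], [0, 0, 0, 1]],
}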
for element in data: - if element.get('representation'): - layout_data.append(( - pipeline.legacy_io.find_one({ - "_id": ObjectId(element.get('representation')) - }), - element - )) + repre_id = element.get('representation') + if repre_id: + repre_ids.add(repre_id) + elements.append(element) + repre_docs = get_representations( + project_name, representation_ids=repre_ids + ) + repre_docs_by_id = { + str(repre_doc["_id"]): repre_doc + for repre_doc in repre_docs + } + layout_data = [] + version_ids = set() + for element in elements: + repre_id = element.get("representation") + repre_doc = repre_docs_by_id.get(repre_id) + if not repre_doc: + raise AssertionError("Representation not found") + if not (repre_doc.get('data') or repre_doc['data'].get('path')): + raise AssertionError("Representation does not have path") + if not repre_doc.get('context'): + raise AssertionError("Representation does not have context") + + layout_data.append((repre_doc, element)) + version_ids.add(repre_doc["parent"]) + + # Prequery valid repre documents for all elements at once + valid_repre_doc_by_version_id = self._get_valid_repre_docs( + project_name, version_ids) containers = [] actors_matched = [] for (repr_data, lasset) in layout_data: - if not repr_data: - raise AssertionError("Representation not found") - if not (repr_data.get('data') or - repr_data.get('data').get('path')): - raise AssertionError("Representation does not have path") - if not repr_data.get('context'): - raise AssertionError("Representation does not have context") - # For every actor in the scene, check if it has a representation in # those we got from the JSON. If so, create a container for it. # Otherwise, remove it from the scene. @@ -294,9 +293,12 @@ class ExistingLayoutLoader(plugin.Loader): containers.append(container) # Set the transform for the actor. 
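# Note on the guard above: `not (doc.get('data') or doc['data'].get('path'))`
# only raises when *both* lookups are falsy, so a document with data but no
# path slips through. A per-field check that fails on a missing path alone
# would be (sketch):
def has_path(repre_doc):
    data = repre_doc.get("data") or {}
    return bool(data.get("path"))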
- transform = self._get_transform( - path.suffix, import_data, lasset) - actor.set_actor_transform(transform, False, True) + transform = lasset.get('transform_matrix') + basis = lasset.get('basis') + + computed_transform = self._transform_from_basis( + transform, basis) + actor.set_actor_transform(computed_transform, False, True) actors_matched.append(actor) found = True @@ -339,8 +341,8 @@ class ExistingLayoutLoader(plugin.Loader): continue assets = self._load_asset( + valid_repre_doc_by_version_id.get(lasset.get('version')), lasset.get('representation'), - lasset.get('version'), lasset.get('instance_name'), lasset.get('family') ) @@ -360,7 +362,7 @@ class ExistingLayoutLoader(plugin.Loader): continue if actor not in actors_matched: self.log.warning(f"Actor {actor.get_name()} not matched.") - if delete_unmatched: + if self.delete_unmatched_assets: EditorLevelLibrary.destroy_actor(actor) return containers @@ -377,7 +379,8 @@ class ExistingLayoutLoader(plugin.Loader): if not curr_level: raise AssertionError("Current level not saved") - containers = self._process(self.fname) + project_name = context["project"]["name"] + containers = self._process(self.fname, project_name) curr_level_path = Path( curr_level.get_outer().get_path_name()).parent.as_posix() @@ -389,8 +392,8 @@ class ExistingLayoutLoader(plugin.Loader): container=container_name, path=curr_level_path) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": curr_level_path, "container_name": container_name, @@ -407,7 +410,8 @@ class ExistingLayoutLoader(plugin.Loader): asset_dir = container.get('namespace') source_path = get_representation_path(representation) - containers = self._process(source_path) + project_name = legacy_io.active_project() + containers = self._process(source_path, project_name) data = { "representation": str(representation["_id"]), diff --git a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py index e316d255e9..7591d5582f 100644 --- a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py +++ b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py @@ -4,7 +4,7 @@ import os from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -70,8 +70,8 @@ class SkeletalMeshAlembicLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and openpype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and ayon container + root = "/Game/Ayon/Assets" asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -98,8 +98,8 @@ class SkeletalMeshAlembicLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -110,7 +110,7 @@ class SkeletalMeshAlembicLoader(plugin.Loader): "family": context["representation"]["context"]["family"] } unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + f"{asset_dir}/{container_name}", data) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True diff --git 
a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py index 227c5c9292..e9676cde3a 100644 --- a/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py +++ b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py @@ -4,7 +4,7 @@ import os from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -42,8 +42,8 @@ class SkeletalMeshFBXLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and Ayon container + root = "/Game/Ayon/Assets" if options and options.get("asset_dir"): root = options["asset_dir"] asset = context.get('asset').get('name') @@ -103,8 +103,8 @@ class SkeletalMeshFBXLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -115,7 +115,7 @@ class SkeletalMeshFBXLoader(plugin.Loader): "family": context["representation"]["context"]["family"] } unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + f"{asset_dir}/{container_name}", data) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True diff --git a/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py b/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py index c7841cef53..befc7b0ac9 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py @@ -4,7 +4,7 @@ import os from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -75,8 +75,8 @@ class StaticMeshAlembicLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and Ayon container + root = "/Game/Ayon/Assets" asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -108,8 +108,8 @@ class StaticMeshAlembicLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -119,8 +119,7 @@ class StaticMeshAlembicLoader(plugin.Loader): "parent": context["representation"]["parent"], "family": context["representation"]["context"]["family"] } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True @@ -136,7 +135,7 @@ class StaticMeshAlembicLoader(plugin.Loader): source_path = get_representation_path(representation) destination_path = container["namespace"] - task = self.get_task(source_path, destination_path, name, True) + task = self.get_task(source_path, destination_path, name, True, False) # do import fbx and replace existing data 
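# Every loader in these files imprints the same container payload; a helper
# that would centralize it could look like this (hypothetical, not part of
# the patch; field names as they appear throughout the diff):
from openpype.pipeline import AYON_CONTAINER_ID

def make_container_data(context, asset, asset_dir, container_name, loader):
    repre = context["representation"]
    return {
        "schema": "ayon:container-2.0",
        "id": AYON_CONTAINER_ID,
        "asset": asset,
        "namespace": asset_dir,
        "container_name": container_name,
        "loader": loader,
        "representation": repre["_id"],
        "parent": repre["parent"],
        "family": repre["context"]["family"],
    }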
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) diff --git a/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py b/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py index 351c686095..e416256486 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py @@ -4,7 +4,7 @@ import os from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -68,8 +68,8 @@ class StaticMeshFBXLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and Ayon container + root = "/Game/Ayon/Assets" if options and options.get("asset_dir"): root = options["asset_dir"] asset = context.get('asset').get('name') @@ -81,7 +81,8 @@ class StaticMeshFBXLoader(plugin.Loader): tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}", suffix="" + ) container_name += suffix @@ -96,8 +97,8 @@ class StaticMeshFBXLoader(plugin.Loader): container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -107,8 +108,7 @@ class StaticMeshFBXLoader(plugin.Loader): "parent": context["representation"]["parent"], "family": context["representation"]["context"]["family"] } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True diff --git a/openpype/hosts/unreal/plugins/load/load_uasset.py b/openpype/hosts/unreal/plugins/load/load_uasset.py index eccfc7b445..30f63abe39 100644 --- a/openpype/hosts/unreal/plugins/load/load_uasset.py +++ b/openpype/hosts/unreal/plugins/load/load_uasset.py @@ -5,7 +5,7 @@ import shutil from openpype.pipeline import ( get_representation_path, - AVALON_CONTAINER_ID + AYON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -21,6 +21,8 @@ class UAssetLoader(plugin.Loader): icon = "cube" color = "orange" + extension = "uasset" + def load(self, context, name, namespace, options): """Load and containerise representation into Content Browser. 
@@ -38,37 +40,41 @@ class UAssetLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and Ayon container + root = "/Game/Ayon/Assets" asset = context.get('asset').get('name') suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) - + asset_name = f"{asset}_{name}" if asset else f"{name}" tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}", suffix="" + ) - container_name += suffix + unique_number = 1 + while unreal.EditorAssetLibrary.does_directory_exist( + f"{asset_dir}_{unique_number:02}" + ): + unique_number += 1 + + asset_dir = f"{asset_dir}_{unique_number:02}" + container_name = f"{container_name}_{unique_number:02}{suffix}" unreal.EditorAssetLibrary.make_directory(asset_dir) destination_path = asset_dir.replace( - "/Game", - Path(unreal.Paths.project_content_dir()).as_posix(), - 1) + "/Game", Path(unreal.Paths.project_content_dir()).as_posix(), 1) - shutil.copy(self.fname, f"{destination_path}/{name}.uasset") + shutil.copy( + self.fname, + f"{destination_path}/{name}_{unique_number:02}.{self.extension}") # Create Asset Container unreal_pipeline.create_container( container=container_name, path=asset_dir) data = { - "schema": "openpype:container-2.0", - "id": AVALON_CONTAINER_ID, + "schema": "ayon:container-2.0", + "id": AYON_CONTAINER_ID, "asset": asset, "namespace": asset_dir, "container_name": container_name, @@ -76,10 +82,9 @@ class UAssetLoader(plugin.Loader): "loader": str(self.__class__.__name__), "representation": context["representation"]["_id"], "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] + "family": context["representation"]["context"]["family"], } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True @@ -96,10 +101,10 @@ class UAssetLoader(plugin.Loader): asset_dir = container["namespace"] name = representation["context"]["subset"] + unique_number = container["container_name"].split("_")[-2] + destination_path = asset_dir.replace( - "/Game", - Path(unreal.Paths.project_content_dir()).as_posix(), - 1) + "/Game", Path(unreal.Paths.project_content_dir()).as_posix(), 1) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=False, include_folder=True @@ -107,22 +112,24 @@ class UAssetLoader(plugin.Loader): for asset in asset_content: obj = ar.get_asset_by_object_path(asset).get_asset() - if not obj.get_class().get_name() == 'AssetContainer': + if obj.get_class().get_name() != "AyonAssetContainer": unreal.EditorAssetLibrary.delete_asset(asset) update_filepath = get_representation_path(representation) - shutil.copy(update_filepath, f"{destination_path}/{name}.uasset") + shutil.copy( + update_filepath, + f"{destination_path}/{name}_{unique_number}.{self.extension}") - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) + container_path = f'{container["namespace"]}/{container["objectName"]}' # update metadata unreal_pipeline.imprint( container_path, { "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - }) + "parent": 
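# A minimal sketch of the unique-suffix scan used above: probe existing
# directories and bump a two-digit counter until a free name is found.
def unique_dir_name(base, exists):
    """exists: callable returning True if the directory is taken."""
    number = 1
    while exists(f"{base}_{number:02}"):
        number += 1
    return f"{base}_{number:02}", number

# taken = {"/Game/Ayon/Assets/hero/model_01"}
# unique_dir_name("/Game/Ayon/Assets/hero/model", taken.__contains__)
# -> ("/Game/Ayon/Assets/hero/model_02", 2)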
str(representation["parent"]), + } + ) asset_content = unreal.EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True @@ -143,3 +150,13 @@ class UAssetLoader(plugin.Loader): if len(asset_content) == 0: unreal.EditorAssetLibrary.delete_directory(parent_path) + + +class UMapLoader(UAssetLoader): + """Load Level.""" + + families = ["uasset"] + label = "Load Level" + representations = ["umap"] + + extension = "umap" diff --git a/openpype/hosts/unreal/plugins/publish/collect_instance_members.py b/openpype/hosts/unreal/plugins/publish/collect_instance_members.py new file mode 100644 index 0000000000..de10e7b119 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/collect_instance_members.py @@ -0,0 +1,46 @@ +import unreal + +import pyblish.api + + +class CollectInstanceMembers(pyblish.api.InstancePlugin): + """ + Collect members of instance. + + This collector will collect the assets for the families that support to + have them included as External Data, and will add them to the instance + as members. + """ + + order = pyblish.api.CollectorOrder + 0.1 + hosts = ["unreal"] + families = ["camera", "look", "unrealStaticMesh", "uasset"] + label = "Collect Instance Members" + + def process(self, instance): + """Collect members of instance.""" + self.log.info("Collecting instance members") + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + inst_path = instance.data.get('instance_path') + inst_name = inst_path.split('/')[-1] + + pub_instance = ar.get_asset_by_object_path( + f"{inst_path}.{inst_name}").get_asset() + + if not pub_instance: + self.log.error(f"{inst_path}.{inst_name}") + raise RuntimeError(f"Instance {instance} not found.") + + if not pub_instance.get_editor_property("add_external_assets"): + # No external assets in the instance + return + + assets = pub_instance.get_editor_property('asset_data_external') + + members = [asset.get_path_name() for asset in assets] + + self.log.debug(f"Members: {members}") + + instance.data["members"] = members diff --git a/openpype/hosts/unreal/plugins/publish/collect_instances.py b/openpype/hosts/unreal/plugins/publish/collect_instances.py deleted file mode 100644 index 27b711cad6..0000000000 --- a/openpype/hosts/unreal/plugins/publish/collect_instances.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -"""Collect publishable instances in Unreal.""" -import ast -import unreal # noqa -import pyblish.api -from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION -from openpype.pipeline.publish import KnownPublishError - - -class CollectInstances(pyblish.api.ContextPlugin): - """Gather instances by OpenPypePublishInstance class - - This collector finds all paths containing `OpenPypePublishInstance` class - asset - - Identifier: - id (str): "pyblish.avalon.instance" - - """ - - label = "Collect Instances" - order = pyblish.api.CollectorOrder - 0.1 - hosts = ["unreal"] - - def process(self, context): - - ar = unreal.AssetRegistryHelpers.get_asset_registry() - class_name = [ - "/Script/OpenPype", - "OpenPypePublishInstance" - ] if ( - UNREAL_VERSION.major == 5 - and UNREAL_VERSION.minor > 0 - ) else "OpenPypePublishInstance" # noqa - instance_containers = ar.get_assets_by_class(class_name, True) - - for container_data in instance_containers: - asset = container_data.get_asset() - data = unreal.EditorAssetLibrary.get_metadata_tag_values(asset) - data["objectName"] = container_data.asset_name - # convert to strings - data = {str(key): str(value) for (key, value) in data.items()} - if not data.get("family"): - 
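# The new collector above, reduced to its core lookup. A sketch that runs
# only inside the Unreal editor; property names are taken from the plugin:
import unreal

def instance_members(inst_path):
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    inst_name = inst_path.split("/")[-1]
    pub = ar.get_asset_by_object_path(f"{inst_path}.{inst_name}").get_asset()
    if not pub.get_editor_property("add_external_assets"):
        return []
    return [asset.get_path_name()
            for asset in pub.get_editor_property("asset_data_external")]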
raise KnownPublishError("instance has no family") - - # content of container - members = ast.literal_eval(data.get("members")) - self.log.debug(members) - self.log.debug(asset.get_path_name()) - # remove instance container - self.log.info("Creating instance for {}".format(asset.get_name())) - - instance = context.create_instance(asset.get_name()) - instance[:] = members - - # Store the exact members of the object set - instance.data["setMembers"] = members - instance.data["families"] = [data.get("family")] - instance.data["level"] = data.get("level") - instance.data["parent"] = data.get("parent") - - label = "{0} ({1})".format(asset.get_name()[:-4], - data["asset"]) - - instance.data["label"] = label - - instance.data.update(data) diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py index cb28f4bf60..dad0310dfc 100644 --- a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py +++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py @@ -3,6 +3,7 @@ from pathlib import Path import unreal +from openpype.pipeline import get_current_project_name from openpype.pipeline import Anatomy from openpype.hosts.unreal.api import pipeline import pyblish.api @@ -72,8 +73,8 @@ class CollectRenderInstances(pyblish.api.InstancePlugin): new_data["level"] = data.get("level") new_data["output"] = s.get('output') new_data["fps"] = seq.get_display_rate().numerator - new_data["frameStart"] = s.get('frame_range')[0] - new_data["frameEnd"] = s.get('frame_range')[1] + new_data["frameStart"] = int(s.get('frame_range')[0]) + new_data["frameEnd"] = int(s.get('frame_range')[1]) new_data["sequence"] = seq.get_path_name() new_data["master_sequence"] = data["master_sequence"] new_data["master_level"] = data["master_level"] @@ -81,12 +82,13 @@ class CollectRenderInstances(pyblish.api.InstancePlugin): self.log.debug(f"new instance data: {new_data}") try: - project = os.environ.get("AVALON_PROJECT") + project = get_current_project_name() anatomy = Anatomy(project) root = anatomy.roots['renders'] - except Exception: - raise Exception( - "Could not find render root in anatomy settings.") + except Exception as e: + raise Exception(( + "Could not find render root " + "in anatomy settings.")) from e render_dir = f"{root}/{project}/{s.get('output')}" render_path = Path(render_dir) @@ -101,8 +103,8 @@ class CollectRenderInstances(pyblish.api.InstancePlugin): new_instance.data["representations"] = [] repr = { - 'frameStart': s.get('frame_range')[0], - 'frameEnd': s.get('frame_range')[1], + 'frameStart': instance.data["frameStart"], + 'frameEnd': instance.data["frameEnd"], 'name': 'png', 'ext': 'png', 'files': frames, diff --git a/openpype/hosts/unreal/plugins/publish/extract_camera.py b/openpype/hosts/unreal/plugins/publish/extract_camera.py index 4e37cc6a86..16e365ca96 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_camera.py +++ b/openpype/hosts/unreal/plugins/publish/extract_camera.py @@ -3,10 +3,9 @@ import os import unreal -from unreal import EditorAssetLibrary as eal -from unreal import EditorLevelLibrary as ell from openpype.pipeline import publish +from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION class ExtractCamera(publish.Extractor): @@ -18,6 +17,8 @@ class ExtractCamera(publish.Extractor): optional = True def process(self, instance): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + # Define extract output file path staging_dir = self.staging_dir(instance) fbx_filename = 
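# How the collector above resolves where rendered frames live, using the same
# calls the patch introduces; the output name is a hypothetical placeholder:
from openpype.pipeline import Anatomy, get_current_project_name

project = get_current_project_name()
root = Anatomy(project).roots["renders"]
render_dir = f"{root}/{project}/renderCompositingMain"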
"{}.fbx".format(instance.name) @@ -26,23 +27,54 @@ class ExtractCamera(publish.Extractor): self.log.info("Performing extraction..") # Check if the loaded level is the same of the instance - current_level = ell.get_editor_world().get_path_name() + if UNREAL_VERSION.major == 5: + world = unreal.UnrealEditorSubsystem().get_editor_world() + else: + world = unreal.EditorLevelLibrary.get_editor_world() + current_level = world.get_path_name() assert current_level == instance.data.get("level"), \ "Wrong level loaded" - for member in instance[:]: - data = eal.find_asset_data(member) - if data.asset_class == "LevelSequence": - ar = unreal.AssetRegistryHelpers.get_asset_registry() - sequence = ar.get_asset_by_object_path(member).get_asset() - unreal.SequencerTools.export_fbx( - ell.get_editor_world(), - sequence, - sequence.get_bindings(), - unreal.FbxExportOption(), - os.path.join(staging_dir, fbx_filename) - ) - break + for member in instance.data.get('members'): + data = ar.get_asset_by_object_path(member) + if UNREAL_VERSION.major == 5: + is_level_sequence = ( + data.asset_class_path.asset_name == "LevelSequence") + else: + is_level_sequence = (data.asset_class == "LevelSequence") + + if is_level_sequence: + sequence = data.get_asset() + if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor >= 1: + params = unreal.SequencerExportFBXParams( + world=world, + root_sequence=sequence, + sequence=sequence, + bindings=sequence.get_bindings(), + master_tracks=sequence.get_master_tracks(), + fbx_file_name=os.path.join(staging_dir, fbx_filename) + ) + unreal.SequencerTools.export_level_sequence_fbx(params) + elif UNREAL_VERSION.major == 4 and UNREAL_VERSION.minor == 26: + unreal.SequencerTools.export_fbx( + world, + sequence, + sequence.get_bindings(), + unreal.FbxExportOption(), + os.path.join(staging_dir, fbx_filename) + ) + else: + # Unreal 5.0 or 4.27 + unreal.SequencerTools.export_level_sequence_fbx( + world, + sequence, + sequence.get_bindings(), + unreal.FbxExportOption(), + os.path.join(staging_dir, fbx_filename) + ) + + if not os.path.isfile(os.path.join(staging_dir, fbx_filename)): + raise RuntimeError("Failed to extract camera") if "representations" not in instance.data: instance.data["representations"] = [] diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index cac7991f00..57e7957575 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py +++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -48,7 +48,7 @@ class ExtractLayout(publish.Extractor): # Search the reference to the Asset Container for the object path = unreal.Paths.get_path(mesh.get_path_name()) filter = unreal.ARFilter( - class_names=["AssetContainer"], package_paths=[path]) + class_names=["AyonAssetContainer"], package_paths=[path]) ar = unreal.AssetRegistryHelpers.get_asset_registry() try: asset_container = ar.get_assets(filter)[0].get_asset() diff --git a/openpype/hosts/unreal/plugins/publish/extract_look.py b/openpype/hosts/unreal/plugins/publish/extract_look.py index f999ad8651..4b32b4eb95 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_look.py +++ b/openpype/hosts/unreal/plugins/publish/extract_look.py @@ -29,13 +29,13 @@ class ExtractLook(publish.Extractor): for member in instance: asset = ar.get_asset_by_object_path(member) - object = asset.get_asset() + obj = asset.get_asset() name = asset.get_editor_property('asset_name') json_element = {'material': str(name)} - material_obj = 
object.get_editor_property('static_materials')[0] + material_obj = obj.get_editor_property('static_materials')[0] material = material_obj.material_interface base_color = mat_lib.get_material_property_input_node( diff --git a/openpype/hosts/unreal/plugins/publish/extract_render.py b/openpype/hosts/unreal/plugins/publish/extract_render.py deleted file mode 100644 index 8ff38fbee0..0000000000 --- a/openpype/hosts/unreal/plugins/publish/extract_render.py +++ /dev/null @@ -1,48 +0,0 @@ -from pathlib import Path - -import unreal - -from openpype.pipeline import publish - - -class ExtractRender(publish.Extractor): - """Extract render.""" - - label = "Extract Render" - hosts = ["unreal"] - families = ["render"] - optional = True - - def process(self, instance): - # Define extract output file path - stagingdir = self.staging_dir(instance) - - # Perform extraction - self.log.info("Performing extraction..") - - # Get the render output directory - project_dir = unreal.Paths.project_dir() - render_dir = (f"{project_dir}/Saved/MovieRenders/" - f"{instance.data['subset']}") - - assert unreal.Paths.directory_exists(render_dir), \ - "Render directory does not exist" - - render_path = Path(render_dir) - - frames = [] - - for x in render_path.iterdir(): - if x.is_file() and x.suffix == '.png': - frames.append(str(x)) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - render_representation = { - 'name': 'png', - 'ext': 'png', - 'files': frames, - "stagingDir": stagingdir, - } - instance.data["representations"].append(render_representation) diff --git a/openpype/hosts/unreal/plugins/publish/extract_uasset.py b/openpype/hosts/unreal/plugins/publish/extract_uasset.py index 89d779d368..48b62faa97 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_uasset.py +++ b/openpype/hosts/unreal/plugins/publish/extract_uasset.py @@ -11,18 +11,25 @@ class ExtractUAsset(publish.Extractor): label = "Extract UAsset" hosts = ["unreal"] - families = ["uasset"] + families = ["uasset", "umap"] optional = True def process(self, instance): + extension = ( + "umap" if "umap" in instance.data.get("families") else "uasset") ar = unreal.AssetRegistryHelpers.get_asset_registry() self.log.info("Performing extraction..") - staging_dir = self.staging_dir(instance) - filename = "{}.uasset".format(instance.name) + filename = f"{instance.name}.{extension}" - obj = instance[0] + members = instance.data.get("members", []) + + if not members: + raise RuntimeError("No members found in instance.") + + # UAsset publishing supports only one member + obj = members[0] asset = ar.get_asset_by_object_path(obj).get_asset() sys_path = unreal.SystemLibrary.get_system_path(asset) @@ -30,13 +37,15 @@ class ExtractUAsset(publish.Extractor): shutil.copy(sys_path, staging_dir) + self.log.info(f"instance.data: {instance.data}") + if "representations" not in instance.data: instance.data["representations"] = [] representation = { - 'name': 'uasset', - 'ext': 'uasset', - 'files': filename, + "name": extension, + "ext": extension, + "files": filename, "stagingDir": staging_dir, } instance.data["representations"].append(representation) diff --git a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py index 87f1338ee8..76bb25fac3 100644 --- a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py +++ b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py @@ -20,6 +20,7 @@ class 
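# The extension pick in ExtractUAsset above, as a standalone line:
families = ["uasset", "umap"]            # e.g. instance.data["families"]
extension = "umap" if "umap" in families else "uasset"
assert extension == "umap"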
ValidateSequenceFrames(pyblish.api.InstancePlugin): def process(self, instance): representations = instance.data.get("representations") for repr in representations: + data = instance.data.get("assetEntity", {}).get("data", {}) patterns = [clique.PATTERNS["frames"]] collections, remainder = clique.assemble( repr["files"], minimum_items=1, patterns=patterns) @@ -30,8 +31,8 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin): frames = list(collection.indexes) current_range = (frames[0], frames[-1]) - required_range = (instance.data["frameStart"], - instance.data["frameEnd"]) + required_range = (data["clipIn"], + data["clipOut"]) if current_range != required_range: raise ValueError(f"Invalid frame range: {current_range} - " diff --git a/openpype/hosts/unreal/ue_workers.py b/openpype/hosts/unreal/ue_workers.py new file mode 100644 index 0000000000..2b7e1375e6 --- /dev/null +++ b/openpype/hosts/unreal/ue_workers.py @@ -0,0 +1,392 @@ +import json +import os +import platform +import re +import subprocess +from distutils import dir_util +from pathlib import Path +from typing import List, Union +import tempfile +from distutils.dir_util import copy_tree + +import openpype.hosts.unreal.lib as ue_lib + +from qtpy import QtCore + + +def parse_comp_progress(line: str, progress_signal: QtCore.Signal(int)): + match = re.search(r"\[[1-9]+/[0-9]+]", line) + if match is not None: + split: list[str] = match.group().split("/") + curr: float = float(split[0][1:]) + total: float = float(split[1][:-1]) + progress_signal.emit(int((curr / total) * 100.0)) + + +def parse_prj_progress(line: str, progress_signal: QtCore.Signal(int)): + match = re.search("@progress", line) + if match is not None: + percent_match = re.search(r"\d{1,3}", line) + progress_signal.emit(int(percent_match.group())) + + +def retrieve_exit_code(line: str): + match = re.search(r"ExitCode=\d+", line) + if match is not None: + split: list[str] = match.group().split("=") + return int(split[1]) + + return None + + +class UEProjectGenerationWorker(QtCore.QObject): + finished = QtCore.Signal(str) + failed = QtCore.Signal(str) + progress = QtCore.Signal(int) + log = QtCore.Signal(str) + stage_begin = QtCore.Signal(str) + + ue_version: str = None + project_name: str = None + env = None + engine_path: Path = None + project_dir: Path = None + dev_mode = False + + def setup(self, ue_version: str, + project_name, + engine_path: Path, + project_dir: Path, + dev_mode: bool = False, + env: dict = None): + + self.ue_version = ue_version + self.project_dir = project_dir + self.env = env or os.environ + + preset = ue_lib.get_project_settings( + project_name + )["unreal"]["project_setup"] + + if dev_mode or preset["dev_mode"]: + self.dev_mode = True + + self.project_name = project_name + self.engine_path = engine_path + + def run(self): + # engine_path should be the location of UE_X.X folder + + ue_editor_exe = ue_lib.get_editor_exe_path(self.engine_path, + self.ue_version) + cmdlet_project = ue_lib.get_path_to_cmdlet_project(self.ue_version) + project_file = self.project_dir / f"{self.project_name}.uproject" + + print("--- Generating a new project ...") + # 1st stage + stage_count = 2 + if self.dev_mode: + stage_count = 4 + + self.stage_begin.emit( + ("Generating a new UE project ... 1 out of " + f"{stage_count}")) + + # Need to copy the commandlet project to a temporary folder where + # users don't need admin rights to write to. 
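# What the progress/exit-code parsers above match, shown on representative
# lines of UBT/UAT output (runnable demo):
import re

line = "[3/42] Compile AyonLib.cpp"
m = re.search(r"\[[1-9]+/[0-9]+]", line)
curr, total = (float(part.strip("[]")) for part in m.group().split("/"))
print(int((curr / total) * 100.0))   # -> 7

exit_line = "RunUAT.bat ERROR: ExitCode=6"
code = int(re.search(r"ExitCode=\d+", exit_line).group().split("=")[1])
print(code)                          # -> 6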
+ cmdlet_tmp = tempfile.TemporaryDirectory() + cmdlet_filename = cmdlet_project.name + cmdlet_dir = cmdlet_project.parent.as_posix() + cmdlet_tmp_name = Path(cmdlet_tmp.name) + cmdlet_tmp_file = cmdlet_tmp_name.joinpath(cmdlet_filename) + copy_tree( + cmdlet_dir, + cmdlet_tmp_name.as_posix()) + + commandlet_cmd = [ + f"{ue_editor_exe.as_posix()}", + f"{cmdlet_tmp_file.as_posix()}", + "-run=AyonGenerateProject", + f"{project_file.resolve().as_posix()}", + ] + + if self.dev_mode: + commandlet_cmd.append("-GenerateCode") + + gen_process = subprocess.Popen(commandlet_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + for line in gen_process.stdout: + decoded_line = line.decode(errors="replace") + print(decoded_line, end="") + self.log.emit(decoded_line) + gen_process.stdout.close() + return_code = gen_process.wait() + + cmdlet_tmp.cleanup() + + if return_code and return_code != 0: + msg = ( + f"Failed to generate {self.project_name} " + f"project! Exited with return code {return_code}" + ) + self.failed.emit(msg, return_code) + raise RuntimeError(msg) + + print("--- Project has been generated successfully.") + self.stage_begin.emit( + (f"Writing the Engine ID of the build UE ... 1" + f" out of {stage_count}")) + + if not project_file.is_file(): + msg = ("Failed to write the Engine ID into .uproject file! Can " + "not read!") + self.failed.emit(msg) + raise RuntimeError(msg) + + with open(project_file.as_posix(), mode="r+") as pf: + pf_json = json.load(pf) + pf_json["EngineAssociation"] = ue_lib.get_build_id( + self.engine_path, + self.ue_version + ) + print(pf_json["EngineAssociation"]) + pf.seek(0) + json.dump(pf_json, pf, indent=4) + pf.truncate() + print("--- Engine ID has been written into the project file") + + self.progress.emit(90) + if self.dev_mode: + # 2nd stage + self.stage_begin.emit( + (f"Generating project files ... 2 out of " + f"{stage_count}")) + + self.progress.emit(0) + ubt_path = ue_lib.get_path_to_ubt(self.engine_path, + self.ue_version) + + arch = "Win64" + if platform.system().lower() == "windows": + arch = "Win64" + elif platform.system().lower() == "linux": + arch = "Linux" + elif platform.system().lower() == "darwin": + # we need to test this out + arch = "Mac" + + gen_prj_files_cmd = [ubt_path.as_posix(), + "-projectfiles", + f"-project={project_file}", + "-progress"] + gen_proc = subprocess.Popen(gen_prj_files_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + for line in gen_proc.stdout: + decoded_line: str = line.decode(errors="replace") + print(decoded_line, end="") + self.log.emit(decoded_line) + parse_prj_progress(decoded_line, self.progress) + + gen_proc.stdout.close() + return_code = gen_proc.wait() + + if return_code and return_code != 0: + msg = ("Failed to generate project files! " + f"Exited with return code {return_code}") + self.failed.emit(msg, return_code) + raise RuntimeError(msg) + + self.stage_begin.emit( + f"Building the project ... 
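# The read-seek-dump-truncate pattern used above to stamp the engine id into
# the .uproject file, in miniature (runnable; the build id is hypothetical):
import json
import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".uproject")
os.close(fd)
with open(path, "w") as pf:
    json.dump({"FileVersion": 3, "EngineAssociation": ""}, pf)

with open(path, "r+") as pf:
    pf_json = json.load(pf)
    pf_json["EngineAssociation"] = "{0000-ayon-build-id}"
    pf.seek(0)
    json.dump(pf_json, pf, indent=4)
    pf.truncate()   # drop stale trailing bytes from the shorter original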
3 out of {stage_count}") + self.progress.emit(0) + # 3rd stage + build_prj_cmd = [ubt_path.as_posix(), + f"-ModuleWithSuffix={self.project_name},3555", + arch, + "Development", + "-TargetType=Editor", + f"-Project={project_file}", + f"{project_file}", + "-IgnoreJunk"] + + build_prj_proc = subprocess.Popen(build_prj_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + for line in build_prj_proc.stdout: + decoded_line: str = line.decode(errors="replace") + print(decoded_line, end="") + self.log.emit(decoded_line) + parse_comp_progress(decoded_line, self.progress) + + build_prj_proc.stdout.close() + return_code = build_prj_proc.wait() + + if return_code and return_code != 0: + msg = ("Failed to build project! " + f"Exited with return code {return_code}") + self.failed.emit(msg, return_code) + raise RuntimeError(msg) + + # ensure we have PySide2 installed in engine + + self.progress.emit(0) + self.stage_begin.emit( + (f"Checking PySide2 installation... {stage_count} " + f" out of {stage_count}")) + python_path = None + if platform.system().lower() == "windows": + python_path = self.engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Win64/python.exe") + + if platform.system().lower() == "linux": + python_path = self.engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Linux/bin/python3") + + if platform.system().lower() == "darwin": + python_path = self.engine_path / ("Engine/Binaries/ThirdParty/" + "Python3/Mac/bin/python3") + + if not python_path: + msg = "Unsupported platform" + self.failed.emit(msg, 1) + raise NotImplementedError(msg) + if not python_path.exists(): + msg = f"Unreal Python not found at {python_path}" + self.failed.emit(msg, 1) + raise RuntimeError(msg) + pyside_cmd = [python_path.as_posix(), + "-m", + "pip", + "install", + "pyside2"] + + pyside_install = subprocess.Popen(pyside_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + for line in pyside_install.stdout: + decoded_line: str = line.decode(errors="replace") + print(decoded_line, end="") + self.log.emit(decoded_line) + + pyside_install.stdout.close() + return_code = pyside_install.wait() + + if return_code and return_code != 0: + msg = ("Failed to create the project! " + "The installation of PySide2 has failed!") + self.failed.emit(msg, return_code) + raise RuntimeError(msg) + + self.progress.emit(100) + self.finished.emit("Project successfully built!") + + +class UEPluginInstallWorker(QtCore.QObject): + finished = QtCore.Signal(str) + installing = QtCore.Signal(str) + failed = QtCore.Signal(str, int) + progress = QtCore.Signal(int) + log = QtCore.Signal(str) + + engine_path: Path = None + env = None + + def setup(self, engine_path: Path, env: dict = None, ): + self.engine_path = engine_path + self.env = env or os.environ + + def _build_and_move_plugin(self, plugin_build_path: Path): + uat_path: Path = ue_lib.get_path_to_uat(self.engine_path) + src_plugin_dir = Path(self.env.get("AYON_UNREAL_PLUGIN", "")) + + if not os.path.isdir(src_plugin_dir): + msg = "Path to the integration plugin is null!" + self.failed.emit(msg, 1) + raise RuntimeError(msg) + + if not uat_path.is_file(): + msg = "Building failed! Path to UAT is invalid!" 
+ self.failed.emit(msg, 1) + raise RuntimeError(msg) + + temp_dir: Path = src_plugin_dir.parent / "Temp" + temp_dir.mkdir(exist_ok=True) + uplugin_path: Path = src_plugin_dir / "Ayon.uplugin" + + # in order to successfully build the plugin, + # It must be built outside the Engine directory and then moved + build_plugin_cmd: List[str] = [f"{uat_path.as_posix()}", + "BuildPlugin", + f"-Plugin={uplugin_path.as_posix()}", + f"-Package={temp_dir.as_posix()}"] + + build_proc = subprocess.Popen(build_plugin_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + return_code: Union[None, int] = None + for line in build_proc.stdout: + decoded_line: str = line.decode(errors="replace") + print(decoded_line, end="") + self.log.emit(decoded_line) + if return_code is None: + return_code = retrieve_exit_code(decoded_line) + parse_comp_progress(decoded_line, self.progress) + + build_proc.stdout.close() + build_proc.wait() + + if return_code and return_code != 0: + msg = ("Failed to build plugin" + f" project! Exited with return code {return_code}") + dir_util.remove_tree(temp_dir.as_posix()) + self.failed.emit(msg, return_code) + raise RuntimeError(msg) + + # Copy the contents of the 'Temp' dir into the + # 'Ayon' directory in the engine + dir_util.copy_tree(temp_dir.as_posix(), + plugin_build_path.as_posix()) + + # We need to also copy the config folder. + # The UAT doesn't include the Config folder in the build + plugin_install_config_path: Path = plugin_build_path / "Config" + src_plugin_config_path = src_plugin_dir / "Config" + + dir_util.copy_tree(src_plugin_config_path.as_posix(), + plugin_install_config_path.as_posix()) + + dir_util.remove_tree(temp_dir.as_posix()) + + def run(self): + src_plugin_dir = Path(self.env.get("AYON_UNREAL_PLUGIN", "")) + + if not os.path.isdir(src_plugin_dir): + msg = "Path to the integration plugin is null!" 
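# Assembling the UAT BuildPlugin invocation used above (a sketch; all paths
# are hypothetical examples, including the RunUAT location):
from pathlib import Path

uat = Path("C:/UE_5.1/Engine/Build/BatchFiles/RunUAT.bat")
uplugin = Path("C:/src/ayon-unreal-plugin/Ayon.uplugin")
package_dir = uplugin.parent / "Temp"   # built outside the engine first
cmd = [str(uat), "BuildPlugin",
       f"-Plugin={uplugin.as_posix()}",
       f"-Package={package_dir.as_posix()}"]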
+ self.failed.emit(msg, 1) + raise RuntimeError(msg) + + # Create a path to the plugin in the engine + op_plugin_path = self.engine_path / "Engine/Plugins/Marketplace" \ + "/Ayon" + + if not op_plugin_path.is_dir(): + self.installing.emit("Installing and building the plugin ...") + op_plugin_path.mkdir(parents=True, exist_ok=True) + + engine_plugin_config_path = op_plugin_path / "Config" + engine_plugin_config_path.mkdir(exist_ok=True) + + dir_util._path_created = {} + + if not (op_plugin_path / "Binaries").is_dir() \ + or not (op_plugin_path / "Intermediate").is_dir(): + self.installing.emit("Building the plugin ...") + print("--- Building the plugin...") + + self._build_and_move_plugin(op_plugin_path) + + self.finished.emit("Plugin successfully installed") diff --git a/openpype/hosts/webpublisher/lib.py b/openpype/hosts/webpublisher/lib.py index 4bc3f1db80..b207f85b46 100644 --- a/openpype/hosts/webpublisher/lib.py +++ b/openpype/hosts/webpublisher/lib.py @@ -30,7 +30,7 @@ def parse_json(path): Returns: (dict) or None if unparsable Raises: - AsssertionError if 'path' doesn't exist + AssertionError if 'path' doesn't exist """ path = path.strip('\"') assert os.path.isfile(path), ( diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index a64b7c2911..06de486f2e 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # flake8: noqa E402 -"""Pype module API.""" +"""OpenPype lib functions.""" # add vendor to sys path based on Python version import sys import os @@ -30,7 +30,7 @@ from .vendor_bin_utils import ( ) from .attribute_definitions import ( - AbtractAttrDef, + AbstractAttrDef, UIDef, UISeparatorDef, @@ -82,9 +82,6 @@ from .mongo import ( validate_mongo_connection, OpenPypeMongoConnection ) -from .anatomy import ( - Anatomy -) from .dateutils import ( get_datetime_data, @@ -97,7 +94,8 @@ from .python_module_tools import ( modules_from_path, recursive_bases_from_class, classes_from_module, - import_module_from_dirpath + import_module_from_dirpath, + is_func_signature_supported, ) from .profiles_filtering import ( @@ -119,36 +117,19 @@ from .transcoding import ( ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, create_project, - is_latest, - any_outdated, - get_asset, - get_linked_assets, - get_latest_version, - get_system_general_anatomy_data, get_workfile_template_key, get_workfile_template_key_from_context, - get_workdir_data, - get_workdir, - get_workdir_with_workdir_data, get_last_workfile_with_version, get_last_workfile, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_doc, - BuildWorkfile, get_creator_by_name, get_custom_workfile_template, - change_timer_to_current_context, - get_custom_workfile_template_by_context, get_custom_workfile_template_by_string_context, get_custom_workfile_template @@ -186,8 +167,6 @@ from .plugin_tools import ( get_subset_name, get_subset_name_with_asset_doc, prepare_template_data, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, source_hash, ) @@ -246,7 +225,7 @@ __all__ = [ "get_ffmpeg_tool_path", "is_oiio_supported", - "AbtractAttrDef", + "AbstractAttrDef", "UIDef", "UISeparatorDef", @@ -265,6 +244,7 @@ __all__ = [ "recursive_bases_from_class", "classes_from_module", "import_module_from_dirpath", + "is_func_signature_supported", "get_transcode_temp_directory", "should_convert_for_ffmpeg", @@ -278,34 +258,17 @@ __all__ = [ "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", - 
"PROJECT_NAME_ALLOWED_SYMBOLS", - "PROJECT_NAME_REGEX", "create_project", - "is_latest", - "any_outdated", - "get_asset", - "get_linked_assets", - "get_latest_version", - "get_system_general_anatomy_data", "get_workfile_template_key", "get_workfile_template_key_from_context", - "get_workdir_data", - "get_workdir", - "get_workdir_with_workdir_data", "get_last_workfile_with_version", "get_last_workfile", - "create_workfile_doc", - "save_workfile_data_to_doc", - "get_workfile_doc", - "BuildWorkfile", "get_creator_by_name", - "change_timer_to_current_context", - "get_custom_workfile_template_by_context", "get_custom_workfile_template_by_string_context", "get_custom_workfile_template", @@ -338,8 +301,6 @@ __all__ = [ "TaskNotSetError", "get_subset_name", "get_subset_name_with_asset_doc", - "filter_pyblish_plugins", - "set_plugin_attributes_from_settings", "source_hash", "format_file_size", @@ -358,8 +319,6 @@ __all__ = [ "terminal", - "Anatomy", - "get_datetime_data", "get_formatted_current_time", diff --git a/openpype/lib/anatomy.py b/openpype/lib/anatomy.py deleted file mode 100644 index 6d339f058f..0000000000 --- a/openpype/lib/anatomy.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code related to project Anatomy was moved -to 'openpype.pipeline.anatomy' please change your imports as soon as -possible. File will be probably removed in OpenPype 3.14.* -""" - -import warnings -import functools - - -class AnatomyDeprecatedWarning(DeprecationWarning): - pass - - -def anatomy_deprecated(func): - """Mark functions as deprecated. - - It will result in a warning being emitted when the function is used. - """ - - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.simplefilter("always", AnatomyDeprecatedWarning) - warnings.warn( - ( - "Deprecated import of 'Anatomy'." - " Class was moved to 'openpype.pipeline.anatomy'." - " Please change your imports of Anatomy in codebase." - ), - category=AnatomyDeprecatedWarning - ) - return func(*args, **kwargs) - return new_func - - -@anatomy_deprecated -def Anatomy(*args, **kwargs): - from openpype.pipeline.anatomy import Anatomy - return Anatomy(*args, **kwargs) diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index 990dc7495a..8adae34827 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -889,7 +889,8 @@ class ApplicationLaunchContext: self.modules_manager = ModulesManager() # Logger - logger_name = "{}-{}".format(self.__class__.__name__, self.app_name) + logger_name = "{}-{}".format(self.__class__.__name__, + self.application.full_name) self.log = Logger.get_logger(logger_name) self.executable = executable @@ -908,24 +909,25 @@ class ApplicationLaunchContext: self.launch_args.extend(self.data.pop("app_args")) # Handle launch environemtns - env = self.data.pop("env", None) - if env is not None and not isinstance(env, dict): + src_env = self.data.pop("env", None) + if src_env is not None and not isinstance(src_env, dict): self.log.warning(( "Passed `env` kwarg has invalid type: {}. Expected: `dict`." " Using `os.environ` instead." 
-            ).format(str(type(env))))
-            env = None
+            ).format(str(type(src_env))))
+            src_env = None
 
-        if env is None:
-            env = os.environ
+        if src_env is None:
+            src_env = os.environ
 
-        # subprocess.Popen keyword arguments
-        self.kwargs = {
-            "env": {
-                key: str(value)
-                for key, value in env.items()
-            }
+        ignored_env = {"QT_API", }
+        env = {
+            key: str(value)
+            for key, value in src_env.items()
+            if key not in ignored_env
         }
+        # subprocess.Popen keyword arguments
+        self.kwargs = {"env": env}
 
         if platform.system().lower() == "windows":
             # Detach new process from currently running process on Windows
@@ -967,7 +969,7 @@
         """Helper to collect application launch hooks from addons.
 
         Module have to have implemented 'get_launch_hook_paths' method which
-        can expect appliction as argument or nothing.
+        can expect application as argument or nothing.
 
         Returns:
             List[str]: Paths to launch hook directories.
@@ -1245,7 +1247,7 @@
             args_len_str = " ({})".format(len(args))
         self.log.info(
             "Launching \"{}\" with args{}: {}".format(
-                self.app_name, args_len_str, args
+                self.application.full_name, args_len_str, args
            )
        )
         self.launch_args = args
@@ -1270,7 +1272,9 @@
                 exc_info=True
             )
 
-        self.log.debug("Launch of {} finished.".format(self.app_name))
+        self.log.debug("Launch of {} finished.".format(
+            self.application.full_name
+        ))
 
         return self.process
@@ -1368,6 +1372,7 @@ def get_app_environments_for_context(
 
     from openpype.modules import ModulesManager
     from openpype.pipeline import AvalonMongoDB, Anatomy
+    from openpype.lib.openpype_version import is_running_staging
 
     # Avalon database connection
     dbcon = AvalonMongoDB()
@@ -1404,6 +1409,8 @@
         "env": env
     })
     data["env"].update(anatomy.root_environments())
+    if is_running_staging():
+        data["env"]["OPENPYPE_IS_STAGING"] = "1"
 
     prepare_app_environments(data, env_group, modules_manager)
     prepare_context_environments(data, env_group, modules_manager)
@@ -1504,8 +1511,8 @@ def prepare_app_environments(
             if key in source_env:
                 source_env[key] = value
 
-    # `added_env_keys` has debug purpose
-    added_env_keys = {app.group.name, app.name}
+    # `app_and_tool_labels` has debug purpose
+    app_and_tool_labels = [app.full_name]
     # Environments for application
     environments = [
         app.group.environment,
@@ -1528,15 +1535,14 @@
     for group_name in sorted(groups_by_name.keys()):
         group = groups_by_name[group_name]
         environments.append(group.environment)
-        added_env_keys.add(group_name)
         for tool_name in sorted(tool_by_group_name[group_name].keys()):
             tool = tool_by_group_name[group_name][tool_name]
             environments.append(tool.environment)
-            added_env_keys.add(tool.name)
+            app_and_tool_labels.append(tool.full_name)
 
     log.debug(
         "Will add environments for apps and tools: {}".format(
-            ", ".join(added_env_keys)
+            ", ".join(app_and_tool_labels)
        )
    )
diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py
index 0df7b16e64..6054d2a92a 100644
--- a/openpype/lib/attribute_definitions.py
+++ b/openpype/lib/attribute_definitions.py
@@ -3,12 +3,13 @@ import re
 import collections
 import uuid
 import json
+import copy
 from abc import ABCMeta, abstractmethod, abstractproperty
 
 import six
 import clique
 
-# Global variable which store attribude definitions by type
+# Global variable which stores attribute definitions by type
 # - default types are registered on import
 _attr_defs_by_type = {}
 
@@ -19,7 +20,7 @@ def register_attr_def_class(cls):
     """Register attribute definition.
 
     Currently, registered definitions
are used to deserialize data to objects.
 
     Attrs:
-        cls (AbtractAttrDef): Non-abstract class to be registered with unique
+        cls (AbstractAttrDef): Non-abstract class to be registered with unique
             'type' attribute.
 
     Raises:
@@ -35,7 +36,7 @@ def get_attributes_keys(attribute_definitions):
     """Collect keys from list of attribute definitions.
 
     Args:
-        attribute_definitions (List[AbtractAttrDef]): Objects of attribute
+        attribute_definitions (List[AbstractAttrDef]): Objects of attribute
             definitions.
 
     Returns:
@@ -56,8 +57,8 @@ def get_default_values(attribute_definitions):
     """Receive default values for attribute definitions.
 
     Args:
-        attribute_definitions (List[AbtractAttrDef]): Attribute definitions for
-            which default values should be collected.
+        attribute_definitions (List[AbstractAttrDef]): Attribute definitions
+            for which default values should be collected.
 
     Returns:
        Dict[str, Any]: Default values for passed attribute definitions.
@@ -75,15 +76,15 @@
 class AbstractAttrDefMeta(ABCMeta):
-    """Meta class to validate existence of 'key' attribute.
+    """Metaclass to validate existence of 'key' attribute.
 
-    Each object of `AbtractAttrDef` mus have defined 'key' attribute.
+    Each object of `AbstractAttrDef` must have a defined 'key' attribute.
     """
 
     def __call__(self, *args, **kwargs):
         obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs)
         init_class = getattr(obj, "__init__class__", None)
-        if init_class is not AbtractAttrDef:
+        if init_class is not AbstractAttrDef:
             raise TypeError("{} super was not called in __init__.".format(
                 type(obj)
             ))
@@ -91,8 +92,8 @@
 @six.add_metaclass(AbstractAttrDefMeta)
-class AbtractAttrDef(object):
-    """Abstraction of attribute definiton.
+class AbstractAttrDef(object):
+    """Abstraction of attribute definition.
 
     Each attribute definition must have implemented validation and
     conversion method.
@@ -144,7 +145,7 @@
         self.disabled = disabled
 
         self._id = uuid.uuid4().hex
-        self.__init__class__ = AbtractAttrDef
+        self.__init__class__ = AbstractAttrDef
 
     @property
     def id(self):
@@ -153,7 +154,15 @@
     def __eq__(self, other):
         if not isinstance(other, self.__class__):
             return False
-        return self.key == other.key
+        return (
+            self.key == other.key
+            and self.hidden == other.hidden
+            and self.default == other.default
+            and self.disabled == other.disabled
+        )
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
 
     @abstractproperty
     def type(self):
@@ -211,7 +220,7 @@
 # UI attribute definitions won't hold value
 # -----------------------------------------
-class UIDef(AbtractAttrDef):
+class UIDef(AbstractAttrDef):
     is_value_def = False
 
     def __init__(self, key=None, default=None, *args, **kwargs):
@@ -236,7 +245,7 @@ class UILabelDef(UIDef):
 
 # Attribute definitions should hold value
 # ---------------------------------------
-class UnknownDef(AbtractAttrDef):
+class UnknownDef(AbstractAttrDef):
     """Definition is not known because definition is not available.
 
     This attribute can be used to keep existing data unchanged but does not
@@ -253,7 +262,7 @@
         return value
 
 
-class HiddenDef(AbtractAttrDef):
+class HiddenDef(AbstractAttrDef):
     """Hidden value of Any type.
This attribute can be used for UI purposes to pass values related @@ -273,7 +282,7 @@ class HiddenDef(AbtractAttrDef): return value -class NumberDef(AbtractAttrDef): +class NumberDef(AbstractAttrDef): """Number definition. Number can have defined minimum/maximum value and decimal points. Value @@ -349,7 +358,7 @@ class NumberDef(AbtractAttrDef): return round(float(value), self.decimals) -class TextDef(AbtractAttrDef): +class TextDef(AbstractAttrDef): """Text definition. Text can have multiline option so endline characters are allowed regex @@ -414,13 +423,12 @@ class TextDef(AbtractAttrDef): return data -class EnumDef(AbtractAttrDef): +class EnumDef(AbstractAttrDef): """Enumeration of single item from items. Args: - items: Items definition that can be coverted to - `collections.OrderedDict`. Dictionary represent {value: label} - relation. + items: Items definition that can be converted using + 'prepare_enum_items'. default: Default value. Must be one key(value) from passed items. """ @@ -433,40 +441,98 @@ class EnumDef(AbtractAttrDef): " defined values on initialization." ).format(self.__class__.__name__)) - items = collections.OrderedDict(items) - if default not in items: - for _key in items.keys(): - default = _key + items = self.prepare_enum_items(items) + item_values = [item["value"] for item in items] + if default not in item_values: + for value in item_values: + default = value break super(EnumDef, self).__init__(key, default=default, **kwargs) self.items = items + self._item_values = set(item_values) def __eq__(self, other): if not super(EnumDef, self).__eq__(other): return False - if set(self.items.keys()) != set(other.items.keys()): - return False - - for key, label in self.items.items(): - if other.items[key] != label: - return False - return True + return self.items == other.items def convert_value(self, value): - if value in self.items: + if value in self._item_values: return value return self.default def serialize(self): - data = super(TextDef, self).serialize() - data["items"] = list(self.items) + data = super(EnumDef, self).serialize() + data["items"] = copy.deepcopy(self.items) return data + @staticmethod + def prepare_enum_items(items): + """Convert items to unified structure. -class BoolDef(AbtractAttrDef): + Output is a list where each item is dictionary with 'value' + and 'label'. + + ```python + # Example output + [ + {"label": "Option 1", "value": 1}, + {"label": "Option 2", "value": 2}, + {"label": "Option 3", "value": 3} + ] + ``` + + Args: + items (Union[Dict[str, Any], List[Any], List[Dict[str, Any]]): The + items to convert. + + Returns: + List[Dict[str, Any]]: Unified structure of items. + """ + + output = [] + if isinstance(items, dict): + for value, label in items.items(): + output.append({"label": label, "value": value}) + + elif isinstance(items, (tuple, list, set)): + for item in items: + if isinstance(item, dict): + # Validate if 'value' is available + if "value" not in item: + raise KeyError("Item does not contain 'value' key.") + + if "label" not in item: + item["label"] = str(item["value"]) + elif isinstance(item, (list, tuple)): + if len(item) == 2: + value, label = item + elif len(item) == 1: + value = item[0] + label = str(value) + else: + raise ValueError(( + "Invalid items count {}." + " Expected 1 or 2. 
Value: {}" + ).format(len(item), str(item))) + + item = {"label": label, "value": value} + else: + item = {"label": str(item), "value": item} + output.append(item) + + else: + raise TypeError( + "Unknown type for enum items '{}'".format(type(items)) + ) + + return output + + +class BoolDef(AbstractAttrDef): """Boolean representation. Args: @@ -711,7 +777,7 @@ class FileDefItem(object): return output -class FileDef(AbtractAttrDef): +class FileDef(AbstractAttrDef): """File definition. It is possible to define filters of allowed file extensions and if supports folders. @@ -829,7 +895,7 @@ def serialize_attr_def(attr_def): """Serialize attribute definition to data. Args: - attr_def (AbtractAttrDef): Attribute definition to serialize. + attr_def (AbstractAttrDef): Attribute definition to serialize. Returns: Dict[str, Any]: Serialized data. @@ -842,7 +908,7 @@ def serialize_attr_defs(attr_defs): """Serialize attribute definitions to data. Args: - attr_defs (List[AbtractAttrDef]): Attribute definitions to serialize. + attr_defs (List[AbstractAttrDef]): Attribute definitions to serialize. Returns: List[Dict[str, Any]]: Serialized data. diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index 12f4a5198b..a9ae27cb79 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -1,6 +1,5 @@ """Should be used only inside of hosts.""" -import os -import copy + import platform import logging import functools @@ -10,17 +9,12 @@ import six from openpype.client import ( get_project, - get_assets, get_asset_by_name, - get_last_version_by_subset_name, - get_workfile_info, ) from openpype.client.operations import ( CURRENT_ASSET_DOC_SCHEMA, CURRENT_PROJECT_SCHEMA, CURRENT_PROJECT_CONFIG_SCHEMA, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, ) from .profiles_filtering import filter_profiles from .path_templates import StringTemplate @@ -128,70 +122,6 @@ def with_pipeline_io(func): return wrapped -@deprecated("openpype.pipeline.context_tools.is_representation_from_latest") -def is_latest(representation): - """Return whether the representation is from latest version - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import is_representation_from_latest - - return is_representation_from_latest(representation) - - -@deprecated("openpype.pipeline.load.any_outdated_containers") -def any_outdated(): - """Return whether the current scene has any outdated content. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.load import any_outdated_containers - - return any_outdated_containers() - - -@deprecated("openpype.pipeline.context_tools.get_current_project_asset") -def get_asset(asset_name=None): - """ Returning asset document from database by its name. - - Doesn't count with duplicities on asset names! 
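As an aside on 'prepare_enum_items' above: all three accepted input shapes normalize to the same list-of-dicts structure. A short runnable sketch (the option values are made up for the demo; the import path is the file being patched):

```python
from openpype.lib.attribute_definitions import EnumDef

# Three equivalent ways to describe the same enumeration.
as_dict = {1: "Option 1", 2: "Option 2"}
as_pairs = [(1, "Option 1"), (2, "Option 2")]
as_items = [
    {"value": 1, "label": "Option 1"},
    {"value": 2},  # missing 'label' is filled with str(value) -> "2"
]

for items in (as_dict, as_pairs, as_items):
    # Each call returns [{"label": ..., "value": ...}, ...]
    print(EnumDef.prepare_enum_items(items))
```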
- - Args: - asset_name (str) - - Returns: - (MongoDB document) - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_current_project_asset - - return get_current_project_asset(asset_name=asset_name) - - -@deprecated("openpype.pipeline.template_data.get_general_template_data") -def get_system_general_anatomy_data(system_settings=None): - """ - Deprecated: - Function will be removed after release version 3.15.* - """ - from openpype.pipeline.template_data import get_general_template_data - - return get_general_template_data(system_settings) - - @deprecated("openpype.client.get_linked_asset_ids") def get_linked_asset_ids(asset_doc): """Return linked asset ids for `asset_doc` from DB @@ -214,66 +144,6 @@ def get_linked_asset_ids(asset_doc): return get_linked_asset_ids(project_name, asset_doc=asset_doc) -@deprecated("openpype.client.get_linked_assets") -def get_linked_assets(asset_doc): - """Return linked assets for `asset_doc` from DB - - Args: - asset_doc (dict): Asset document from DB - - Returns: - (list) Asset documents of input links for passed asset doc. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline import legacy_io - from openpype.client import get_linked_assets - - project_name = legacy_io.active_project() - - return get_linked_assets(project_name, asset_doc=asset_doc) - - -@deprecated("openpype.client.get_last_version_by_subset_name") -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. - - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (AvalonMongoDB, optional): Avalon Mongo connection with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not project_name: - if not dbcon: - from openpype.pipeline import legacy_io - - log.debug("Using `legacy_io` for query.") - dbcon = legacy_io - # Make sure is installed - dbcon.install() - - project_name = dbcon.active_project() - - return get_last_version_by_subset_name( - project_name, subset_name, asset_name=asset_name - ) - - @deprecated( "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( @@ -361,142 +231,6 @@ def get_workfile_template_key( ) -@deprecated("openpype.pipeline.template_data.get_template_data") -def get_workdir_data(project_doc, asset_doc, task_name, host_name): - """Prepare data for workdir template filling from entered information. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. - - Returns: - dict: Data prepared for filling workdir template. 
- - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.template_data import get_template_data - - return get_template_data( - project_doc, asset_doc, task_name, host_name - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir_with_workdir_data( - workdir_data, anatomy=None, project_name=None, template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - It is possible to pass only project's name instead of project's anatomy but - one of them **must** be entered. It is preferred to enter anatomy if is - available as initialization of a new Anatomy object may be time consuming. - - Args: - workdir_data (dict): Data to fill workdir template. - anatomy (Anatomy): Anatomy object for specific project. Optional if - `project_name` is entered. - project_name (str): Project's name. Optional if `anatomy` is entered - otherwise Anatomy object is created with using the project name. - template_key (str): Key of work templates in anatomy templates. If not - passed `get_workfile_template_key_from_context` is used to get it. - dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key' - and 'project_name' are not passed. - - Returns: - TemplateResult: Workdir path. - - Raises: - ValueError: When both `anatomy` and `project_name` are set to None. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - if not anatomy and not project_name: - raise ValueError(( - "Missing required arguments one of `project_name` or `anatomy`" - " must be entered." - )) - - if not project_name: - project_name = anatomy.project_name - - from openpype.pipeline.workfile import get_workdir_with_workdir_data - - return get_workdir_with_workdir_data( - workdir_data, project_name, anatomy, template_key - ) - - -@deprecated("openpype.pipeline.workfile.get_workdir_with_workdir_data") -def get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy=None, - template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. In `Session` - is stored under `AVALON_APP` key. - anatomy (Anatomy): Optional argument. Anatomy object is created using - project name from `project_doc`. It is preferred to pass this - argument as initialization of a new Anatomy object may be time - consuming. - template_key (str): Key of work templates in anatomy templates. Default - value is defined in `get_workdir_with_workdir_data`. - - Returns: - TemplateResult: Workdir path. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.workfile import get_workdir - # Output is TemplateResult object which contain useful data - return get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy, - template_key - ) - - -@deprecated("openpype.pipeline.context_tools.get_template_data_from_session") -def template_data_from_session(session=None): - """ Return dictionary with template from session keys. - - Args: - session (dict, Optional): The Session to use. If not provided use the - currently active global Session. - - Returns: - dict: All available data from session. 
- - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.context_tools import get_template_data_from_session - - return get_template_data_from_session(session) - - @deprecated("openpype.pipeline.context_tools.compute_session_changes") def compute_session_changes( session, task=None, asset=None, app=None, template_key=None @@ -588,133 +322,6 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): return change_current_context(asset, task, template_key) -@deprecated("openpype.client.get_workfile_info") -def get_workfile_doc(asset_id, task_name, filename, dbcon=None): - """Return workfile document for entered context. - - Do not use this method to get more than one document. In that cases use - custom query as this will return documents from database one by one. - - Args: - asset_id (ObjectId): Mongo ID of an asset under which workfile belongs. - task_name (str): Name of task under which the workfile belongs. - filename (str): Name of a workfile. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. - - Returns: - dict: Workfile document or None. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - project_name = dbcon.active_project() - return get_workfile_info(project_name, asset_id, task_name, filename) - - -@deprecated -def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): - """Creates or replace workfile document in mongo. - - Do not use this method to update data. This method will remove all - additional data from existing document. - - Args: - asset_doc (dict): Document of asset under which workfile belongs. - task_name (str): Name of task for which is workfile related to. - filename (str): Filename of workfile. - workdir (str): Path to directory where `filename` is located. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `legacy_io` is used if not entered. 
- """ - - from openpype.pipeline import Anatomy - from openpype.pipeline.template_data import get_template_data - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Filter of workfile document - doc_filter = { - "type": "workfile", - "parent": asset_doc["_id"], - "task_name": task_name, - "filename": filename - } - # Document data are copy of filter - doc_data = copy.deepcopy(doc_filter) - - # Prepare project for workdir data - project_name = dbcon.active_project() - project_doc = get_project(project_name) - workdir_data = get_template_data( - project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] - ) - # Prepare anatomy - anatomy = Anatomy(project_name) - # Get workdir path (result is anatomy.TemplateResult) - template_workdir = get_workdir_with_workdir_data( - workdir_data, anatomy - ) - template_workdir_path = str(template_workdir).replace("\\", "/") - - # Replace slashses in workdir path where workfile is located - mod_workdir = workdir.replace("\\", "/") - - # Replace workdir from templates with rootless workdir - rootles_workdir = mod_workdir.replace( - template_workdir_path, - template_workdir.rootless.replace("\\", "/") - ) - - doc_data["schema"] = "pype:workfile-1.0" - doc_data["files"] = ["/".join([rootles_workdir, filename])] - doc_data["data"] = {} - - dbcon.replace_one( - doc_filter, - doc_data, - upsert=True - ) - - -@deprecated -def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): - if not workfile_doc: - # TODO add log message - return - - if not data: - return - - # Use legacy_io if dbcon is not entered - if not dbcon: - from openpype.pipeline import legacy_io - dbcon = legacy_io - - # Convert data to mongo modification keys/values - # - this is naive implementation which does not expect nested - # dictionaries - set_data = {} - for key, value in data.items(): - new_key = "data.{}".format(key) - set_data[new_key] = value - - # Update workfile document with data - dbcon.update_one( - {"_id": workfile_doc["_id"]}, - {"$set": set_data} - ) - - @deprecated("openpype.pipeline.workfile.BuildWorkfile") def BuildWorkfile(): """Build workfile class was moved to workfile pipeline. @@ -747,38 +354,6 @@ def get_creator_by_name(creator_name, case_sensitive=False): return get_legacy_creator_by_name(creator_name, case_sensitive) -@deprecated -def change_timer_to_current_context(): - """Called after context change to change timers. - - Deprecated: - This method is specific for TimersManager module so please use the - functionality from there. Function will be removed after release - version 3.15.* - """ - - from openpype.pipeline import legacy_io - - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - if not webserver_url: - log.warning("Couldn't find webserver url") - return - - rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) - try: - import requests - except Exception: - log.warning("Couldn't start timer") - return - data = { - "project_name": legacy_io.Session["AVALON_PROJECT"], - "asset_name": legacy_io.Session["AVALON_ASSET"], - "task_name": legacy_io.Session["AVALON_TASK"] - } - - requests.post(rest_api_url, json=data) - - def _get_task_context_data_for_anatomy( project_doc, asset_doc, task_name, anatomy=None ): @@ -800,6 +375,8 @@ def _get_task_context_data_for_anatomy( dict: With Anatomy context data. 
""" + from openpype.pipeline.template_data import get_general_template_data + if anatomy is None: from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) @@ -840,7 +417,7 @@ def _get_task_context_data_for_anatomy( } } - system_general_data = get_system_general_anatomy_data() + system_general_data = get_general_template_data() data.update(system_general_data) return data diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 096201312f..dca58fcf93 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -6,10 +6,9 @@ import inspect import logging import weakref from uuid import uuid4 -try: - from weakref import WeakMethod -except Exception: - from openpype.lib.python_2_comp import WeakMethod + +from .python_2_comp import WeakMethod +from .python_module_tools import is_func_signature_supported class MissingEventSystem(Exception): @@ -80,40 +79,8 @@ class EventCallback(object): # Get expected arguments from function spec # - positional arguments are always preferred - expect_args = False - expect_kwargs = False - fake_event = "fake" - if hasattr(inspect, "signature"): - # Python 3 using 'Signature' object where we try to bind arg - # or kwarg. Using signature is recommended approach based on - # documentation. - sig = inspect.signature(func) - try: - sig.bind(fake_event) - expect_args = True - except TypeError: - pass - - try: - sig.bind(event=fake_event) - expect_kwargs = True - except TypeError: - pass - - else: - # In Python 2 'signature' is not available so 'getcallargs' is used - # - 'getcallargs' is marked as deprecated since Python 3.0 - try: - inspect.getcallargs(func, fake_event) - expect_args = True - except TypeError: - pass - - try: - inspect.getcallargs(func, event=fake_event) - expect_kwargs = True - except TypeError: - pass + expect_args = is_func_signature_supported(func, "fake") + expect_kwargs = is_func_signature_supported(func, event="fake") self._func_ref = func_ref self._func_name = func_name @@ -156,7 +123,7 @@ class EventCallback(object): self._enabled = enabled def deregister(self): - """Calling this funcion will cause that callback will be removed.""" + """Calling this function will cause that callback will be removed.""" # Fake reference self._ref_valid = False diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index f1f2a4fa0a..6f52efdfcc 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -8,6 +8,8 @@ import tempfile from .log import Logger from .vendor_bin_utils import find_executable +from .openpype_version import is_running_from_build + # MSDN process creation flag (Windows only) CREATE_NO_WINDOW = 0x08000000 @@ -81,11 +83,14 @@ def run_subprocess(*args, **kwargs): Entered arguments and keyword arguments are passed to subprocess Popen. + On windows are 'creationflags' filled with flags that should cause ignore + creation of new window. + Args: - *args: Variable length arument list passed to Popen. + *args: Variable length argument list passed to Popen. **kwargs : Arbitrary keyword arguments passed to Popen. Is possible to - pass `logging.Logger` object under "logger" if want to use - different than lib's logger. + pass `logging.Logger` object under "logger" to use custom logger + for output. Returns: str: Full output of subprocess concatenated stdout and stderr. @@ -95,6 +100,21 @@ def run_subprocess(*args, **kwargs): return code. 
""" + # Modify creation flags on windows to hide console window if in UI mode + if ( + platform.system().lower() == "windows" + and "creationflags" not in kwargs + # shell=True already tries to hide the console window + # and passing these creationflags then shows the window again + # so we avoid it for shell=True cases + and kwargs.get("shell") is not True + ): + kwargs["creationflags"] = ( + subprocess.CREATE_NEW_PROCESS_GROUP + | getattr(subprocess, "DETACHED_PROCESS", 0) + | getattr(subprocess, "CREATE_NO_WINDOW", 0) + ) + # Get environents from kwarg or use current process environments if were # not passed. env = kwargs.get("env") or os.environ @@ -107,22 +127,22 @@ def run_subprocess(*args, **kwargs): logger = Logger.get_logger("run_subprocess") # set overrides - kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE) - kwargs['stderr'] = kwargs.get('stderr', subprocess.PIPE) - kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE) - kwargs['env'] = filtered_env + kwargs["stdout"] = kwargs.get("stdout", subprocess.PIPE) + kwargs["stderr"] = kwargs.get("stderr", subprocess.PIPE) + kwargs["stdin"] = kwargs.get("stdin", subprocess.PIPE) + kwargs["env"] = filtered_env proc = subprocess.Popen(*args, **kwargs) full_output = "" _stdout, _stderr = proc.communicate() if _stdout: - _stdout = _stdout.decode("utf-8") + _stdout = _stdout.decode("utf-8", errors="backslashreplace") full_output += _stdout logger.debug(_stdout) if _stderr: - _stderr = _stderr.decode("utf-8") + _stderr = _stderr.decode("utf-8", errors="backslashreplace") # Add additional line break if output already contains stdout if full_output: full_output += "\n" @@ -143,18 +163,20 @@ def run_subprocess(*args, **kwargs): def clean_envs_for_openpype_process(env=None): - """Modify environemnts that may affect OpenPype process. + """Modify environments that may affect OpenPype process. Main reason to implement this function is to pop PYTHONPATH which may be affected by in-host environments. """ if env is None: env = os.environ - return { - key: value - for key, value in env.items() - if key not in ("PYTHONPATH",) - } + + # Exclude some environment variables from a copy of the environment + env = env.copy() + for key in ["PYTHONPATH", "PYTHONHOME"]: + env.pop(key, None) + + return env def run_openpype_process(*args, **kwargs): @@ -168,7 +190,7 @@ def run_openpype_process(*args, **kwargs): Example: ``` - run_openpype_process("run", "") + run_detached_process("run", "") ``` Args: @@ -182,6 +204,11 @@ def run_openpype_process(*args, **kwargs): # Skip envs that can affect OpenPype process # - fill more if you find more env = clean_envs_for_openpype_process(os.environ) + + # Only keep OpenPype version if we are running from build. + if not is_running_from_build(): + env.pop("OPENPYPE_VERSION", None) + return run_subprocess(args, env=env, **kwargs) diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py index cba361a8d4..80f4e81f2c 100644 --- a/openpype/lib/file_transaction.py +++ b/openpype/lib/file_transaction.py @@ -13,6 +13,16 @@ else: from shutil import copyfile +class DuplicateDestinationError(ValueError): + """Error raised when transfer destination already exists in queue. + + The error is only raised if `allow_queue_replacements` is False on the + FileTransaction instance and the added file to transfer is of a different + src file than the one already detected in the queue. + + """ + + class FileTransaction(object): """File transaction with rollback options. 
@@ -44,7 +54,7 @@ class FileTransaction(object):
     MODE_COPY = 0
     MODE_HARDLINK = 1
 
-    def __init__(self, log=None):
+    def __init__(self, log=None, allow_queue_replacements=False):
         if log is None:
             log = logging.getLogger("FileTransaction")
 
@@ -60,6 +70,8 @@ class FileTransaction(object):
         # Backup file location mapping to original locations
         self._backup_to_original = {}
 
+        self._allow_queue_replacements = allow_queue_replacements
+
     def add(self, src, dst, mode=MODE_COPY):
         """Add a new file to transfer queue.
 
@@ -82,6 +94,14 @@ class FileTransaction(object):
                     src, dst))
                 return
             else:
+                if not self._allow_queue_replacements:
+                    raise DuplicateDestinationError(
+                        "Transfer to destination is already in queue: "
+                        "{} -> {}. It's not allowed to be replaced by "
+                        "a new transfer from {}".format(
+                            queued_src, dst, src
+                        ))
+
                 self.log.warning("File transfer in queue replaced..")
                 self.log.debug(
                     "Removed from queue: {} -> {} replaced by {} -> {}".format(
@@ -110,7 +130,7 @@ class FileTransaction(object):
             path_same = self._same_paths(src, dst)
             if path_same:
                 self.log.debug(
-                    "Source and destionation are same files {} -> {}".format(
+                    "Source and destination are same files {} -> {}".format(
                         src, dst))
                 continue
@@ -189,6 +209,6 @@ class FileTransaction(object):
     def _same_paths(self, src, dst):
         # handles same paths but with C:/project vs c:/project
         if os.path.exists(src) and os.path.exists(dst):
-            return os.path.samefile(src, dst)
+            return os.stat(src) == os.stat(dst)
 
         return src == dst
diff --git a/openpype/lib/openpype_version.py b/openpype/lib/openpype_version.py
index d547d34755..e052002468 100644
--- a/openpype/lib/openpype_version.py
+++ b/openpype/lib/openpype_version.py
@@ -57,15 +57,66 @@ def is_running_from_build():
     return True
 
 
+def is_staging_enabled():
+    return os.environ.get("OPENPYPE_USE_STAGING") == "1"
+
+
 def is_running_staging():
     """Currently used OpenPype is staging version.
 
+    This function is not a 100% reliable check of the staging version. It is
+    possible to have use of staging enabled but be running a different one.
+
+    The function is based on 4 factors:
+    - env 'OPENPYPE_IS_STAGING' is set
+    - current production version
+    - current staging version
+    - use staging is enabled
+
+    It first checks the 'OPENPYPE_IS_STAGING' environment variable, which can
+    be set to '1'. The value should be set only when a process without access
+    to OpenPypeVersion is launched (e.g. in DCCs). If the current version is
+    the same as the production version it is expected that it is not staging,
+    and it doesn't matter what 'is_staging_enabled' would return. If the
+    current version is the same as the staging version it is expected we're
+    in staging. In all other cases 'is_staging_enabled' is used as the source
+    of the output value.
+
+    The function is used to decide which icon is used. To check e.g. updates
+    the output should be combined with other functions from this file.
+
     Returns:
-        bool: True if openpype version containt 'staging'.
+        bool: Using staging version or not.
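To make the decision order described in the docstring above concrete, here is a condensed, illustrative reduction; plain values stand in for the settings and version lookups the real function performs, so this is a sketch rather than the implementation:

```python
import os

def is_running_staging_sketch(current, production, staging, use_staging):
    # 1. Explicit override for processes without OpenPypeVersion access.
    if os.environ.get("OPENPYPE_IS_STAGING") == "1":
        return True
    # 2. Matching the production version always means "not staging".
    if current == production:
        return False
    # 3. Matching the studio staging version means "staging".
    if current == staging:
        return True
    # 4. Otherwise the "use staging" toggle decides.
    return use_staging

# Current version matches the staging version -> treated as staging.
print(is_running_staging_sketch("3.15.2", "3.15.1", "3.15.2", False))
```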
""" - if "staging" in get_openpype_version(): + + if os.environ.get("OPENPYPE_IS_STAGING") == "1": return True - return False + + if not op_version_control_available(): + return False + + from openpype.settings import get_global_settings + + global_settings = get_global_settings() + production_version = global_settings["production_version"] + latest_version = None + if not production_version or production_version == "latest": + latest_version = get_latest_version(local=False, remote=True) + production_version = latest_version + + current_version = get_openpype_version() + if current_version == production_version: + return False + + staging_version = global_settings["staging_version"] + if not staging_version or staging_version == "latest": + if latest_version is None: + latest_version = get_latest_version(local=False, remote=True) + staging_version = latest_version + + if current_version == production_version: + return True + + return is_staging_enabled() # ---------------------------------------- @@ -131,13 +182,11 @@ def get_remote_versions(*args, **kwargs): return None -def get_latest_version(staging=None, local=None, remote=None): +def get_latest_version(local=None, remote=None): """Get latest version from repository path.""" - if staging is None: - staging = is_running_staging() + if op_version_control_available(): return get_OpenPypeVersion().get_latest_version( - staging=staging, local=local, remote=remote ) @@ -146,9 +195,9 @@ def get_latest_version(staging=None, local=None, remote=None): def get_expected_studio_version(staging=None): """Expected production or staging version in studio.""" - if staging is None: - staging = is_running_staging() if op_version_control_available(): + if staging is None: + staging = is_staging_enabled() return get_OpenPypeVersion().get_expected_studio_version(staging) return None @@ -158,7 +207,7 @@ def get_expected_version(staging=None): if expected_version is None: # Look for latest if expected version is not set in settings expected_version = get_latest_version( - staging=staging, + local=False, remote=True ) return expected_version diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py index 0f99efb430..9be1736abf 100644 --- a/openpype/lib/path_templates.py +++ b/openpype/lib/path_templates.py @@ -256,17 +256,18 @@ class TemplatesDict(object): elif isinstance(templates, dict): self._raw_templates = copy.deepcopy(templates) self._templates = templates - self._objected_templates = self.create_ojected_templates(templates) + self._objected_templates = self.create_objected_templates( + templates) else: raise TypeError("<{}> argument must be a dict, not {}.".format( self.__class__.__name__, str(type(templates)) )) def __getitem__(self, key): - return self.templates[key] + return self.objected_templates[key] def get(self, key, *args, **kwargs): - return self.templates.get(key, *args, **kwargs) + return self.objected_templates.get(key, *args, **kwargs) @property def raw_templates(self): @@ -280,8 +281,21 @@ class TemplatesDict(object): def objected_templates(self): return self._objected_templates - @classmethod - def create_ojected_templates(cls, templates): + def _create_template_object(self, template): + """Create template object from a template string. + + Separated into method to give option change class of templates. + + Args: + template (str): Template string. + + Returns: + StringTemplate: Object of template. 
+ """ + + return StringTemplate(template) + + def create_objected_templates(self, templates): if not isinstance(templates, dict): raise TypeError("Expected dict object, got {}".format( str(type(templates)) @@ -297,7 +311,7 @@ class TemplatesDict(object): for key in tuple(item.keys()): value = item[key] if isinstance(value, six.string_types): - item[key] = StringTemplate(value) + item[key] = self._create_template_object(value) elif isinstance(value, dict): inner_queue.append(value) return objected_templates diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index 1e157dfbfd..10fd3940b8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -8,7 +8,6 @@ import warnings import functools from openpype.client import get_asset_by_id -from openpype.settings import get_project_settings log = logging.getLogger(__name__) @@ -101,8 +100,6 @@ def get_subset_name_with_asset_doc( is not passed. dynamic_data (dict): Dynamic data specific for a creator which creates instance. - dbcon (AvalonMongoDB): Mongo connection to be able query asset document - if 'asset_doc' is not passed. """ from openpype.pipeline.create import get_subset_name @@ -202,122 +199,6 @@ def prepare_template_data(fill_pairs): return fill_data -@deprecated("openpype.pipeline.publish.lib.filter_pyblish_plugins") -def filter_pyblish_plugins(plugins): - """Filter pyblish plugins by presets. - - This servers as plugin filter / modifier for pyblish. It will load plugin - definitions from presets and filter those needed to be excluded. - - Args: - plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` - `discover()` method. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - from openpype.pipeline.publish.lib import filter_pyblish_plugins - - filter_pyblish_plugins(plugins) - - -@deprecated -def set_plugin_attributes_from_settings( - plugins, superclass, host_name=None, project_name=None -): - """Change attribute values on Avalon plugins by project settings. - - This function should be used only in host context. Modify - behavior of plugins. - - Args: - plugins (list): Plugins discovered by origin avalon discover method. - superclass (object): Superclass of plugin type (e.g. Cretor, Loader). - host_name (str): Name of host for which plugins are loaded and from. - Value from environment `AVALON_APP` is used if not entered. - project_name (str): Name of project for which settings will be loaded. - Value from environment `AVALON_PROJECT` is used if not entered. - - Deprecated: - Function will be removed after release version 3.15.* - """ - - # Function is not used anymore - from openpype.pipeline import LegacyCreator, LoaderPlugin - - # determine host application to use for finding presets - if host_name is None: - host_name = os.environ.get("AVALON_APP") - - if project_name is None: - project_name = os.environ.get("AVALON_PROJECT") - - # map plugin superclass to preset json. Currently supported is load and - # create (LoaderPlugin and LegacyCreator) - plugin_type = None - if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): - plugin_type = "load" - elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): - plugin_type = "create" - - if not host_name or not project_name or plugin_type is None: - msg = "Skipped attributes override from settings." - if not host_name: - msg += " Host name is not defined." - - if not project_name: - msg += " Project name is not defined." 
-
-        if plugin_type is None:
-            msg += " Plugin type is unsupported for class {}.".format(
-                superclass.__name__
-            )
-
-        print(msg)
-        return
-
-    print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type))
-
-    project_settings = get_project_settings(project_name)
-    plugin_type_settings = (
-        project_settings
-        .get(host_name, {})
-        .get(plugin_type, {})
-    )
-    global_type_settings = (
-        project_settings
-        .get("global", {})
-        .get(plugin_type, {})
-    )
-    if not global_type_settings and not plugin_type_settings:
-        return
-
-    for plugin in plugins:
-        plugin_name = plugin.__name__
-
-        plugin_settings = None
-        # Look for plugin settings in host specific settings
-        if plugin_name in plugin_type_settings:
-            plugin_settings = plugin_type_settings[plugin_name]
-
-        # Look for plugin settings in global settings
-        elif plugin_name in global_type_settings:
-            plugin_settings = global_type_settings[plugin_name]
-
-        if not plugin_settings:
-            continue
-
-        print(">>> We have preset for {}".format(plugin_name))
-        for option, value in plugin_settings.items():
-            if option == "enabled" and value is False:
-                setattr(plugin, "active", False)
-                print("  - is disabled by preset")
-            else:
-                setattr(plugin, option, value)
-                print("  - setting `{}`: `{}`".format(option, value))
-
-
 def source_hash(filepath, *args):
     """Generate simple identifier for a source file.
     This is used to identify whether a source file has previously been
diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py
index ff2f1d4b88..674eaa3b91 100644
--- a/openpype/lib/project_backpack.py
+++ b/openpype/lib/project_backpack.py
@@ -1,16 +1,19 @@
-"""These lib functions are primarily for development purposes.
+"""These lib functions are for development purposes.
 
-WARNING: This is not meant for production data.
+WARNING:
+    This is not meant for production data. Please don't write code which is
+    dependent on functionality here.
 
-Goal is to be able create package of current state of project with related
-documents from mongo and files from disk to zip file and then be able recreate
-the project based on the zip.
+Goal is to be able to create a package of the current state of a project,
+with related documents from mongo and files from disk in a zip file, and
+then be able to recreate the project based on the zip.
 
 This gives the ability to create a project where changes and tests can be done.
 
-Keep in mind that to be able create a package of project has few requirements.
-Possible requirement should be listed in 'pack_project' function.
+Keep in mind that creating a package of a project has a few requirements.
+Possible requirements should be listed in the 'pack_project' function.
 """
+
 import os
 import json
 import platform
@@ -19,16 +22,12 @@ import shutil
 import datetime
 import zipfile
 
-from bson.json_util import (
-    loads,
-    dumps,
-    CANONICAL_JSON_OPTIONS
+from openpype.client.mongo import (
+    load_json_file,
+    get_project_connection,
+    replace_project_documents,
+    store_project_documents,
 )
-from openpype.client import (
-    get_project,
-    get_whole_project,
-)
-from openpype.pipeline import AvalonMongoDB
 
 DOCUMENTS_FILE_NAME = "database"
 METADATA_FILE_NAME = "metadata"
@@ -43,7 +42,52 @@ def add_timestamp(filepath):
     return new_base + ext
 
 
-def pack_project(project_name, destination_dir=None):
+def get_project_document(project_name, database_name=None):
+    """Query project document.
+
+    Function 'get_project' from client api cannot be used as it does not
+    allow changing which 'database_name' is used.
+
+    Args:
+        project_name (str): Name of project.
+        database_name (Optional[str]): Name of mongo database where to look
+            for project.
+
+    Returns:
+        Union[dict[str, Any], None]: Project document or None.
+    """
+
+    col = get_project_connection(project_name, database_name)
+    return col.find_one({"type": "project"})
+
+
+def _pack_files_to_zip(zip_stream, source_path, root_path):
+    """Pack files to a zip stream.
+
+    Args:
+        zip_stream (zipfile.ZipFile): Stream to a zipfile.
+        source_path (str): Path to a directory where files are.
+        root_path (str): Path to a directory which is used for calculation
+            of relative path.
+    """
+
+    for root, _, filenames in os.walk(source_path):
+        for filename in filenames:
+            filepath = os.path.join(root, filename)
+            # TODO add one more folder
+            archive_name = os.path.join(
+                PROJECT_FILES_DIR,
+                os.path.relpath(filepath, root_path)
+            )
+            zip_stream.write(filepath, archive_name)
+
+
+def pack_project(
+    project_name,
+    destination_dir=None,
+    only_documents=False,
+    database_name=None
+):
     """Make a package of a project with mongo documents and files.
 
     This function has few restrictions:
     ...
         "{root[...]}/{project[name]}"
 
     Args:
-        project_name(str): Project that should be packaged.
-        destination_dir(str): Optional path where zip will be stored. Project's
-            root is used if not passed.
+        project_name (str): Project that should be packaged.
+        destination_dir (Optional[str]): Optional path where zip will be
+            stored. Project's root is used if not passed.
+        only_documents (Optional[bool]): Pack only Mongo documents and skip
+            files.
+        database_name (Optional[str]): Custom database name from which the
+            project is queried.
     """
+
+    print("Creating package of project \"{}\"".format(project_name))
 
     # Validate existence of project
-    project_doc = get_project(project_name)
+    project_doc = get_project_document(project_name, database_name)
     if not project_doc:
         raise ValueError("Project \"{}\" was not found in database".format(
             project_name
         ))
 
-    roots = project_doc["config"]["roots"]
-    # Determine root directory of project
-    source_root = None
-    source_root_name = None
-    for root_name, root_value in roots.items():
-        if source_root is not None:
-            raise ValueError(
-                "Packaging is supported only for single root projects"
-            )
-        source_root = root_value
-        source_root_name = root_name
+    root_path = None
+    source_root = {}
+    project_source_path = None
+    if not only_documents:
+        roots = project_doc["config"]["roots"]
+        # Determine root directory of project
+        source_root_name = None
+        for root_name, root_value in roots.items():
+            if source_root:
+                raise ValueError(
+                    "Packaging is supported only for single root projects"
+                )
+            source_root = root_value
+            source_root_name = root_name
 
-    root_path = source_root[platform.system().lower()]
-    print("Using root \"{}\" with path \"{}\"".format(
-        source_root_name, root_path
-    ))
+        root_path = source_root[platform.system().lower()]
+        print("Using root \"{}\" with path \"{}\"".format(
+            source_root_name, root_path
+        ))
 
-    project_source_path = os.path.join(root_path, project_name)
-    if not os.path.exists(project_source_path):
-        raise ValueError("Didn't find source of project files")
+        project_source_path = os.path.join(root_path, project_name)
+        if not os.path.exists(project_source_path):
+            raise ValueError("Didn't find source of project files")
 
     # Determine zip filepath where data will be stored
     if not destination_dir:
@@ -119,12 +171,7 @@ def pack_project(project_name,
destination_dir=None): temp_docs_json = s.name # Query all project documents and store them to temp json - docs = list(get_whole_project(project_name)) - data = dumps( - docs, json_options=CANONICAL_JSON_OPTIONS - ) - with open(temp_docs_json, "w") as stream: - stream.write(data) + store_project_documents(project_name, temp_docs_json, database_name) print("Packing files into zip") # Write all to zip file @@ -133,16 +180,10 @@ def pack_project(project_name, destination_dir=None): zip_stream.write(temp_metadata_json, METADATA_FILE_NAME + ".json") # Add database documents zip_stream.write(temp_docs_json, DOCUMENTS_FILE_NAME + ".json") + # Add project files to zip - for root, _, filenames in os.walk(project_source_path): - for filename in filenames: - filepath = os.path.join(root, filename) - # TODO add one more folder - archive_name = os.path.join( - PROJECT_FILES_DIR, - os.path.relpath(filepath, root_path) - ) - zip_stream.write(filepath, archive_name) + if not only_documents: + _pack_files_to_zip(zip_stream, project_source_path, root_path) print("Cleaning up") # Cleanup @@ -152,80 +193,30 @@ def pack_project(project_name, destination_dir=None): print("*** Packing finished ***") -def unpack_project(path_to_zip, new_root=None): - """Unpack project zip file to recreate project. +def _unpack_project_files(unzip_dir, root_path, project_name): + """Move project files from unarchived temp folder to new root. + + Unpack is skipped if source files are not available in the zip. That can + happen if nothing was published yet or only documents were stored to + package. Args: - path_to_zip(str): Path to zip which was created using 'pack_project' - function. - new_root(str): Optional way how to set different root path for unpacked - project. + unzip_dir (str): Location where zip was unzipped. + root_path (str): Path to new root. + project_name (str): Name of project. 
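Taken together, the new keyword arguments allow a documents-only round trip. A hedged sketch, with the project name, paths and database name made up for illustration:

```python
from openpype.lib.project_backpack import pack_project, unpack_project

# Package only the Mongo documents of a project, reading them from a
# non-default database.
pack_project(
    "demo_project",
    destination_dir="/tmp/backpack",
    only_documents=True,
    database_name="avalon_dev",
)

# Recreate the documents elsewhere without touching files on disk.
unpack_project(
    "/tmp/backpack/demo_project.zip",
    database_only=True,
    database_name="avalon_dev",
)
```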
""" - print("Unpacking project from zip {}".format(path_to_zip)) - if not os.path.exists(path_to_zip): - print("Zip file does not exists: {}".format(path_to_zip)) + + src_project_files_dir = os.path.join( + unzip_dir, PROJECT_FILES_DIR, project_name + ) + # Skip if files are not in the zip + if not os.path.exists(src_project_files_dir): return - tmp_dir = tempfile.mkdtemp(prefix="unpack_") - print("Zip is extracted to temp: {}".format(tmp_dir)) - with zipfile.ZipFile(path_to_zip, "r") as zip_stream: - zip_stream.extractall(tmp_dir) - - metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json") - with open(metadata_json_path, "r") as stream: - metadata = json.load(stream) - - docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json") - with open(docs_json_path, "r") as stream: - content = stream.readlines() - docs = loads("".join(content)) - - low_platform = platform.system().lower() - project_name = metadata["project_name"] - source_root = metadata["root"] - root_path = source_root[low_platform] - - # Drop existing collection - dbcon = AvalonMongoDB() - database = dbcon.database - if project_name in database.list_collection_names(): - database.drop_collection(project_name) - print("Removed existing project collection") - - print("Creating project documents ({})".format(len(docs))) - # Create new collection with loaded docs - collection = database[project_name] - collection.insert_many(docs) - - # Skip change of root if is the same as the one stored in metadata - if ( - new_root - and (os.path.normpath(new_root) == os.path.normpath(root_path)) - ): - new_root = None - - if new_root: - print("Using different root path {}".format(new_root)) - root_path = new_root - - project_doc = get_project(project_name) - roots = project_doc["config"]["roots"] - key = tuple(roots.keys())[0] - update_key = "config.roots.{}.{}".format(key, low_platform) - collection.update_one( - {"_id": project_doc["_id"]}, - {"$set": { - update_key: new_root - }} - ) - # Make sure root path exists if not os.path.exists(root_path): os.makedirs(root_path) - src_project_files_dir = os.path.join( - tmp_dir, PROJECT_FILES_DIR, project_name - ) dst_project_files_dir = os.path.normpath( os.path.join(root_path, project_name) ) @@ -241,8 +232,82 @@ def unpack_project(path_to_zip, new_root=None): )) shutil.move(src_project_files_dir, dst_project_files_dir) + +def unpack_project( + path_to_zip, new_root=None, database_only=None, database_name=None +): + """Unpack project zip file to recreate project. + + Args: + path_to_zip (str): Path to zip which was created using 'pack_project' + function. + new_root (str): Optional way how to set different root path for + unpacked project. + database_only (Optional[bool]): Unpack only database from zip. + database_name (str): Name of database where project will be recreated. 
+ """ + + if database_only is None: + database_only = False + + print("Unpacking project from zip {}".format(path_to_zip)) + if not os.path.exists(path_to_zip): + print("Zip file does not exists: {}".format(path_to_zip)) + return + + tmp_dir = tempfile.mkdtemp(prefix="unpack_") + print("Zip is extracted to temp: {}".format(tmp_dir)) + with zipfile.ZipFile(path_to_zip, "r") as zip_stream: + if database_only: + for filename in ( + "{}.json".format(METADATA_FILE_NAME), + "{}.json".format(DOCUMENTS_FILE_NAME), + ): + zip_stream.extract(filename, tmp_dir) + else: + zip_stream.extractall(tmp_dir) + + metadata_json_path = os.path.join(tmp_dir, METADATA_FILE_NAME + ".json") + with open(metadata_json_path, "r") as stream: + metadata = json.load(stream) + + docs_json_path = os.path.join(tmp_dir, DOCUMENTS_FILE_NAME + ".json") + docs = load_json_file(docs_json_path) + + low_platform = platform.system().lower() + project_name = metadata["project_name"] + root_path = metadata["root"].get(low_platform) + + # Drop existing collection + replace_project_documents(project_name, docs, database_name) + print("Creating project documents ({})".format(len(docs))) + + # Skip change of root if is the same as the one stored in metadata + if ( + new_root + and (os.path.normpath(new_root) == os.path.normpath(root_path)) + ): + new_root = None + + if new_root: + print("Using different root path {}".format(new_root)) + root_path = new_root + + project_doc = get_project_document(project_name) + roots = project_doc["config"]["roots"] + key = tuple(roots.keys())[0] + update_key = "config.roots.{}.{}".format(key, low_platform) + collection = get_project_connection(project_name, database_name) + collection.update_one( + {"_id": project_doc["_id"]}, + {"$set": { + update_key: new_root + }} + ) + + _unpack_project_files(tmp_dir, root_path, project_name) + # CLeanup print("Cleaning up") shutil.rmtree(tmp_dir) - dbcon.uninstall() print("*** Unpack finished ***") diff --git a/openpype/lib/python_2_comp.py b/openpype/lib/python_2_comp.py index d7137dbe9c..091c51a6f6 100644 --- a/openpype/lib/python_2_comp.py +++ b/openpype/lib/python_2_comp.py @@ -1,41 +1,44 @@ import weakref -class _weak_callable: - def __init__(self, obj, func): - self.im_self = obj - self.im_func = func +WeakMethod = getattr(weakref, "WeakMethod", None) - def __call__(self, *args, **kws): - if self.im_self is None: - return self.im_func(*args, **kws) - else: - return self.im_func(self.im_self, *args, **kws) +if WeakMethod is None: + class _WeakCallable: + def __init__(self, obj, func): + self.im_self = obj + self.im_func = func + + def __call__(self, *args, **kws): + if self.im_self is None: + return self.im_func(*args, **kws) + else: + return self.im_func(self.im_self, *args, **kws) -class WeakMethod: - """ Wraps a function or, more importantly, a bound method in - a way that allows a bound method's object to be GCed, while - providing the same interface as a normal weak reference. """ + class WeakMethod: + """ Wraps a function or, more importantly, a bound method in + a way that allows a bound method's object to be GCed, while + providing the same interface as a normal weak reference. 
""" - def __init__(self, fn): - try: - self._obj = weakref.ref(fn.im_self) - self._meth = fn.im_func - except AttributeError: - # It's not a bound method - self._obj = None - self._meth = fn + def __init__(self, fn): + try: + self._obj = weakref.ref(fn.im_self) + self._meth = fn.im_func + except AttributeError: + # It's not a bound method + self._obj = None + self._meth = fn - def __call__(self): - if self._dead(): - return None - return _weak_callable(self._getobj(), self._meth) + def __call__(self): + if self._dead(): + return None + return _WeakCallable(self._getobj(), self._meth) - def _dead(self): - return self._obj is not None and self._obj() is None + def _dead(self): + return self._obj is not None and self._obj() is None - def _getobj(self): - if self._obj is None: - return None - return self._obj() + def _getobj(self): + if self._obj is None: + return None + return self._obj() diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py index 6fad3b547f..a10263f991 100644 --- a/openpype/lib/python_module_tools.py +++ b/openpype/lib/python_module_tools.py @@ -28,6 +28,7 @@ def import_filepath(filepath, module_name=None): # Prepare module object where content of file will be parsed module = types.ModuleType(module_name) + module.__file__ = filepath if six.PY3: # Use loader so module has full specs @@ -41,7 +42,6 @@ def import_filepath(filepath, module_name=None): # Execute content and store it to module object six.exec_(_stream.read(), module.__dict__) - module.__file__ = filepath return module @@ -230,3 +230,70 @@ def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None): dirpath, folder_name, dst_module_name ) return module + + +def is_func_signature_supported(func, *args, **kwargs): + """Check if a function signature supports passed args and kwargs. + + This check does not actually call the function, just look if function can + be called with the arguments. + + Notes: + This does NOT check if the function would work with passed arguments + only if they can be passed in. If function have *args, **kwargs + in paramaters, this will always return 'True'. + + Example: + >>> def my_function(my_number): + ... return my_number + 1 + ... + >>> is_func_signature_supported(my_function, 1) + True + >>> is_func_signature_supported(my_function, 1, 2) + False + >>> is_func_signature_supported(my_function, my_number=1) + True + >>> is_func_signature_supported(my_function, number=1) + False + >>> is_func_signature_supported(my_function, "string") + True + >>> def my_other_function(*args, **kwargs): + ... my_function(*args, **kwargs) + ... + >>> is_func_signature_supported( + ... my_other_function, + ... "string", + ... 1, + ... other=None + ... ) + True + + Args: + func (function): A function where the signature should be tested. + *args (tuple[Any]): Positional arguments for function signature. + **kwargs (dict[str, Any]): Keyword arguments for function signature. + + Returns: + bool: Function can pass in arguments. + """ + + if hasattr(inspect, "signature"): + # Python 3 using 'Signature' object where we try to bind arg + # or kwarg. Using signature is recommended approach based on + # documentation. 
+ sig = inspect.signature(func) + try: + sig.bind(*args, **kwargs) + return True + except TypeError: + pass + + else: + # In Python 2 'signature' is not available so 'getcallargs' is used + # - 'getcallargs' is marked as deprecated since Python 3.0 + try: + inspect.getcallargs(func, *args, **kwargs) + return True + except TypeError: + pass + return False diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 57279d0380..de6495900e 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -5,6 +5,7 @@ import json import collections import tempfile import subprocess +import platform import xml.etree.ElementTree @@ -50,7 +51,7 @@ IMAGE_EXTENSIONS = { ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", - ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", + ".pictor", ".png", ".psd", ".psb", ".psp", ".qtvr", ".ras", ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", ".xpm", ".xwd" @@ -539,7 +540,7 @@ def convert_for_ffmpeg( continue # Remove attributes that have string value longer than allowed length - # for ffmpeg or when containt unallowed symbols + # for ffmpeg or when contain unallowed symbols erase_reason = "Missing reason" erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: @@ -679,7 +680,7 @@ def convert_input_paths_for_ffmpeg( continue # Remove attributes that have string value longer than allowed - # length for ffmpeg or when containt unallowed symbols + # length for ffmpeg or when containing unallowed symbols erase_reason = "Missing reason" erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: @@ -745,11 +746,18 @@ def get_ffprobe_data(path_to_file, logger=None): logger.debug("FFprobe command: {}".format( subprocess.list2cmdline(args) )) - popen = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + kwargs = { + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + } + if platform.system().lower() == "windows": + kwargs["creationflags"] = ( + subprocess.CREATE_NEW_PROCESS_GROUP + | getattr(subprocess, "DETACHED_PROCESS", 0) + | getattr(subprocess, "CREATE_NO_WINDOW", 0) + ) + + popen = subprocess.Popen(args, **kwargs) popen_stdout, popen_stderr = popen.communicate() if popen_stdout: @@ -960,7 +968,7 @@ def _ffmpeg_dnxhd_codec_args(stream_data, source_ffmpeg_cmd): if source_ffmpeg_cmd: # Define bitrate arguments bit_rate_args = ("-b:v", "-vb",) - # Seprate the two variables in case something else should be copied + # Separate the two variables in case something else should be copied # from source command copy_args = [] copy_args.extend(bit_rate_args) @@ -1037,3 +1045,90 @@ def convert_ffprobe_fps_to_float(value): if divisor == 0.0: return 0.0 return dividend / divisor + + +def convert_colorspace( + input_path, + output_path, + config_path, + source_colorspace, + target_colorspace=None, + view=None, + display=None, + additional_command_args=None, + logger=None +): + """Convert source file from one color space to another. + + Args: + input_path (str): Path that should be converted. It is expected that + contains single file or image sequence of same type + (sequence in format 'file.FRAMESTART-FRAMEEND#.ext', see oiio docs, + eg `big.1-3#.tif`) + output_path (str): Path to output filename. + (must follow format of 'input_path', eg. 
single file or
+            sequence in 'file.FRAMESTART-FRAMEEND#.ext', `output.1-3#.tif`)
+        config_path (str): path to OCIO config file
+        source_colorspace (str): ocio valid color space of source files
+        target_colorspace (str): ocio valid target color space
+            if filled, 'view' and 'display' must be empty
+        view (str): name for viewer space (ocio valid)
+            both 'view' and 'display' must be filled (if 'target_colorspace' is not set)
+        display (str): name for display-referred reference space (ocio valid)
+        additional_command_args (list): arguments for oiiotool (like binary
+            depth for .dpx)
+        logger (logging.Logger): Logger used for logging.
+    Raises:
+        ValueError: if misconfigured
+    """
+    if logger is None:
+        logger = logging.getLogger(__name__)
+
+    oiio_cmd = [
+        get_oiio_tools_path(),
+        input_path,
+        # Don't add any additional attributes
+        "--nosoftwareattrib",
+        "--colorconfig", config_path
+    ]
+
+    if all([target_colorspace, view, display]):
+        raise ValueError("Target colorspace and view/display pair"
+                         " cannot be set together."
+                         " Choose either target colorspace, or view and display.")
+    if not target_colorspace and not all([view, display]):
+        raise ValueError("Both view and display must be set.")
+
+    if additional_command_args:
+        oiio_cmd.extend(additional_command_args)
+
+    if target_colorspace:
+        oiio_cmd.extend(["--colorconvert",
+                         source_colorspace,
+                         target_colorspace])
+    if view and display:
+        oiio_cmd.extend(["--iscolorspace", source_colorspace])
+        oiio_cmd.extend(["--ociodisplay", display, view])
+
+    oiio_cmd.extend(["-o", output_path])
+
+    logger.debug("Conversion command: {}".format(" ".join(oiio_cmd)))
+    run_subprocess(oiio_cmd, logger=logger)
+
+
+def split_cmd_args(in_args):
+    """Makes sure all entered arguments are separated into individual items.
+
+    Splits each argument string on spaces, so an entry that packs several
+    arguments into one string is expanded into separate items.
+    Args:
+        in_args (list): of arguments ['-n', '-d uint10']
+    Returns:
+        (list): ['-n', '-d', 'uint10']
+    """
+    splitted_args = []
+    for arg in in_args:
+        if not arg.strip():
+            continue
+        splitted_args.extend(arg.split(" "))
+    return splitted_args
diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py
index 20703ee308..5ef1d38f87 100644
--- a/openpype/lib/usdlib.py
+++ b/openpype/lib/usdlib.py
@@ -327,7 +327,8 @@ def get_usd_master_path(asset, subset, representation):
     else:
         asset_doc = get_asset_by_name(project_name, asset, fields=["name"])
 
-    formatted_result = anatomy.format(
+    template_obj = anatomy.templates_obj["publish"]["path"]
+    path = template_obj.format_strict(
         {
             "project": {
                 "name": project_name,
@@ -340,7 +341,6 @@ def get_usd_master_path(asset, subset, representation):
         }
     )
-    path = formatted_result["publish"]["path"]
     # Remove the version folder
     subset_folder = os.path.dirname(os.path.dirname(path))
     master_folder = os.path.join(subset_folder, "master")
diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py
index b6797dbba0..f27c78d486 100644
--- a/openpype/lib/vendor_bin_utils.py
+++ b/openpype/lib/vendor_bin_utils.py
@@ -224,18 +224,26 @@ def find_tool_in_custom_paths(paths, tool, validation_func=None):
 
 def _check_args_returncode(args):
     try:
-        # Python 2 compatibility where DEVNULL is not available
+        kwargs = {}
+        if platform.system().lower() == "windows":
+            kwargs["creationflags"] = (
+                subprocess.CREATE_NEW_PROCESS_GROUP
+                | getattr(subprocess, "DETACHED_PROCESS", 0)
+                | getattr(subprocess, "CREATE_NO_WINDOW", 0)
+            )
+
         if hasattr(subprocess, "DEVNULL"):
             proc = subprocess.Popen(
                 args,
                 stdout=subprocess.DEVNULL,
                 stderr=subprocess.DEVNULL,
+                **kwargs
             )
             proc.wait()
         else:
             with open(os.devnull, "w") as devnull:
                 proc = subprocess.Popen(
-                    args, stdout=devnull, stderr=devnull,
+                    args, stdout=devnull, stderr=devnull, **kwargs
                 )
                 proc.wait()
@@ -252,7 +260,7 @@ def _oiio_executable_validation(filepath):
     that it can be executed. For that is used '--help' argument which is
     fast and does not need any other inputs.
 
-    Any possible crash of missing libraries or invalid build should be catched.
+    Any possible crash of missing libraries or invalid build should be caught.
     Main reason is to validate if executable can be executed on OS just
     running which can be issue ob linux machines.
@@ -321,7 +329,7 @@ def _ffmpeg_executable_validation(filepath):
     that it can be executed. For that is used '-version' argument which is
     fast and does not need any other inputs.
 
-    Any possible crash of missing libraries or invalid build should be catched.
+    Any possible crash of missing libraries or invalid build should be caught.
     Main reason is to validate if executable can be executed on OS just
     running which can be issue ob linux machines.
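Editor's note: the Windows-only Popen flags now appear in both get_ffprobe_data() and _check_args_returncode(); a sketch of the same pattern factored into one helper (the ffprobe call assumes the binary is on PATH):

import platform
import subprocess


def silent_popen_kwargs():
    """Extra Popen kwargs that keep child processes from opening a console.

    The getattr() fallbacks mirror the patch: constants missing from older
    'subprocess' modules simply contribute 0 to the flag mask.
    """
    kwargs = {}
    if platform.system().lower() == "windows":
        kwargs["creationflags"] = (
            subprocess.CREATE_NEW_PROCESS_GROUP
            | getattr(subprocess, "DETACHED_PROCESS", 0)
            | getattr(subprocess, "CREATE_NO_WINDOW", 0)
        )
    return kwargs


proc = subprocess.Popen(
    ["ffprobe", "-version"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    **silent_popen_kwargs()
)
stdout, _ = proc.communicate()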
@@ -375,7 +383,7 @@ def get_ffmpeg_tool_path(tool="ffmpeg"):
     # Look to PATH for the tool
     if not tool_executable_path:
         from_path = find_executable(tool)
-        if from_path and _oiio_executable_validation(from_path):
+        if from_path and _ffmpeg_executable_validation(from_path):
             tool_executable_path = from_path
 
     CachedToolPaths.cache_executable_path(tool, tool_executable_path)
diff --git a/openpype/modules/base.py b/openpype/modules/base.py
index 0fd21492e8..732525b6eb 100644
--- a/openpype/modules/base.py
+++ b/openpype/modules/base.py
@@ -311,6 +311,7 @@ def _load_modules():
     # Look for OpenPype modules in paths defined with `get_module_dirs`
     #   - dynamically imported OpenPype modules and addons
     module_dirs = get_module_dirs()
+
     # Add current directory at first place
     #   - has small differences in import logic
     current_dir = os.path.abspath(os.path.dirname(__file__))
@@ -318,8 +319,11 @@ def _load_modules():
     module_dirs.insert(0, hosts_dir)
     module_dirs.insert(0, current_dir)
 
+    addons_dir = os.path.join(os.path.dirname(current_dir), "addons")
+    module_dirs.append(addons_dir)
+
     processed_paths = set()
-    for dirpath in module_dirs:
+    for dirpath in frozenset(module_dirs):
         # Skip already processed paths
         if dirpath in processed_paths:
             continue
@@ -472,7 +476,7 @@ class OpenPypeModule:
 
         Args:
             application (Application): Application that is launched.
-            env (dict): Current environemnt variables.
+            env (dict): Current environment variables.
         """
 
         pass
@@ -622,7 +626,7 @@ class ModulesManager:
 
             # Check if class is abstract (Developing purpose)
             if inspect.isabstract(modules_item):
-                # Find missing implementations by convetion on `abc` module
+                # Find abstract attributes by convention on `abc` module
                 not_implemented = []
                 for attr_name in dir(modules_item):
                     attr = getattr(modules_item, attr_name, None)
@@ -708,13 +712,13 @@ class ModulesManager:
         ]
 
     def collect_global_environments(self):
-        """Helper to collect global enviornment variabled from modules.
+        """Helper to collect global environment variables from modules.
 
         Returns:
             dict: Global environment variables from enabled modules.
 
         Raises:
-            AssertionError: Gobal environment variables must be unique for
+            AssertionError: Global environment variables must be unique for
                 all modules.
         """
         module_envs = {}
@@ -1174,7 +1178,7 @@ class TrayModulesManager(ModulesManager):
 
 
 def get_module_settings_defs():
-    """Check loaded addons/modules for existence of thei settings definition.
+    """Check loaded addons/modules for existence of their settings definition.
 
     Check if OpenPype addon/module as python module has class that inherit
     from `ModuleSettingsDef` in python module variables (imported
@@ -1204,7 +1208,7 @@ def get_module_settings_defs():
             continue
 
         if inspect.isabstract(attr):
-            # Find missing implementations by convetion on `abc` module
+            # Find missing implementations by convention on `abc` module
             not_implemented = []
            for attr_name in dir(attr):
                 attr = getattr(attr, attr_name, None)
@@ -1293,7 +1297,7 @@ class BaseModuleSettingsDef:
 
 
 class ModuleSettingsDef(BaseModuleSettingsDef):
-    """Settings definiton with separated system and procect settings parts.
+    """Settings definition with separated system and project settings parts.
 
     Reduce conditions that must be checked and adds predefined methods for
     each case.
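Editor's note: one caveat in the hunk above - frozenset(module_dirs) deduplicates the paths but iterates them in arbitrary order, so the priority established by the insert(0, ...) calls is lost (and 'processed_paths' already guards against duplicates). If the order matters, an order-preserving dedup is a drop-in alternative; a sketch, not part of the patch:

module_dirs = [
    "/openpype/modules",
    "/openpype/hosts",
    "/openpype/modules",  # duplicate that should be dropped
]

# dict preserves insertion order, so the first occurrence of each path
# wins and the original priority is kept.
for dirpath in dict.fromkeys(module_dirs):
    print(dirpath)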
diff --git a/openpype/modules/clockify/clockify_api.py b/openpype/modules/clockify/clockify_api.py index 6af911fffc..47af002f7a 100644 --- a/openpype/modules/clockify/clockify_api.py +++ b/openpype/modules/clockify/clockify_api.py @@ -6,34 +6,22 @@ import datetime import requests from .constants import ( CLOCKIFY_ENDPOINT, - ADMIN_PERMISSION_NAMES + ADMIN_PERMISSION_NAMES, ) from openpype.lib.local_settings import OpenPypeSecureRegistry - - -def time_check(obj): - if obj.request_counter < 10: - obj.request_counter += 1 - return - - wait_time = 1 - (time.time() - obj.request_time) - if wait_time > 0: - time.sleep(wait_time) - - obj.request_time = time.time() - obj.request_counter = 0 +from openpype.lib import Logger class ClockifyAPI: + log = Logger.get_logger(__name__) + def __init__(self, api_key=None, master_parent=None): self.workspace_name = None - self.workspace_id = None self.master_parent = master_parent self.api_key = api_key - self.request_counter = 0 - self.request_time = time.time() - + self._workspace_id = None + self._user_id = None self._secure_registry = None @property @@ -44,11 +32,19 @@ class ClockifyAPI: @property def headers(self): - return {"X-Api-Key": self.api_key} + return {"x-api-key": self.api_key} + + @property + def workspace_id(self): + return self._workspace_id + + @property + def user_id(self): + return self._user_id def verify_api(self): for key, value in self.headers.items(): - if value is None or value.strip() == '': + if value is None or value.strip() == "": return False return True @@ -59,65 +55,55 @@ class ClockifyAPI: if api_key is not None and self.validate_api_key(api_key) is True: self.api_key = api_key self.set_workspace() + self.set_user_id() if self.master_parent: self.master_parent.signed_in() return True return False def validate_api_key(self, api_key): - test_headers = {'X-Api-Key': api_key} - action_url = 'workspaces/' - time_check(self) + test_headers = {"x-api-key": api_key} + action_url = "user" response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=test_headers + CLOCKIFY_ENDPOINT + action_url, headers=test_headers ) if response.status_code != 200: return False return True - def validate_workspace_perm(self, workspace_id=None): - user_id = self.get_user_id() + def validate_workspace_permissions(self, workspace_id=None, user_id=None): if user_id is None: + self.log.info("No user_id found during validation") return False if workspace_id is None: workspace_id = self.workspace_id - action_url = "/workspaces/{}/users/{}/permissions".format( - workspace_id, user_id - ) - time_check(self) + action_url = f"workspaces/{workspace_id}/users?includeRoles=1" response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) - user_permissions = response.json() - for perm in user_permissions: - if perm['name'] in ADMIN_PERMISSION_NAMES: + data = response.json() + for user in data: + if user.get("id") == user_id: + roles_data = user.get("roles") + for entities in roles_data: + if entities.get("role") in ADMIN_PERMISSION_NAMES: return True return False def get_user_id(self): - action_url = 'v1/user/' - time_check(self) + action_url = "user" response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) - # this regex is neccessary: UNICODE strings are crashing - # during json serialization - id_regex = '\"{1}id\"{1}\:{1}\"{1}\w+\"{1}' - result = re.findall(id_regex, str(response.content)) - if 
len(result) != 1: - # replace with log and better message? - print('User ID was not found (this is a BUG!!!)') - return None - return json.loads('{'+result[0]+'}')['id'] + result = response.json() + user_id = result.get("id", None) + + return user_id def set_workspace(self, name=None): if name is None: - name = os.environ.get('CLOCKIFY_WORKSPACE', None) + name = os.environ.get("CLOCKIFY_WORKSPACE", None) self.workspace_name = name - self.workspace_id = None if self.workspace_name is None: return try: @@ -125,7 +111,7 @@ class ClockifyAPI: except Exception: result = False if result is not False: - self.workspace_id = result + self._workspace_id = result if self.master_parent is not None: self.master_parent.start_timer_check() return True @@ -139,6 +125,14 @@ class ClockifyAPI: return all_workspaces[name] return False + def set_user_id(self): + try: + user_id = self.get_user_id() + except Exception: + user_id = None + if user_id is not None: + self._user_id = user_id + def get_api_key(self): return self.secure_registry.get_item("api_key", None) @@ -146,11 +140,9 @@ class ClockifyAPI: self.secure_registry.set_item("api_key", api_key) def get_workspaces(self): - action_url = 'workspaces/' - time_check(self) + action_url = "workspaces/" response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return { workspace["name"]: workspace["id"] for workspace in response.json() @@ -159,27 +151,22 @@ class ClockifyAPI: def get_projects(self, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/projects/'.format(workspace_id) - time_check(self) + action_url = f"workspaces/{workspace_id}/projects" response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) - - return { - project["name"]: project["id"] for project in response.json() - } + if response.status_code != 403: + result = response.json() + return {project["name"]: project["id"] for project in result} def get_project_by_id(self, project_id, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/projects/{}/'.format( + action_url = "workspaces/{}/projects/{}".format( workspace_id, project_id ) - time_check(self) response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return response.json() @@ -187,32 +174,24 @@ class ClockifyAPI: def get_tags(self, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/tags/'.format(workspace_id) - time_check(self) + action_url = "workspaces/{}/tags".format(workspace_id) response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) - return { - tag["name"]: tag["id"] for tag in response.json() - } + return {tag["name"]: tag["id"] for tag in response.json()} def get_tasks(self, project_id, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/projects/{}/tasks/'.format( + action_url = "workspaces/{}/projects/{}/tasks".format( workspace_id, project_id ) - time_check(self) response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) - return { - task["name"]: task["id"] for task in response.json() - } + return 
{task["name"]: task["id"] for task in response.json()} def get_workspace_id(self, workspace_name): all_workspaces = self.get_workspaces() @@ -236,48 +215,64 @@ class ClockifyAPI: return None return all_tasks[tag_name] - def get_task_id( - self, task_name, project_id, workspace_id=None - ): + def get_task_id(self, task_name, project_id, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - all_tasks = self.get_tasks( - project_id, workspace_id - ) + all_tasks = self.get_tasks(project_id, workspace_id) if task_name not in all_tasks: return None return all_tasks[task_name] def get_current_time(self): - return str(datetime.datetime.utcnow().isoformat())+'Z' + return str(datetime.datetime.utcnow().isoformat()) + "Z" def start_time_entry( - self, description, project_id, task_id=None, tag_ids=[], - workspace_id=None, billable=True + self, + description, + project_id, + task_id=None, + tag_ids=None, + workspace_id=None, + user_id=None, + billable=True, ): # Workspace if workspace_id is None: workspace_id = self.workspace_id + # User ID + if user_id is None: + user_id = self._user_id + + # get running timer to check if we need to start it + current_timer = self.get_in_progress() # Check if is currently run another times and has same values - current = self.get_in_progress(workspace_id) - if current is not None: + # DO not restart the timer, if it is already running for current task + if current_timer: + current_timer_hierarchy = current_timer.get("description") + current_project_id = current_timer.get("projectId") + current_task_id = current_timer.get("taskId") if ( - current.get("description", None) == description and - current.get("projectId", None) == project_id and - current.get("taskId", None) == task_id + description == current_timer_hierarchy + and project_id == current_project_id + and task_id == current_task_id ): + self.log.info( + "Timer for the current project is already running" + ) self.bool_timer_run = True return self.bool_timer_run - self.finish_time_entry(workspace_id) + self.finish_time_entry() # Convert billable to strings if billable: - billable = 'true' + billable = "true" else: - billable = 'false' + billable = "false" # Rest API Action - action_url = 'workspaces/{}/timeEntries/'.format(workspace_id) + action_url = "workspaces/{}/user/{}/time-entries".format( + workspace_id, user_id + ) start = self.get_current_time() body = { "start": start, @@ -285,169 +280,135 @@ class ClockifyAPI: "description": description, "projectId": project_id, "taskId": task_id, - "tagIds": tag_ids + "tagIds": tag_ids, } - time_check(self) response = requests.post( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) - - success = False if response.status_code < 300: - success = True - return success + return True + return False - def get_in_progress(self, workspace_id=None): - if workspace_id is None: - workspace_id = self.workspace_id - action_url = 'workspaces/{}/timeEntries/inProgress'.format( - workspace_id - ) - time_check(self) - response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers - ) + def _get_current_timer_values(self, response): + if response is None: + return try: output = response.json() except json.decoder.JSONDecodeError: - output = None - return output + return None + if output and isinstance(output, list): + return output[0] + return None - def finish_time_entry(self, workspace_id=None): + def get_in_progress(self, user_id=None, 
workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - current = self.get_in_progress(workspace_id) - if current is None: - return + if user_id is None: + user_id = self.user_id - current_id = current["id"] - action_url = 'workspaces/{}/timeEntries/{}'.format( - workspace_id, current_id + action_url = ( + f"workspaces/{workspace_id}/user/" + f"{user_id}/time-entries?in-progress=1" ) - body = { - "start": current["timeInterval"]["start"], - "billable": current["billable"], - "description": current["description"], - "projectId": current["projectId"], - "taskId": current["taskId"], - "tagIds": current["tagIds"], - "end": self.get_current_time() - } - time_check(self) - response = requests.put( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + response = requests.get( + CLOCKIFY_ENDPOINT + action_url, headers=self.headers + ) + return self._get_current_timer_values(response) + + def finish_time_entry(self, workspace_id=None, user_id=None): + if workspace_id is None: + workspace_id = self.workspace_id + if user_id is None: + user_id = self.user_id + current_timer = self.get_in_progress() + if not current_timer: + return + action_url = "workspaces/{}/user/{}/time-entries".format( + workspace_id, user_id + ) + body = {"end": self.get_current_time()} + response = requests.patch( + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) return response.json() - def get_time_entries( - self, workspace_id=None, quantity=10 - ): + def get_time_entries(self, workspace_id=None, user_id=None, quantity=10): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/timeEntries/'.format(workspace_id) - time_check(self) + if user_id is None: + user_id = self.user_id + action_url = "workspaces/{}/user/{}/time-entries".format( + workspace_id, user_id + ) response = requests.get( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return response.json()[:quantity] - def remove_time_entry(self, tid, workspace_id=None): + def remove_time_entry(self, tid, workspace_id=None, user_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/timeEntries/{}'.format( - workspace_id, tid + action_url = "workspaces/{}/user/{}/time-entries/{}".format( + workspace_id, user_id, tid ) - time_check(self) response = requests.delete( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers + CLOCKIFY_ENDPOINT + action_url, headers=self.headers ) return response.json() def add_project(self, name, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/projects/'.format(workspace_id) + action_url = "workspaces/{}/projects".format(workspace_id) body = { "name": name, "clientId": "", "isPublic": "false", - "estimate": { - "estimate": 0, - "type": "AUTO" - }, + "estimate": {"estimate": 0, "type": "AUTO"}, "color": "#f44336", - "billable": "true" + "billable": "true", } - time_check(self) response = requests.post( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) return response.json() def add_workspace(self, name): - action_url = 'workspaces/' + action_url = "workspaces/" body = {"name": name} - time_check(self) response = requests.post( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) return response.json() - def add_task( - 
self, name, project_id, workspace_id=None - ): + def add_task(self, name, project_id, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/projects/{}/tasks/'.format( + action_url = "workspaces/{}/projects/{}/tasks".format( workspace_id, project_id ) - body = { - "name": name, - "projectId": project_id - } - time_check(self) + body = {"name": name, "projectId": project_id} response = requests.post( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) return response.json() def add_tag(self, name, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = 'workspaces/{}/tags'.format(workspace_id) - body = { - "name": name - } - time_check(self) + action_url = "workspaces/{}/tags".format(workspace_id) + body = {"name": name} response = requests.post( - CLOCKIFY_ENDPOINT + action_url, - headers=self.headers, - json=body + CLOCKIFY_ENDPOINT + action_url, headers=self.headers, json=body ) return response.json() - def delete_project( - self, project_id, workspace_id=None - ): + def delete_project(self, project_id, workspace_id=None): if workspace_id is None: workspace_id = self.workspace_id - action_url = '/workspaces/{}/projects/{}'.format( + action_url = "/workspaces/{}/projects/{}".format( workspace_id, project_id ) - time_check(self) response = requests.delete( CLOCKIFY_ENDPOINT + action_url, headers=self.headers, @@ -455,12 +416,12 @@ class ClockifyAPI: return response.json() def convert_input( - self, entity_id, entity_name, mode='Workspace', project_id=None + self, entity_id, entity_name, mode="Workspace", project_id=None ): if entity_id is None: error = False error_msg = 'Missing information "{}"' - if mode.lower() == 'workspace': + if mode.lower() == "workspace": if entity_id is None and entity_name is None: if self.workspace_id is not None: entity_id = self.workspace_id @@ -471,14 +432,14 @@ class ClockifyAPI: else: if entity_id is None and entity_name is None: error = True - elif mode.lower() == 'project': + elif mode.lower() == "project": entity_id = self.get_project_id(entity_name) - elif mode.lower() == 'task': + elif mode.lower() == "task": entity_id = self.get_task_id( task_name=entity_name, project_id=project_id ) else: - raise TypeError('Unknown type') + raise TypeError("Unknown type") # Raise error if error: raise ValueError(error_msg.format(mode)) diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/clockify/clockify_module.py index 300d5576e2..b6efec7907 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/openpype/modules/clockify/clockify_module.py @@ -2,24 +2,13 @@ import os import threading import time -from openpype.modules import ( - OpenPypeModule, - ITrayModule, - IPluginPaths -) +from openpype.modules import OpenPypeModule, ITrayModule, IPluginPaths +from openpype.client import get_asset_by_name -from .clockify_api import ClockifyAPI -from .constants import ( - CLOCKIFY_FTRACK_USER_PATH, - CLOCKIFY_FTRACK_SERVER_PATH -) +from .constants import CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH -class ClockifyModule( - OpenPypeModule, - ITrayModule, - IPluginPaths -): +class ClockifyModule(OpenPypeModule, ITrayModule, IPluginPaths): name = "clockify" def initialize(self, modules_settings): @@ -33,18 +22,23 @@ class ClockifyModule( self.timer_manager = None self.MessageWidgetClass = None self.message_widget = None - - self.clockapi = 
ClockifyAPI(master_parent=self) + self._clockify_api = None # TimersManager attributes # - set `timers_manager_connector` only in `tray_init` self.timers_manager_connector = None self._timers_manager_module = None + @property + def clockify_api(self): + if self._clockify_api is None: + from .clockify_api import ClockifyAPI + + self._clockify_api = ClockifyAPI(master_parent=self) + return self._clockify_api + def get_global_environments(self): - return { - "CLOCKIFY_WORKSPACE": self.workspace_name - } + return {"CLOCKIFY_WORKSPACE": self.workspace_name} def tray_init(self): from .widgets import ClockifySettings, MessageWidget @@ -52,7 +46,7 @@ class ClockifyModule( self.MessageWidgetClass = MessageWidget self.message_widget = None - self.widget_settings = ClockifySettings(self.clockapi) + self.widget_settings = ClockifySettings(self.clockify_api) self.widget_settings_required = None self.thread_timer_check = None @@ -61,7 +55,7 @@ class ClockifyModule( self.bool_api_key_set = False self.bool_workspace_set = False self.bool_timer_run = False - self.bool_api_key_set = self.clockapi.set_api() + self.bool_api_key_set = self.clockify_api.set_api() # Define itself as TimersManager connector self.timers_manager_connector = self @@ -71,37 +65,32 @@ class ClockifyModule( self.show_settings() return - self.bool_workspace_set = self.clockapi.workspace_id is not None + self.bool_workspace_set = self.clockify_api.workspace_id is not None if self.bool_workspace_set is False: return self.start_timer_check() - self.set_menu_visibility() def tray_exit(self, *_a, **_kw): return def get_plugin_paths(self): - """Implementaton of IPluginPaths to get plugin paths.""" + """Implementation of IPluginPaths to get plugin paths.""" actions_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "launcher_actions" + os.path.dirname(os.path.abspath(__file__)), "launcher_actions" ) - return { - "actions": [actions_path] - } + return {"actions": [actions_path]} def get_ftrack_event_handler_paths(self): """Function for Ftrack module to add ftrack event handler paths.""" return { "user": [CLOCKIFY_FTRACK_USER_PATH], - "server": [CLOCKIFY_FTRACK_SERVER_PATH] + "server": [CLOCKIFY_FTRACK_SERVER_PATH], } def clockify_timer_stopped(self): self.bool_timer_run = False - # Call `ITimersManager` method self.timer_stopped() def start_timer_check(self): @@ -122,45 +111,44 @@ class ClockifyModule( def check_running(self): while self.bool_thread_check_running is True: bool_timer_run = False - if self.clockapi.get_in_progress() is not None: + if self.clockify_api.get_in_progress() is not None: bool_timer_run = True if self.bool_timer_run != bool_timer_run: if self.bool_timer_run is True: self.clockify_timer_stopped() elif self.bool_timer_run is False: - actual_timer = self.clockapi.get_in_progress() - if not actual_timer: + current_timer = self.clockify_api.get_in_progress() + if current_timer is None: + continue + current_proj_id = current_timer.get("projectId") + if not current_proj_id: continue - actual_proj_id = actual_timer["projectId"] - if not actual_proj_id: - continue - - project = self.clockapi.get_project_by_id(actual_proj_id) + project = self.clockify_api.get_project_by_id( + current_proj_id + ) if project and project.get("code") == 501: continue - project_name = project["name"] + project_name = project.get("name") - actual_timer_hierarchy = actual_timer["description"] - hierarchy_items = actual_timer_hierarchy.split("/") + current_timer_hierarchy = current_timer.get("description") + if not 
current_timer_hierarchy:
+                    continue
+                hierarchy_items = current_timer_hierarchy.split("/")
                 # Each pype timer must have at least 2 items!
                 if len(hierarchy_items) < 2:
                     continue
+
                 task_name = hierarchy_items[-1]
                 hierarchy = hierarchy_items[:-1]
-                task_type = None
-                if len(actual_timer.get("tags", [])) > 0:
-                    task_type = actual_timer["tags"][0].get("name")
                 data = {
                     "task_name": task_name,
                     "hierarchy": hierarchy,
                     "project_name": project_name,
-                    "task_type": task_type
                 }
-                # Call `ITimersManager` method
                 self.timer_started(data)
 
             self.bool_timer_run = bool_timer_run
@@ -184,6 +172,7 @@
     def tray_menu(self, parent_menu):
         # Menu for Tray App
         from qtpy import QtWidgets
+
         menu = QtWidgets.QMenu("Clockify", parent_menu)
         menu.setProperty("submenu", "on")
 
@@ -204,7 +193,9 @@
         parent_menu.addMenu(menu)
 
     def show_settings(self):
-        self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
+        self.widget_settings.input_api_key.setText(
+            self.clockify_api.get_api_key()
+        )
         self.widget_settings.show()
 
     def set_menu_visibility(self):
@@ -218,72 +209,82 @@
     def timer_started(self, data):
         """Tell TimersManager that timer started."""
         if self._timers_manager_module is not None:
-            self._timers_manager_module.timer_started(self._module.id, data)
+            self._timers_manager_module.timer_started(self.id, data)
 
     def timer_stopped(self):
         """Tell TimersManager that timer stopped."""
         if self._timers_manager_module is not None:
-            self._timers_manager_module.timer_stopped(self._module.id)
+            self._timers_manager_module.timer_stopped(self.id)
 
     def stop_timer(self):
         """Called from TimersManager to stop timer."""
-        self.clockapi.finish_time_entry()
+        self.clockify_api.finish_time_entry()
 
-    def start_timer(self, input_data):
-        """Called from TimersManager to start timer."""
-        # If not api key is not entered then skip
-        if not self.clockapi.get_api_key():
-            return
-
-        actual_timer = self.clockapi.get_in_progress()
-        actual_timer_hierarchy = None
-        actual_project_id = None
-        if actual_timer is not None:
-            actual_timer_hierarchy = actual_timer.get("description")
-            actual_project_id = actual_timer.get("projectId")
-
-        # Concatenate hierarchy and task to get description
-        desc_items = [val for val in input_data.get("hierarchy", [])]
-        desc_items.append(input_data["task_name"])
-        description = "/".join(desc_items)
-
-        # Check project existence
-        project_name = input_data["project_name"]
-        project_id = self.clockapi.get_project_id(project_name)
+    def _verify_project_exists(self, project_name):
+        project_id = self.clockify_api.get_project_id(project_name)
         if not project_id:
-            self.log.warning((
-                "Project \"{}\" was not found in Clockify. Timer won't start."
-            ).format(project_name))
+            self.log.warning(
+                'Project "{}" was not found in Clockify.'
+                " Timer won't start.".format(project_name)
+            )
 
             if not self.MessageWidgetClass:
                 return
 
             msg = (
-                "Project \"{}\" is not"
-                " in Clockify Workspace \"{}\"."
+                'Project "{}" is not'
+                ' in Clockify Workspace "{}".'
+                "

Please inform your Project Manager." - ).format(project_name, str(self.clockapi.workspace_name)) + ).format(project_name, str(self.clockify_api.workspace_name)) self.message_widget = self.MessageWidgetClass( msg, "Clockify - Info Message" ) self.message_widget.closed.connect(self.on_message_widget_close) self.message_widget.show() + return False + return project_id + def start_timer(self, input_data): + """Called from TimersManager to start timer.""" + # If not api key is not entered then skip + if not self.clockify_api.get_api_key(): return - if ( - actual_timer is not None and - description == actual_timer_hierarchy and - project_id == actual_project_id - ): + task_name = input_data.get("task_name") + + # Concatenate hierarchy and task to get description + description_items = list(input_data.get("hierarchy", [])) + description_items.append(task_name) + description = "/".join(description_items) + + # Check project existence + project_name = input_data.get("project_name") + project_id = self._verify_project_exists(project_name) + if not project_id: return + # Setup timer tags tag_ids = [] - task_tag_id = self.clockapi.get_tag_id(input_data["task_type"]) + tag_name = input_data.get("task_type") + if not tag_name: + # no task_type found in the input data + # if the timer is restarted by idle time (bug?) + asset_name = input_data["hierarchy"][-1] + asset_doc = get_asset_by_name(project_name, asset_name) + task_info = asset_doc["data"]["tasks"][task_name] + tag_name = task_info.get("type", "") + if not tag_name: + self.log.info("No tag information found for the timer") + + task_tag_id = self.clockify_api.get_tag_id(tag_name) if task_tag_id is not None: tag_ids.append(task_tag_id) - self.clockapi.start_time_entry( - description, project_id, tag_ids=tag_ids + # Start timer + self.clockify_api.start_time_entry( + description, + project_id, + tag_ids=tag_ids, + workspace_id=self.clockify_api.workspace_id, + user_id=self.clockify_api.user_id, ) diff --git a/openpype/modules/clockify/constants.py b/openpype/modules/clockify/constants.py index 66f6cb899a..4574f91be1 100644 --- a/openpype/modules/clockify/constants.py +++ b/openpype/modules/clockify/constants.py @@ -9,4 +9,4 @@ CLOCKIFY_FTRACK_USER_PATH = os.path.join( ) ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"] -CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/" +CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/" diff --git a/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py b/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py index c6b55947da..985cf49b97 100644 --- a/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py +++ b/openpype/modules/clockify/ftrack/server/action_clockify_sync_server.py @@ -4,7 +4,7 @@ from openpype_modules.ftrack.lib import ServerAction from openpype_modules.clockify.clockify_api import ClockifyAPI -class SyncClocifyServer(ServerAction): +class SyncClockifyServer(ServerAction): '''Synchronise project names and task types.''' identifier = "clockify.sync.server" @@ -14,12 +14,12 @@ class SyncClocifyServer(ServerAction): role_list = ["Pypeclub", "Administrator", "project Manager"] def __init__(self, *args, **kwargs): - super(SyncClocifyServer, self).__init__(*args, **kwargs) + super(SyncClockifyServer, self).__init__(*args, **kwargs) workspace_name = os.environ.get("CLOCKIFY_WORKSPACE") api_key = os.environ.get("CLOCKIFY_API_KEY") - self.clockapi = ClockifyAPI(api_key) - self.clockapi.set_workspace(workspace_name) + self.clockify_api = 
ClockifyAPI(api_key) + self.clockify_api.set_workspace(workspace_name) if api_key is None: modified_key = "None" else: @@ -48,13 +48,16 @@ class SyncClocifyServer(ServerAction): return True def launch(self, session, entities, event): - if self.clockapi.workspace_id is None: + self.clockify_api.set_api() + if self.clockify_api.workspace_id is None: return { "success": False, "message": "Clockify Workspace or API key are not set!" } - if self.clockapi.validate_workspace_perm() is False: + if not self.clockify_api.validate_workspace_permissions( + self.clockify_api.workspace_id, self.clockify_api.user_id + ): return { "success": False, "message": "Missing permissions for this action!" @@ -88,9 +91,9 @@ class SyncClocifyServer(ServerAction): task_type["name"] for task_type in task_types ] try: - clockify_projects = self.clockapi.get_projects() + clockify_projects = self.clockify_api.get_projects() if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) + response = self.clockify_api.add_project(project_name) if "id" not in response: self.log.warning( "Project \"{}\" can't be created. Response: {}".format( @@ -105,7 +108,7 @@ class SyncClocifyServer(ServerAction): ).format(project_name) } - clockify_workspace_tags = self.clockapi.get_tags() + clockify_workspace_tags = self.clockify_api.get_tags() for task_type_name in task_type_names: if task_type_name in clockify_workspace_tags: self.log.debug( @@ -113,7 +116,7 @@ class SyncClocifyServer(ServerAction): ) continue - response = self.clockapi.add_tag(task_type_name) + response = self.clockify_api.add_tag(task_type_name) if "id" not in response: self.log.warning( "Task \"{}\" can't be created. Response: {}".format( @@ -138,4 +141,4 @@ class SyncClocifyServer(ServerAction): def register(session, **kw): - SyncClocifyServer(session).register() + SyncClockifyServer(session).register() diff --git a/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py b/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py index a430791906..0e8cf6bd37 100644 --- a/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py +++ b/openpype/modules/clockify/ftrack/user/action_clockify_sync_local.py @@ -3,7 +3,7 @@ from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.clockify.clockify_api import ClockifyAPI -class SyncClocifyLocal(BaseAction): +class SyncClockifyLocal(BaseAction): '''Synchronise project names and task types.''' #: Action identifier. @@ -18,9 +18,9 @@ class SyncClocifyLocal(BaseAction): icon = statics_icon("app_icons", "clockify-white.png") def __init__(self, *args, **kwargs): - super(SyncClocifyLocal, self).__init__(*args, **kwargs) + super(SyncClockifyLocal, self).__init__(*args, **kwargs) #: CLockifyApi - self.clockapi = ClockifyAPI() + self.clockify_api = ClockifyAPI() def discover(self, session, entities, event): if ( @@ -31,14 +31,18 @@ class SyncClocifyLocal(BaseAction): return False def launch(self, session, entities, event): - self.clockapi.set_api() - if self.clockapi.workspace_id is None: + self.clockify_api.set_api() + if self.clockify_api.workspace_id is None: return { "success": False, "message": "Clockify Workspace or API key are not set!" } - if self.clockapi.validate_workspace_perm() is False: + if ( + self.clockify_api.validate_workspace_permissions( + self.clockify_api.workspace_id, self.clockify_api.user_id) + is False + ): return { "success": False, "message": "Missing permissions for this action!" 
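Editor's note: the permission check this patch switches to reads member roles from Clockify's v1 "workspace users" listing. A condensed, self-contained version of the same request (API key and ids are placeholders):

import requests

CLOCKIFY_ENDPOINT = "https://api.clockify.me/api/v1/"
ADMIN_PERMISSION_NAMES = ["WORKSPACE_OWN", "WORKSPACE_ADMIN"]


def user_is_workspace_admin(api_key, workspace_id, user_id):
    """Return True when the user holds an owner/admin role in the workspace."""
    response = requests.get(
        CLOCKIFY_ENDPOINT
        + "workspaces/{}/users?includeRoles=1".format(workspace_id),
        headers={"x-api-key": api_key},
    )
    for user in response.json():
        if user.get("id") != user_id:
            continue
        return any(
            role.get("role") in ADMIN_PERMISSION_NAMES
            for role in user.get("roles") or []
        )
    return False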
@@ -74,9 +78,9 @@ class SyncClocifyLocal(BaseAction): task_type["name"] for task_type in task_types ] try: - clockify_projects = self.clockapi.get_projects() + clockify_projects = self.clockify_api.get_projects() if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) + response = self.clockify_api.add_project(project_name) if "id" not in response: self.log.warning( "Project \"{}\" can't be created. Response: {}".format( @@ -91,7 +95,7 @@ class SyncClocifyLocal(BaseAction): ).format(project_name) } - clockify_workspace_tags = self.clockapi.get_tags() + clockify_workspace_tags = self.clockify_api.get_tags() for task_type_name in task_type_names: if task_type_name in clockify_workspace_tags: self.log.debug( @@ -99,7 +103,7 @@ class SyncClocifyLocal(BaseAction): ) continue - response = self.clockapi.add_tag(task_type_name) + response = self.clockify_api.add_tag(task_type_name) if "id" not in response: self.log.warning( "Task \"{}\" can't be created. Response: {}".format( @@ -121,4 +125,4 @@ class SyncClocifyLocal(BaseAction): def register(session, **kw): - SyncClocifyLocal(session).register() + SyncClockifyLocal(session).register() diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index 7663aecc31..4a653c1b8d 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -6,9 +6,9 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI class ClockifyStart(LauncherAction): name = "clockify_start_timer" label = "Clockify - Start Timer" - icon = "clockify_icon" + icon = "app_icons/clockify.png" order = 500 - clockapi = ClockifyAPI() + clockify_api = ClockifyAPI() def is_compatible(self, session): """Return whether the action is compatible with the session""" @@ -17,23 +17,39 @@ class ClockifyStart(LauncherAction): return False def process(self, session, **kwargs): + self.clockify_api.set_api() + user_id = self.clockify_api.user_id + workspace_id = self.clockify_api.workspace_id project_name = session["AVALON_PROJECT"] asset_name = session["AVALON_ASSET"] task_name = session["AVALON_TASK"] - description = asset_name - asset_doc = get_asset_by_name( - project_name, asset_name, fields=["data.parents"] - ) - if asset_doc is not None: - desc_items = asset_doc.get("data", {}).get("parents", []) - desc_items.append(asset_name) - desc_items.append(task_name) - description = "/".join(desc_items) - project_id = self.clockapi.get_project_id(project_name) - tag_ids = [] - tag_ids.append(self.clockapi.get_tag_id(task_name)) - self.clockapi.start_time_entry( - description, project_id, tag_ids=tag_ids + # fetch asset docs + asset_doc = get_asset_by_name(project_name, asset_name) + + # get task type to fill the timer tag + task_info = asset_doc["data"]["tasks"][task_name] + task_type = task_info["type"] + + # check if the task has hierarchy and fill the + parents_data = asset_doc["data"] + if parents_data is not None: + description_items = parents_data.get("parents", []) + description_items.append(asset_name) + description_items.append(task_name) + description = "/".join(description_items) + + project_id = self.clockify_api.get_project_id( + project_name, workspace_id + ) + tag_ids = [] + tag_name = task_type + tag_ids.append(self.clockify_api.get_tag_id(tag_name, workspace_id)) + self.clockify_api.start_time_entry( + description, + project_id, + tag_ids=tag_ids, + workspace_id=workspace_id, + 
user_id=user_id, ) diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index c346a1b4f6..cbd2519a04 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -3,20 +3,39 @@ from openpype_modules.clockify.clockify_api import ClockifyAPI from openpype.pipeline import LauncherAction -class ClockifySync(LauncherAction): +class ClockifyPermissionsCheckFailed(Exception): + """Timer start failed due to user permissions check. + Message should be self explanatory as traceback won't be shown. + """ + pass + + +class ClockifySync(LauncherAction): name = "sync_to_clockify" label = "Sync to Clockify" - icon = "clockify_white_icon" + icon = "app_icons/clockify-white.png" order = 500 - clockapi = ClockifyAPI() - have_permissions = clockapi.validate_workspace_perm() + clockify_api = ClockifyAPI() def is_compatible(self, session): - """Return whether the action is compatible with the session""" - return self.have_permissions + """Check if there's some projects to sync""" + try: + next(get_projects()) + return True + except StopIteration: + return False def process(self, session, **kwargs): + self.clockify_api.set_api() + workspace_id = self.clockify_api.workspace_id + user_id = self.clockify_api.user_id + if not self.clockify_api.validate_workspace_permissions( + workspace_id, user_id + ): + raise ClockifyPermissionsCheckFailed( + "Current CLockify user is missing permissions for this action!" + ) project_name = session.get("AVALON_PROJECT") or "" projects_to_sync = [] @@ -30,24 +49,28 @@ class ClockifySync(LauncherAction): task_types = project["config"]["tasks"].keys() projects_info[project["name"]] = task_types - clockify_projects = self.clockapi.get_projects() + clockify_projects = self.clockify_api.get_projects(workspace_id) for project_name, task_types in projects_info.items(): if project_name in clockify_projects: continue - response = self.clockapi.add_project(project_name) + response = self.clockify_api.add_project( + project_name, workspace_id + ) if "id" not in response: - self.log.error("Project {} can't be created".format( - project_name - )) + self.log.error( + "Project {} can't be created".format(project_name) + ) continue - clockify_workspace_tags = self.clockapi.get_tags() + clockify_workspace_tags = self.clockify_api.get_tags(workspace_id) for task_type in task_types: if task_type not in clockify_workspace_tags: - response = self.clockapi.add_tag(task_type) + response = self.clockify_api.add_tag( + task_type, workspace_id + ) if "id" not in response: - self.log.error('Task {} can\'t be created'.format( - task_type - )) + self.log.error( + "Task {} can't be created".format(task_type) + ) continue diff --git a/openpype/modules/clockify/widgets.py b/openpype/modules/clockify/widgets.py index 122b6212c0..86e67569f2 100644 --- a/openpype/modules/clockify/widgets.py +++ b/openpype/modules/clockify/widgets.py @@ -34,7 +34,7 @@ class MessageWidget(QtWidgets.QWidget): def _ui_layout(self, messages): if not messages: - messages = ["*Misssing messages (This is a bug)*", ] + messages = ["*Missing messages (This is a bug)*", ] elif not isinstance(messages, (tuple, list)): messages = [messages, ] @@ -77,15 +77,15 @@ class MessageWidget(QtWidgets.QWidget): class ClockifySettings(QtWidgets.QWidget): - SIZE_W = 300 + SIZE_W = 500 SIZE_H = 130 loginSignal = QtCore.Signal(object, object, object) - def __init__(self, clockapi, optional=True): + def 
__init__(self, clockify_api, optional=True): super(ClockifySettings, self).__init__() - self.clockapi = clockapi + self.clockify_api = clockify_api self.optional = optional self.validated = False @@ -162,17 +162,17 @@ class ClockifySettings(QtWidgets.QWidget): def click_ok(self): api_key = self.input_api_key.text().strip() if self.optional is True and api_key == '': - self.clockapi.save_api_key(None) - self.clockapi.set_api(api_key) + self.clockify_api.save_api_key(None) + self.clockify_api.set_api(api_key) self.validated = False self._close_widget() return - validation = self.clockapi.validate_api_key(api_key) + validation = self.clockify_api.validate_api_key(api_key) if validation: - self.clockapi.save_api_key(api_key) - self.clockapi.set_api(api_key) + self.clockify_api.save_api_key(api_key) + self.clockify_api.set_api(api_key) self.validated = True self._close_widget() else: diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 648eb77007..e3e94d50cd 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -534,8 +534,8 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): template_data["comment"] = None anatomy = instance.context.data['anatomy'] - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] + template_obj = anatomy.templates_obj["publish"]["path"] + template_filled = template_obj.format_strict(template_data) file_path = os.path.normpath(template_filled) self.log.info("Using published scene for render {}".format(file_path)) @@ -582,7 +582,6 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): metadata_folder = metadata_folder.replace(orig_scene, new_scene) instance.data["publishRenderMetadataFolder"] = metadata_folder - self.log.info("Scene name was switched {} -> {}".format( orig_scene, new_scene )) @@ -663,7 +662,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): # test if there is instance of workfile waiting # to be published. - assert i.data["publish"] is True, ( + assert i.data.get("publish", True) is True, ( "Workfile (scene) must be published along") return i diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 9981bead3e..2de6073e29 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -5,23 +5,26 @@ This is resolving index of server lists stored in `deadlineServers` instance attribute or using default server if that attribute doesn't exists. """ +from maya import cmds + import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.415 + # Run before collect_render. 
+    order = pyblish.api.CollectorOrder + 0.005
     label = "Deadline Webservice from the Instance"

     families = ["rendering", "renderlayer"]
+    hosts = ["maya"]

     def process(self, instance):
         instance.data["deadlineUrl"] = self._collect_deadline_url(instance)
         self.log.info(
             "Using {} for submission.".format(instance.data["deadlineUrl"]))

-    @staticmethod
-    def _collect_deadline_url(render_instance):
+    def _collect_deadline_url(self, render_instance):
         # type: (pyblish.api.Instance) -> str
         """Get Deadline Webservice URL from render instance.

@@ -49,8 +52,16 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
         default_server = render_instance.context.data["defaultDeadline"]
         instance_server = render_instance.data.get("deadlineServers")
         if not instance_server:
+            self.log.debug("Using default server.")
             return default_server

+        # Get instance server as string.
+        if isinstance(instance_server, int):
+            instance_server = cmds.getAttr(
+                "{}.deadlineServers".format(render_instance.data["objset"]),
+                asString=True
+            )
+
         default_servers = deadline_settings["deadline_urls"]
         project_servers = (
             render_instance.context.data
@@ -58,15 +69,23 @@ class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin):
             ["deadline"]
             ["deadline_servers"]
         )
-        deadline_servers = {
+        if not project_servers:
+            self.log.debug("No project servers found. Using default servers.")
+            return default_servers[instance_server]
+
+        project_enabled_servers = {
             k: default_servers[k]
             for k in project_servers
             if k in default_servers
         }
-        # This is Maya specific and may not reflect real selection of deadline
-        # url as dictionary keys in Python 2 are not ordered
-        return deadline_servers[
-            list(deadline_servers.keys())[
-                int(render_instance.data.get("deadlineServers"))
-            ]
-        ]
+
+        msg = (
+            "\"{}\" server on instance is not enabled in project settings."
+            " Enabled project servers:\n{}".format(
+                instance_server, project_enabled_servers
+            )
+        )
+        assert instance_server in project_enabled_servers, msg
+
+        self.log.debug("Using project approved server.")
+        return project_enabled_servers[instance_server]
diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
index e6ad6a9aa1..1a0d615dc3 100644
--- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
+++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py
@@ -4,9 +4,21 @@ import pyblish.api


 class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin):
-    """Collect default Deadline Webservice URL."""
+    """Collect default Deadline Webservice URL.

-    order = pyblish.api.CollectorOrder + 0.410
+    DL webservice addresses must be configured first in System Settings for
+    the project settings enum to work.
+
+    Default webservice could be overridden by
+    `project_settings/deadline/deadline_servers`. Currently only a single url
+    is expected.
+
+    This url could be overridden by some hosts directly on instances with
+    `CollectDeadlineServerFromInstance`.
+    """
+
+    # Run before collect_deadline_server_instance.
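+    # A sketch of the resulting URL precedence, lowest to highest
+    # (pseudo-code; names taken from the docstring above and this patch):
+    #
+    #   url = deadline_module.deadline_urls["default"]
+    #   if project_settings["deadline"]["deadline_servers"]:
+    #       url = deadline_module.deadline_urls[deadline_servers[0]]
+    #   if instance.data.get("deadlineUrl"):
+    #       url = instance.data["deadlineUrl"]  # per-instance override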
+ order = pyblish.api.CollectorOrder + 0.0025 label = "Default Deadline Webservice" pass_mongo_url = False @@ -23,3 +35,16 @@ class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): context.data["defaultDeadline"] = deadline_module.deadline_urls["default"] # noqa: E501 context.data["deadlinePassMongoUrl"] = self.pass_mongo_url + + deadline_servers = (context.data + ["project_settings"] + ["deadline"] + ["deadline_servers"]) + if deadline_servers: + deadline_server_name = deadline_servers[0] + deadline_webservice = deadline_module.deadline_urls.get( + deadline_server_name) + if deadline_webservice: + context.data["defaultDeadline"] = deadline_webservice + self.log.debug("Overriding from project settings with {}".format( # noqa: E501 + deadline_webservice)) diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py index 48130848d5..e221eb00ea 100644 --- a/openpype/modules/deadline/plugins/publish/collect_pools.py +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -3,21 +3,60 @@ """ import pyblish.api +from openpype.lib import TextDef +from openpype.pipeline.publish import OpenPypePyblishPluginMixin -class CollectDeadlinePools(pyblish.api.InstancePlugin): +class CollectDeadlinePools(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Collect pools from instance if present, from Setting otherwise.""" order = pyblish.api.CollectorOrder + 0.420 label = "Collect Deadline Pools" - families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + families = ["rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender"] primary_pool = None secondary_pool = None + @classmethod + def apply_settings(cls, project_settings, system_settings): + # deadline.publish.CollectDeadlinePools + settings = project_settings["deadline"]["publish"]["CollectDeadlinePools"] # noqa + cls.primary_pool = settings.get("primary_pool", None) + cls.secondary_pool = settings.get("secondary_pool", None) + def process(self, instance): + + attr_values = self.get_attr_values_from_data(instance.data) if not instance.data.get("primaryPool"): - instance.data["primaryPool"] = self.primary_pool or "none" + instance.data["primaryPool"] = ( + attr_values.get("primaryPool") or self.primary_pool or "none" + ) if not instance.data.get("secondaryPool"): - instance.data["secondaryPool"] = self.secondary_pool or "none" + instance.data["secondaryPool"] = ( + attr_values.get("secondaryPool") or self.secondary_pool or "none" # noqa + ) + + @classmethod + def get_attribute_defs(cls): + # TODO: Preferably this would be an enum for the user + # but the Deadline server URL can be dynamic and + # can be set per render instance. Since get_attribute_defs + # can't be dynamic unfortunately EnumDef isn't possible (yet?) 
+ # pool_names = self.deadline_module.get_deadline_pools(deadline_url, + # self.log) + # secondary_pool_names = ["-"] + pool_names + + return [ + TextDef("primaryPool", + label="Primary Pool", + default=cls.primary_pool), + TextDef("secondaryPool", + label="Secondary Pool", + default=cls.secondary_pool) + ] diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index 0c1ffa6bd7..83dd5b49e2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -2,6 +2,7 @@ import os import attr import getpass import pyblish.api +from datetime import datetime from openpype.lib import ( env_value_to_bool, @@ -10,6 +11,8 @@ from openpype.lib import ( from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.tests.lib import is_in_tests +from openpype.lib import is_running_from_build @attr.s @@ -48,9 +51,11 @@ class AfterEffectsSubmitDeadline( context = self._instance.context + batch_name = os.path.basename(self._instance.data["source"]) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") dln_job_info.Name = self._instance.data["name"] - dln_job_info.BatchName = os.path.basename(self._instance. - data["source"]) + dln_job_info.BatchName = batch_name dln_job_info.Plugin = "AfterEffects" dln_job_info.UserName = context.data.get( "deadlineUser", getpass.getuser()) @@ -83,8 +88,13 @@ class AfterEffectsSubmitDeadline( "AVALON_APP_NAME", "OPENPYPE_DEV", "OPENPYPE_LOG_NO_COLORS", - "OPENPYPE_VERSION" + "IS_TEST" ] + + # Add OpenPype version if we are running from build. 
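+        # When running from a source checkout there is no installed build the
+        # farm could match, so OPENPYPE_VERSION is only forwarded for actual
+        # builds (our reading of the intent behind this conditional).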
+ if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") diff --git a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py index 038ee4fc03..bcf0850768 100644 --- a/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -106,7 +106,7 @@ class CelactionSubmitDeadline(pyblish.api.InstancePlugin): # define chunk and priority chunk_size = instance.context.data.get("chunk") - if chunk_size == 0: + if not chunk_size: chunk_size = self.deadline_chunk_size # search for %02d pattern in name, and padding number diff --git a/openpype/hosts/fusion/plugins/publish/submit_deadline.py b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py similarity index 50% rename from openpype/hosts/fusion/plugins/publish/submit_deadline.py rename to openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py index 8570c759bc..a48596c6bf 100644 --- a/openpype/hosts/fusion/plugins/publish/submit_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_fusion_deadline.py @@ -7,9 +7,19 @@ import requests import pyblish.api from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ( + OpenPypePyblishPluginMixin +) +from openpype.lib import ( + BoolDef, + NumberDef +) -class FusionSubmitDeadline(pyblish.api.InstancePlugin): +class FusionSubmitDeadline( + pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin +): """Submit current Comp to Deadline Renders are submitted to a Deadline Web Service as @@ -17,12 +27,62 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): """ - label = "Submit to Deadline" + label = "Submit Fusion to Deadline" order = pyblish.api.IntegratorOrder hosts = ["fusion"] - families = ["render.farm"] + families = ["render"] + targets = ["local"] + + # presets + priority = 50 + chunk_size = 1 + concurrent_tasks = 1 + group = "" + + @classmethod + def get_attribute_defs(cls): + return [ + NumberDef( + "priority", + label="Priority", + default=cls.priority, + decimals=0 + ), + NumberDef( + "chunk", + label="Frames Per Task", + default=cls.chunk_size, + decimals=0, + minimum=1, + maximum=1000 + ), + NumberDef( + "concurrency", + label="Concurrency", + default=cls.concurrent_tasks, + decimals=0, + minimum=1, + maximum=10 + ), + BoolDef( + "suspend_publish", + default=False, + label="Suspend publish" + ) + ] def process(self, instance): + if not instance.data.get("farm"): + self.log.debug("Skipping local instance.") + return + + attribute_values = self.get_attr_values_from_data( + instance.data) + + # add suspend_publish attributeValue to instance data + instance.data["suspend_publish"] = attribute_values[ + "suspend_publish"] + context = instance.context key = "__hasRun{}".format(self.__class__.__name__) @@ -33,36 +93,55 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): from openpype.hosts.fusion.api.lib import get_frame_path - deadline_url = ( - context.data["system_settings"] - ["modules"] - ["deadline"] - ["DEADLINE_REST_URL"] - ) - assert deadline_url, "Requires DEADLINE_REST_URL" + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + # if custom one is set in instance, use that + if instance.data.get("deadlineUrl"): + deadline_url = 
instance.data.get("deadlineUrl") + assert deadline_url, "Requires Deadline Webservice URL" # Collect all saver instances in context that are to be rendered saver_instances = [] - for instance in context[:]: - if not self.families[0] in instance.data.get("families"): + for instance in context: + if instance.data["family"] != "render": # Allow only saver family instances continue if not instance.data.get("publish", True): # Skip inactive instances continue + self.log.debug(instance.data["name"]) saver_instances.append(instance) if not saver_instances: - raise RuntimeError("No instances found for Deadline submittion") + raise RuntimeError("No instances found for Deadline submission") - fusion_version = int(context.data["fusionVersion"]) - filepath = context.data["currentFile"] - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") + comment = instance.data.get("comment", "") deadline_user = context.data.get("deadlineUser", getpass.getuser()) + script_path = context.data["currentFile"] + + for item in context: + if "workfile" in item.data["families"]: + msg = "Workfile (scene) must be published along" + assert item.data["publish"] is True, msg + + template_data = item.data.get("anatomyData") + rep = item.data.get("representations")[0].get("name") + template_data["representation"] = rep + template_data["ext"] = rep + template_data["comment"] = None + anatomy_filled = context.data["anatomy"].format(template_data) + template_filled = anatomy_filled["publish"]["path"] + script_path = os.path.normpath(template_filled) + + self.log.info( + "Using published scene for render {}".format(script_path) + ) + + filename = os.path.basename(script_path) + # Documentation for keys available at: # https://docs.thinkboxsoftware.com # /products/deadline/8.0/1_User%20Manual/manual @@ -73,31 +152,41 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): "BatchName": filename, # Asset dependency to wait for at least the scene file to sync. 
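+            # Per the Deadline docs, an asset dependency keeps the job queued
+            # until the referenced file exists, so rendering cannot start
+            # before the workfile has finished syncing.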
-            "AssetDependency0": filepath,
+            "AssetDependency0": script_path,

             # Job name, as seen in Monitor
             "Name": filename,

+            "Priority": attribute_values.get(
+                "priority", self.priority),
+            "ChunkSize": attribute_values.get(
+                "chunk", self.chunk_size),
+            "ConcurrentTasks": attribute_values.get(
+                "concurrency",
+                self.concurrent_tasks
+            ),
+
             # User, as seen in Monitor
             "UserName": deadline_user,

-            # Use a default submission pool for Fusion
-            "Pool": "fusion",
+            "Pool": instance.data.get("primaryPool"),
+            "SecondaryPool": instance.data.get("secondaryPool"),
+            "Group": self.group,

             "Plugin": "Fusion",
             "Frames": "{start}-{end}".format(
-                start=int(context.data["frameStart"]),
-                end=int(context.data["frameEnd"])
+                start=int(instance.data["frameStartHandle"]),
+                end=int(instance.data["frameEndHandle"])
             ),

             "Comment": comment,
         },
         "PluginInfo": {
             # Input
-            "FlowFile": filepath,
+            "FlowFile": script_path,

             # Mandatory for Deadline
-            "Version": str(fusion_version),
+            "Version": str(instance.data["app_version"]),

             # Render in high quality
             "HighQuality": True,
@@ -108,7 +197,7 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):

             # Proxy: higher numbers smaller images for faster test renders
             # 1 = no proxy quality
-            "Proxy": 1,
+            "Proxy": 1
         },

         # Mandatory for Deadline, may be empty
@@ -117,7 +206,9 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):

         # Enable going to rendered frames from Deadline Monitor
         for index, instance in enumerate(saver_instances):
-            head, padding, tail = get_frame_path(instance.data["path"])
+            head, padding, tail = get_frame_path(
+                instance.data["expectedFiles"][0]
+            )
             path = "{}{}{}".format(head, "#" * padding, tail)
             folder, filename = os.path.split(path)
             payload["JobInfo"]["OutputDirectory%d" % index] = folder
diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
index 6327143623..84fca11d9d 100644
--- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py
@@ -5,6 +5,7 @@ from pathlib import Path
 from collections import OrderedDict
 from zipfile import ZipFile, is_zipfile
 import re
+from datetime import datetime

 import attr
 import pyblish.api
@@ -12,6 +13,8 @@ import pyblish.api
 from openpype.pipeline import legacy_io
 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build


 class _ZipFile(ZipFile):
@@ -261,7 +264,10 @@ class HarmonySubmitDeadline(
         job_info.Pool = self._instance.data.get("primaryPool")
         job_info.SecondaryPool = self._instance.data.get("secondaryPool")
         job_info.ChunkSize = self.chunk_size
-        job_info.BatchName = os.path.basename(self._instance.data["source"])
+        batch_name = os.path.basename(self._instance.data["source"])
+        if is_in_tests():
+            batch_name += datetime.now().strftime("%d%m%Y%H%M%S")
+        job_info.BatchName = batch_name
         job_info.Department = self.department
         job_info.Group = self.group
@@ -274,9 +280,14 @@
             "AVALON_TASK",
             "AVALON_APP_NAME",
             "OPENPYPE_DEV",
-            "OPENPYPE_LOG_NO_COLORS",
-            "OPENPYPE_VERSION"
+            "OPENPYPE_LOG_NO_COLORS",
+            "IS_TEST"
         ]
+
+        # Add OpenPype version if we are running from build.
+ if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index 95856137e2..68aa653804 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -1,5 +1,6 @@ import os import json +from datetime import datetime import requests import hou @@ -7,6 +8,8 @@ import hou import pyblish.api from openpype.pipeline import legacy_io +from openpype.tests.lib import is_in_tests +from openpype.lib import is_running_from_build class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): @@ -60,6 +63,8 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=code, scene=scenename) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") deadline_user = "roy" # todo: get deadline user dynamically # Get only major.minor version of Houdini, ignore patch version @@ -129,9 +134,13 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # Submit along the current Avalon tool setup that we launched # this application with so the Render Slave can build its own # similar environment using it, e.g. "houdini17.5;pluginx2.3" - "AVALON_TOOLS", - "OPENPYPE_VERSION" + "AVALON_TOOLS" ] + + # Add OpenPype version if we are running from build. + if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index beda753723..254914a850 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -1,16 +1,27 @@ -import os -import json -import getpass +import hou -import requests +import os +import attr +import getpass +from datetime import datetime import pyblish.api -# import hou ??? - from openpype.pipeline import legacy_io +from openpype.tests.lib import is_in_tests +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.lib import is_running_from_build -class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): +@attr.s +class DeadlinePluginInfo(): + SceneFile = attr.ib(default=None) + OutputDriver = attr.ib(default=None) + Version = attr.ib(default=None) + IgnoreInputs = attr.ib(default=True) + + +class HoudiniSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): """Submit Solaris USD Render ROPs to Deadline. 
     Renders are submitted to a Deadline Web Service as
@@ -27,138 +38,108 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin):
     order = pyblish.api.IntegratorOrder
     hosts = ["houdini"]
     families = ["usdrender",
-                "redshift_rop"]
+                "redshift_rop",
+                "arnold_rop",
+                "mantra_rop",
+                "karma_rop",
+                "vray_rop"]
     targets = ["local"]
+    use_published = True

-    def process(self, instance):
+    def get_job_info(self):
+        job_info = DeadlineJobInfo(Plugin="Houdini")

+        instance = self._instance
         context = instance.context
-        code = context.data["code"]
+
         filepath = context.data["currentFile"]
         filename = os.path.basename(filepath)
-        comment = context.data.get("comment", "")
-        deadline_user = context.data.get("deadlineUser", getpass.getuser())
-        jobname = "%s - %s" % (filename, instance.name)

-        # Support code prefix label for batch name
-        batch_name = filename
-        if code:
-            batch_name = "{0} - {1}".format(code, batch_name)
+        job_info.Name = "{} - {}".format(filename, instance.name)
+        job_info.BatchName = filename
+        job_info.Plugin = "Houdini"
+        job_info.UserName = context.data.get(
+            "deadlineUser", getpass.getuser())

-        # Output driver to render
-        driver = instance[0]
+        if is_in_tests():
+            job_info.BatchName += datetime.now().strftime("%d%m%Y%H%M%S")

-        # StartFrame to EndFrame by byFrameStep
+        # Deadline requires integers in frame range
         frames = "{start}-{end}x{step}".format(
             start=int(instance.data["frameStart"]),
             end=int(instance.data["frameEnd"]),
             step=int(instance.data["byFrameStep"]),
         )
+        job_info.Frames = frames

-        # Documentation for keys available at:
-        # https://docs.thinkboxsoftware.com
-        #    /products/deadline/8.0/1_User%20Manual/manual
-        #    /manual-submission.html#job-info-file-options
-        payload = {
-            "JobInfo": {
-                # Top-level group name
-                "BatchName": batch_name,
+        job_info.Pool = instance.data.get("primaryPool")
+        job_info.SecondaryPool = instance.data.get("secondaryPool")
+        job_info.ChunkSize = instance.data.get("chunkSize", 10)
+        job_info.Comment = context.data.get("comment")

-                # Job name, as seen in Monitor
-                "Name": jobname,
-
-                # Arbitrary username, for visualisation in Monitor
-                "UserName": deadline_user,
-
-                "Plugin": "Houdini",
-                "Pool": instance.data.get("primaryPool"),
-                "secondaryPool": instance.data.get("secondaryPool"),
-                "Frames": frames,
-
-                "ChunkSize": instance.data.get("chunkSize", 10),
-
-                "Comment": comment
-            },
-            "PluginInfo": {
-                # Input
-                "SceneFile": filepath,
-                "OutputDriver": driver.path(),
-
-                # Mandatory for Deadline
-                # Houdini version without patch number
-                "Version": hou.applicationVersionString().rsplit(".", 1)[0],
-
-                "IgnoreInputs": True
-            },
-
-            # Mandatory for Deadline, may be empty
-            "AuxFiles": []
-        }
-
-        # Include critical environment variables with submission + api.Session
         keys = [
-            # Submit along the current Avalon tool setup that we launched
-            # this application with so the Render Slave can build its own
-            # similar environment using it, e.g. "maya2018;vray4.x;yeti3.1.9"
-            "AVALON_TOOLS",
-            "OPENPYPE_VERSION"
+            "FTRACK_API_KEY",
+            "FTRACK_API_USER",
+            "FTRACK_SERVER",
+            "OPENPYPE_SG_USER",
+            "AVALON_PROJECT",
+            "AVALON_ASSET",
+            "AVALON_TASK",
+            "AVALON_APP_NAME",
+            "OPENPYPE_DEV",
+            "OPENPYPE_LOG_NO_COLORS"
         ]
+
+        # Add OpenPype version if we are running from build.
+ if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + # Add mongo url if it's enabled - if context.data.get("deadlinePassMongoUrl"): + if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) + for key in keys: + value = environment.get(key) + if value: + job_info.EnvironmentKeyValue[key] = value - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" - # Include OutputFilename entries - # The first entry also enables double-click to preview rendered - # frames from Deadline Monitor - output_data = {} for i, filepath in enumerate(instance.data["files"]): dirname = os.path.dirname(filepath) fname = os.path.basename(filepath) - output_data["OutputDirectory%d" % i] = dirname.replace("\\", "/") - output_data["OutputFilename%d" % i] = fname + job_info.OutputDirectory += dirname.replace("\\", "/") + job_info.OutputFilename += fname - # For now ensure destination folder exists otherwise HUSK - # will fail to render the output image. This is supposedly fixed - # in new production builds of Houdini - # TODO Remove this workaround with Houdini 18.0.391+ - if not os.path.exists(dirname): - self.log.info("Ensuring output directory exists: %s" % - dirname) - os.makedirs(dirname) + return job_info - payload["JobInfo"].update(output_data) + def get_plugin_info(self): - self.submit(instance, payload) + instance = self._instance + context = instance.context - def submit(self, instance, payload): + # Output driver to render + driver = hou.node(instance.data["instance_node"]) + hou_major_minor = hou.applicationVersionString().rsplit(".", 1)[0] - AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE", - "http://localhost:8082") - assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" + plugin_info = DeadlinePluginInfo( + SceneFile=context.data["currentFile"], + OutputDriver=driver.path(), + Version=hou_major_minor, + IgnoreInputs=True + ) - plugin = payload["JobInfo"]["Plugin"] - self.log.info("Using Render Plugin : {}".format(plugin)) + return attr.asdict(plugin_info) - self.log.info("Submitting..") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. 
http://192.168.0.1:8082/api/jobs
-        url = "{}/api/jobs".format(AVALON_DEADLINE)
-        response = requests.post(url, json=payload)
-        if not response.ok:
-            raise Exception(response.text)

+    def process(self, instance):
+        super(HoudiniSubmitDeadline, self).process(instance)

+        # TODO: Avoid the need for this logic here, needed for submit publish
         # Store output dir for unified publisher (filesequence)
         output_dir = os.path.dirname(instance.data["files"][0])
         instance.data["outputDir"] = output_dir
-        instance.data["deadlineSubmissionJob"] = response.json()
+        instance.data["toBeRenderedOn"] = "deadline"
diff --git a/openpype/modules/deadline/plugins/publish/submit_max_deadline.py b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
new file mode 100644
index 0000000000..b6a30e36b7
--- /dev/null
+++ b/openpype/modules/deadline/plugins/publish/submit_max_deadline.py
@@ -0,0 +1,286 @@
+import os
+import getpass
+import copy
+
+import attr
+from openpype.lib import (
+    TextDef,
+    BoolDef,
+    NumberDef,
+)
+from openpype.pipeline import (
+    legacy_io,
+    OpenPypePyblishPluginMixin
+)
+from openpype.settings import get_project_settings
+from openpype.hosts.max.api.lib import (
+    get_current_renderer,
+    get_multipass_setting
+)
+from openpype.hosts.max.api.lib_rendersettings import RenderSettings
+from openpype_modules.deadline import abstract_submit_deadline
+from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+
+
+@attr.s
+class MaxPluginInfo(object):
+    SceneFile = attr.ib(default=None)  # Input
+    Version = attr.ib(default=None)  # Mandatory for Deadline
+    SaveFile = attr.ib(default=True)
+    IgnoreInputs = attr.ib(default=True)
+
+
+class MaxSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline,
+                        OpenPypePyblishPluginMixin):
+
+    label = "Submit Render to Deadline"
+    hosts = ["max"]
+    families = ["maxrender"]
+    targets = ["local"]
+
+    use_published = True
+    priority = 50
+    chunk_size = 1
+    jobInfo = {}
+    pluginInfo = {}
+    group = None
+
+    @classmethod
+    def apply_settings(cls, project_settings, system_settings):
+        settings = project_settings["deadline"]["publish"]["MaxSubmitDeadline"]  # noqa
+
+        # Take some defaults from settings
+        cls.use_published = settings.get("use_published",
+                                         cls.use_published)
+        cls.priority = settings.get("priority",
+                                    cls.priority)
+        cls.chunk_size = settings.get("chunk_size", cls.chunk_size)
+        cls.group = settings.get("group", cls.group)
+
+    def get_job_info(self):
+        job_info = DeadlineJobInfo(Plugin="3dsmax")
+
+        # todo: test whether this works for existing production cases
+        #       where custom jobInfo was stored in the project settings
+        job_info.update(self.jobInfo)
+
+        instance = self._instance
+        context = instance.context
+        # Always use the original work file name for the Job name even when
+        # rendering is done from the published Work File. The original work
+        # file name is clearer because it can also have subversion strings,
+        # etc. which are stripped for the published file.
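+        # e.g. a local save "sh010_light_v012_fixCrop.max" is published as
+        # "sh010_light_v012.max"; the Job name keeps the verbose local name.
+        # (These file names are made up for illustration.)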
+ + src_filepath = context.data["currentFile"] + src_filename = os.path.basename(src_filepath) + + job_info.Name = "%s - %s" % (src_filename, instance.name) + job_info.BatchName = src_filename + job_info.Plugin = instance.data["plugin"] + job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) + job_info.EnableAutoTimeout = True + # Deadline requires integers in frame range + frames = "{start}-{end}".format( + start=int(instance.data["frameStart"]), + end=int(instance.data["frameEnd"]) + ) + job_info.Frames = frames + + job_info.Pool = instance.data.get("primaryPool") + job_info.SecondaryPool = instance.data.get("secondaryPool") + + attr_values = self.get_attr_values_from_data(instance.data) + + job_info.ChunkSize = attr_values.get("chunkSize", 1) + job_info.Comment = context.data.get("comment") + job_info.Priority = attr_values.get("priority", self.priority) + job_info.Group = attr_values.get("group", self.group) + + # Add options from RenderGlobals + render_globals = instance.data.get("renderGlobals", {}) + job_info.update(render_globals) + + keys = [ + "FTRACK_API_KEY", + "FTRACK_API_USER", + "FTRACK_SERVER", + "OPENPYPE_SG_USER", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "AVALON_APP_NAME", + "OPENPYPE_DEV", + "OPENPYPE_VERSION", + "IS_TEST" + ] + # Add mongo url if it's enabled + if self._instance.context.data.get("deadlinePassMongoUrl"): + keys.append("OPENPYPE_MONGO") + + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + for key in keys: + value = environment.get(key) + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + # to recognize job from PYPE for turning Event On/Off + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" + job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + + # Add list of expected files to job + # --------------------------------- + exp = instance.data.get("expectedFiles") + + for filepath in self._iter_expected_files(exp): + job_info.OutputDirectory += os.path.dirname(filepath) + job_info.OutputFilename += os.path.basename(filepath) + + return job_info + + def get_plugin_info(self): + instance = self._instance + + plugin_info = MaxPluginInfo( + SceneFile=self.scene_path, + Version=instance.data["maxversion"], + SaveFile=True, + IgnoreInputs=True + ) + + plugin_payload = attr.asdict(plugin_info) + + # Patching with pluginInfo from settings + for key, value in self.pluginInfo.items(): + plugin_payload[key] = value + + return plugin_payload + + def process_submission(self): + + instance = self._instance + filepath = self.scene_path + + files = instance.data["expectedFiles"] + if not files: + raise RuntimeError("No Render Elements found!") + first_file = next(self._iter_expected_files(files)) + output_dir = os.path.dirname(first_file) + instance.data["outputDir"] = output_dir + instance.data["toBeRenderedOn"] = "deadline" + + filename = os.path.basename(filepath) + + payload_data = { + "filename": filename, + "dirname": output_dir + } + + self.log.debug("Submitting 3dsMax render..") + payload = self._use_published_name(payload_data) + job_info, plugin_info = payload + self.submit(self.assemble_payload(job_info, plugin_info)) + + def _use_published_name(self, data): + instance = self._instance + job_info = copy.deepcopy(self.job_info) + plugin_info = copy.deepcopy(self.plugin_info) + plugin_data = {} + project_setting = get_project_settings( + legacy_io.Session["AVALON_PROJECT"] + ) + + multipass = get_multipass_setting(project_setting) + 
if multipass:
+            plugin_data["DisableMultipass"] = 0
+        else:
+            plugin_data["DisableMultipass"] = 1
+
+        files = instance.data.get("expectedFiles")
+        if not files:
+            raise RuntimeError("No render elements found")
+        first_file = next(self._iter_expected_files(files))
+        old_output_dir = os.path.dirname(first_file)
+        output_beauty = RenderSettings().get_render_output(instance.name,
+                                                           old_output_dir)
+        rgb_bname = os.path.basename(output_beauty)
+        dir = os.path.dirname(first_file)
+        beauty_name = f"{dir}/{rgb_bname}"
+        beauty_name = beauty_name.replace("\\", "/")
+        plugin_data["RenderOutput"] = beauty_name
+        # as 3dsmax has versions in different languages
+        plugin_data["Language"] = "ENU"
+        renderer_class = get_current_renderer()
+
+        renderer = str(renderer_class).split(":")[0]
+        if renderer in [
+            "ART_Renderer",
+            "Redshift_Renderer",
+            "V_Ray_6_Hotfix_3",
+            "V_Ray_GPU_6_Hotfix_3",
+            "Default_Scanline_Renderer",
+            "Quicksilver_Hardware_Renderer",
+        ]:
+            render_elem_list = RenderSettings().get_render_element()
+            for i, element in enumerate(render_elem_list):
+                elem_bname = os.path.basename(element)
+                new_elem = f"{dir}/{elem_bname}"
+                new_elem = new_elem.replace("/", "\\")
+                plugin_data["RenderElementOutputFilename%d" % i] = new_elem  # noqa
+
+        if renderer == "Redshift_Renderer":
+            plugin_data["redshift_SeparateAovFiles"] = instance.data.get(
+                "separateAovFiles")
+
+        self.log.debug("plugin data:{}".format(plugin_data))
+        plugin_info.update(plugin_data)
+
+        return job_info, plugin_info
+
+    def from_published_scene(self, replace_in_path=True):
+        instance = self._instance
+        if instance.data["renderer"] == "Redshift_Renderer":
+            self.log.debug("Using Redshift... published scene won't be used.")
+            replace_in_path = False
+        return replace_in_path
+
+    @staticmethod
+    def _iter_expected_files(exp):
+        if isinstance(exp[0], dict):
+            for _aov, files in exp[0].items():
+                for file in files:
+                    yield file
+        else:
+            for file in exp:
+                yield file
+
+    @classmethod
+    def get_attribute_defs(cls):
+        defs = super(MaxSubmitDeadline, cls).get_attribute_defs()
+        defs.extend([
+            BoolDef("use_published",
+                    default=cls.use_published,
+                    label="Use Published Scene"),
+
+            NumberDef("priority",
+                      minimum=1,
+                      maximum=250,
+                      decimals=0,
+                      default=cls.priority,
+                      label="Priority"),
+
+            NumberDef("chunkSize",
+                      minimum=1,
+                      maximum=50,
+                      decimals=0,
+                      default=cls.chunk_size,
+                      label="Frame Per Task"),
+
+            TextDef("group",
+                    default=cls.group,
+                    label="Group Name"),
+        ])
+
+        return defs
diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
index a92b996327..a6cdcb7e71 100644
--- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
+++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py
@@ -37,6 +37,8 @@ from openpype.hosts.maya.api.lib import get_attr_in_layer

 from openpype_modules.deadline import abstract_submit_deadline
 from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
+from openpype.tests.lib import is_in_tests
+from openpype.lib import is_running_from_build


 def _validate_deadline_bool_value(instance, attribute, value):
@@ -63,6 +65,7 @@ class MayaPluginInfo(object):
     # Include all lights flag
     RenderSetupIncludeLights = attr.ib(
         default="1", validator=_validate_deadline_bool_value)
+    StrictErrorChecking = attr.ib(default=True)


 @attr.s
@@ -121,6 +124,9 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
         src_filepath = context.data["currentFile"]
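+        # The timestamp suffix below keeps repeated automated test
+        # submissions from collapsing into a single Deadline batch, e.g.
+        # "sh010_v001.ma" -> "sh010_v001.ma28042023101500" (illustrative).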
src_filename = os.path.basename(src_filepath)

+        if is_in_tests():
+            src_filename += datetime.now().strftime("%d%m%Y%H%M%S")
+
         job_info.Name = "%s - %s" % (src_filename, instance.name)
         job_info.BatchName = src_filename
         job_info.Plugin = instance.data.get("mayaRenderPlugin", "MayaBatch")
@@ -136,10 +142,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

         job_info.Pool = instance.data.get("primaryPool")
         job_info.SecondaryPool = instance.data.get("secondaryPool")
-        job_info.ChunkSize = instance.data.get("chunkSize", 10)
         job_info.Comment = context.data.get("comment")
         job_info.Priority = instance.data.get("priority", self.priority)
-        job_info.FramesPerTask = instance.data.get("framesPerTask", 1)

         if self.group != "none" and self.group:
             job_info.Group = self.group
@@ -160,9 +164,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
             "AVALON_ASSET",
             "AVALON_TASK",
             "AVALON_APP_NAME",
-            "OPENPYPE_DEV",
-            "OPENPYPE_VERSION"
+            "OPENPYPE_DEV",
+            "IS_TEST"
         ]
+
+        # Add OpenPype version if we are running from build.
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if self._instance.context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
@@ -214,6 +223,8 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
                 "renderSetupIncludeLights", default_rs_include_lights)
             if rs_include_lights not in {"1", "0", True, False}:
                 rs_include_lights = default_rs_include_lights
+            strict_error_checking = instance.data.get("strict_error_checking",
+                                                      True)
             plugin_info = MayaPluginInfo(
                 SceneFile=self.scene_path,
                 Version=cmds.about(version=True),
@@ -222,6 +233,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
                 RenderSetupIncludeLights=rs_include_lights,  # noqa
                 ProjectPath=context.data["workspaceDir"],
                 UsingRenderLayers=True,
+                StrictErrorChecking=strict_error_checking
             )

             plugin_payload = attr.asdict(plugin_info)
@@ -313,6 +325,11 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
         job_info = copy.deepcopy(payload_job_info)
         plugin_info = copy.deepcopy(payload_plugin_info)

+        # Force plugin reload for vray because the region does not get
+        # flushed between tile renders.
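+        # As far as we know, ForceReloadPlugin makes the Worker restart the
+        # render plugin between tasks, which drops V-Ray's cached region
+        # state instead of carrying it into the next tile.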
+        if plugin_info["Renderer"] == "vray":
+            job_info.ForceReloadPlugin = True
+
         # if we have sequence of files, we need to create tile job for
         # every frame
         job_info.TileJob = True
@@ -405,8 +422,14 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
         assembly_job_info.Name += " - Tile Assembly Job"
         assembly_job_info.Frames = 1
         assembly_job_info.MachineLimit = 1
-        assembly_job_info.Priority = instance.data.get("tile_priority",
-                                                       self.tile_priority)
+        assembly_job_info.Priority = instance.data.get(
+            "tile_priority", self.tile_priority
+        )
+        assembly_job_info.TileJob = False
+
+        pool = instance.context.data["project_settings"]["deadline"]
+        pool = pool["publish"]["ProcessSubmittedJobOnFarm"]["deadline_pool"]
+        assembly_job_info.Pool = pool or instance.data.get("primaryPool", "")

         assembly_plugin_info = {
             "CleanupTiles": 1,
@@ -416,6 +439,7 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):

         assembly_payloads = []
         output_dir = self.job_info.OutputDirectory[0]
+        config_files = []
         for file in assembly_files:
             frame = re.search(R_FRAME_NUMBER, file).group("frame")
@@ -431,17 +455,17 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
             frame_assembly_job_info.ExtraInfo[0] = file_hash
             frame_assembly_job_info.ExtraInfo[1] = file
             frame_assembly_job_info.JobDependencies = tile_job_id
+            frame_assembly_job_info.Frames = frame

             # write assembly job config files
-            now = datetime.now()
-
             config_file = os.path.join(
                 output_dir,
                 "{}_config_{}.txt".format(
                     os.path.splitext(file)[0],
-                    now.strftime("%Y_%m_%d_%H_%M_%S")
+                    datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
                 )
             )
+            config_files.append(config_file)
             try:
                 if not os.path.isdir(output_dir):
                     os.makedirs(output_dir)
@@ -458,25 +482,34 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
                 print("ImageHeight={}".format(
                     instance.data.get("resolutionHeight")), file=cf)

+            reversed_y = False
+            if plugin_info["Renderer"] == "arnold":
+                reversed_y = True
+
+            with open(config_file, "a") as cf:
+                # Need to reverse the order of the y tiles, because image
+                # coordinates are calculated from bottom left corner.
                 tiles = _format_tiles(
                     file, 0,
                     instance.data.get("tilesX"),
                     instance.data.get("tilesY"),
                     instance.data.get("resolutionWidth"),
                     instance.data.get("resolutionHeight"),
-                    payload_plugin_info["OutputFilePrefix"]
+                    payload_plugin_info["OutputFilePrefix"],
+                    reversed_y=reversed_y
                 )[1]
                 for k, v in sorted(tiles.items()):
                     print("{}={}".format(k, v), file=cf)

-            payload = self.assemble_payload(
-                job_info=frame_assembly_job_info,
-                plugin_info=assembly_plugin_info.copy(),
-                # todo: aux file transfers don't work with deadline webservice
-                #       add config file as job auxFile
-                # aux_files=[config_file]
+            assembly_payloads.append(
+                self.assemble_payload(
+                    job_info=frame_assembly_job_info,
+                    plugin_info=assembly_plugin_info.copy(),
+                    # This would fail if the client machine and webservice are
+                    # using different storage paths.
+                    aux_files=[config_file]
+                )
             )
-            assembly_payloads.append(payload)

         # Submit assembly jobs
         assembly_job_ids = []
@@ -486,11 +519,17 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline):
                 "submitting assembly job {} of {}".format(i + 1, num_assemblies)
             )
+            self.log.info(payload)
             assembly_job_id = self.submit(payload)
             assembly_job_ids.append(assembly_job_id)

         instance.data["assemblySubmissionJobs"] = assembly_job_ids

+        # Remove config files to avoid confusion about where data is coming
+        # from in Deadline.
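+        # The config files were attached as auxiliary files above, so
+        # Deadline keeps its own copies with the job; the local ones are
+        # safe to delete once submission succeeded.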
+ for config_file in config_files: + os.remove(config_file) + def _get_maya_payload(self, data): job_info = copy.deepcopy(self.job_info) @@ -745,8 +784,15 @@ class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): def _format_tiles( - filename, index, tiles_x, tiles_y, - width, height, prefix): + filename, + index, + tiles_x, + tiles_y, + width, + height, + prefix, + reversed_y=False +): """Generate tile entries for Deadline tile job. Returns two dictionaries - one that can be directly used in Deadline @@ -783,6 +829,7 @@ def _format_tiles( width (int): Width resolution of final image. height (int): Height resolution of final image. prefix (str): Image prefix. + reversed_y (bool): Reverses the order of the y tiles. Returns: (dict, dict): Tuple of two dictionaries - first can be used to @@ -805,12 +852,16 @@ def _format_tiles( cfg["TilesCropped"] = "False" tile = 0 + range_y = range(1, tiles_y + 1) + reversed_y_range = list(reversed(range_y)) for tile_x in range(1, tiles_x + 1): - for tile_y in reversed(range(1, tiles_y + 1)): + for i, tile_y in enumerate(range_y): + tile_y_index = tile_y + if reversed_y: + tile_y_index = reversed_y_range[i] + tile_prefix = "_tile_{}x{}_{}x{}_".format( - tile_x, tile_y, - tiles_x, - tiles_y + tile_x, tile_y_index, tiles_x, tiles_y ) new_filename = "{}/{}{}".format( @@ -825,19 +876,20 @@ def _format_tiles( right = (tile_x * w_space) - 1 # Job info - out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501 + key = "OutputFilename{}".format(index) + out["JobInfo"][key] = new_filename # Plugin Info - out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \ - "/{}".format(tile_prefix).join(prefix.rsplit("/", 1)) + key = "RegionPrefix{}".format(str(tile)) + out["PluginInfo"][key] = "/{}".format( + tile_prefix + ).join(prefix.rsplit("/", 1)) out["PluginInfo"]["RegionTop{}".format(tile)] = top out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom out["PluginInfo"]["RegionLeft{}".format(tile)] = left out["PluginInfo"]["RegionRight{}".format(tile)] = right # Tile config - cfg["Tile{}".format(tile)] = new_filename - cfg["Tile{}Tile".format(tile)] = new_filename cfg["Tile{}FileName".format(tile)] = new_filename cfg["Tile{}X".format(tile)] = left cfg["Tile{}Y".format(tile)] = top diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py index 38ae5d2f7f..25f859554f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -1,10 +1,13 @@ import os import requests +from datetime import datetime from maya import cmds from openpype.pipeline import legacy_io, PublishXmlValidationError from openpype.settings import get_project_settings +from openpype.tests.lib import is_in_tests +from openpype.lib import is_running_from_build import pyblish.api @@ -57,6 +60,8 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=project_name, scene=scenename) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") # Generate the payload for Deadline submission payload = { @@ -100,9 +105,13 @@ class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", - "FTRACK_SERVER", - "OPENPYPE_VERSION" + "FTRACK_SERVER" ] + 
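+        # These keys are serialized into indexed job-info entries which
+        # Deadline injects into the render process environment, e.g.
+        #   EnvironmentKeyValue0=FTRACK_API_USER=jane.doe  (illustrative)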
+ # Add OpenPype version if we are running from build. + if is_running_from_build(): + keys.append("OPENPYPE_VERSION") + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **legacy_io.Session) diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index b09d2935ab..4900231783 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -2,15 +2,26 @@ import os import re import json import getpass +from datetime import datetime import requests import pyblish.api import nuke from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ( + OpenPypePyblishPluginMixin +) +from openpype.tests.lib import is_in_tests +from openpype.lib import ( + is_running_from_build, + BoolDef, + NumberDef +) -class NukeSubmitDeadline(pyblish.api.InstancePlugin): +class NukeSubmitDeadline(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Submit write to Deadline Renders are submitted to a Deadline Web Service as @@ -18,10 +29,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): """ - label = "Submit to Deadline" + label = "Submit Nuke to Deadline" order = pyblish.api.IntegratorOrder + 0.1 - hosts = ["nuke", "nukestudio"] - families = ["render.farm", "prerender.farm"] + hosts = ["nuke"] + families = ["render", "prerender"] optional = True targets = ["local"] @@ -36,11 +47,59 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): env_allowed_keys = [] env_search_replace_values = {} + @classmethod + def get_attribute_defs(cls): + return [ + NumberDef( + "priority", + label="Priority", + default=cls.priority, + decimals=0 + ), + NumberDef( + "chunk", + label="Frames Per Task", + default=cls.chunk_size, + decimals=0, + minimum=1, + maximum=1000 + ), + NumberDef( + "concurrency", + label="Concurrency", + default=cls.concurrent_tasks, + decimals=0, + minimum=1, + maximum=10 + ), + BoolDef( + "use_gpu", + default=cls.use_gpu, + label="Use GPU" + ), + BoolDef( + "suspend_publish", + default=False, + label="Suspend publish" + ) + ] + def process(self, instance): + if not instance.data.get("farm"): + self.log.debug("Skipping local instance.") + return + + instance.data["attributeValues"] = self.get_attr_values_from_data( + instance.data) + + # add suspend_publish attributeValue to instance data + instance.data["suspend_publish"] = instance.data["attributeValues"][ + "suspend_publish"] + instance.data["toBeRenderedOn"] = "deadline" families = instance.data["families"] - node = instance[0] + node = instance.data["transientData"]["node"] context = instance.context # get default deadline webservice url from deadline module @@ -122,10 +181,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): resp.json()["_id"]) # redefinition of families - if "render.farm" in families: + if "render" in instance.data["family"]: instance.data['family'] = 'write' families.insert(0, "render2d") - elif "prerender.farm" in families: + elif "prerender" in instance.data["family"]: instance.data['family'] = 'write' families.insert(0, "prerender") instance.data["families"] = families @@ -138,16 +197,19 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): exe_node_name, start_frame, end_frame, - responce_data=None + response_data=None ): render_dir = os.path.normpath(os.path.dirname(render_path)) - script_name = os.path.basename(script_path) - jobname = "%s - %s" % (script_name, instance.name) + 
batch_name = os.path.basename(script_path) + jobname = "%s - %s" % (batch_name, instance.name) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + output_filename_0 = self.preview_fname(render_path) - if not responce_data: - responce_data = {} + if not response_data: + response_data = {} try: # Ensure render folder exists @@ -155,20 +217,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): except OSError: pass - # define chunk and priority - chunk_size = instance.data["deadlineChunkSize"] - if chunk_size == 0 and self.chunk_size: - chunk_size = self.chunk_size - - # define chunk and priority - concurrent_tasks = instance.data["deadlineConcurrentTasks"] - if concurrent_tasks == 0 and self.concurrent_tasks: - concurrent_tasks = self.concurrent_tasks - - priority = instance.data["deadlinePriority"] - if not priority: - priority = self.priority - # resolve any limit groups limit_groups = self.get_limit_groups() self.log.info("Limit groups: `{}`".format(limit_groups)) @@ -176,7 +224,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): payload = { "JobInfo": { # Top-level group name - "BatchName": script_name, + "BatchName": batch_name, # Asset dependency to wait for at least the scene file to sync. # "AssetDependency0": script_path, @@ -187,9 +235,14 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Arbitrary username, for visualisation in Monitor "UserName": self._deadline_user, - "Priority": priority, - "ChunkSize": chunk_size, - "ConcurrentTasks": concurrent_tasks, + "Priority": instance.data["attributeValues"].get( + "priority", self.priority), + "ChunkSize": instance.data["attributeValues"].get( + "chunk", self.chunk_size), + "ConcurrentTasks": instance.data["attributeValues"].get( + "concurrency", + self.concurrent_tasks + ), "Department": self.department, @@ -228,7 +281,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "AWSAssetFile0": render_path, # using GPU by default - "UseGpu": self.use_gpu, + "UseGpu": instance.data["attributeValues"].get( + "use_gpu", self.use_gpu), # Only the specific write node is rendered. "WriteNode": exe_node_name @@ -238,11 +292,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } - if responce_data.get("_id"): + if response_data.get("_id"): payload["JobInfo"].update({ "JobType": "Normal", - "BatchName": responce_data["Props"]["Batch"], - "JobDependency0": responce_data["_id"], + "BatchName": response_data["Props"]["Batch"], + "JobDependency0": response_data["_id"], "ChunkSize": 99999999 }) @@ -261,8 +315,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "NUKE_PATH", "TOOL_ENV", "FOUNDRY_LICENSE", - "OPENPYPE_VERSION" + "OPENPYPE_SG_USER", ] + + # Add OpenPype version if we are running from build. 
+        if is_running_from_build():
+            keys.append("OPENPYPE_VERSION")
+
         # Add mongo url if it's enabled
         if instance.context.data.get("deadlinePassMongoUrl"):
             keys.append("OPENPYPE_MONGO")
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
index 5c5c54febb..590acf86c2 100644
--- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py
+++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py
@@ -18,7 +18,9 @@ from openpype.pipeline import (
     get_representation_path,
     legacy_io,
 )
+from openpype.tests.lib import is_in_tests
 from openpype.pipeline.farm.patterning import match_aov_pattern
+from openpype.lib import is_running_from_build


 def get_resources(project_name, version, extension=None):
@@ -83,10 +85,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     These jobs are dependent on a deadline or muster job
     submission prior to this plug-in.

-    - In case of Deadline, it creates dependend job on farm publishing
+    - In case of Deadline, it creates dependent job on farm publishing
       rendered image sequence.

-    - In case of Muster, there is no need for such thing as dependend job,
+    - In case of Muster, there is no need for such thing as dependent job,
       post action will be executed and rendered sequence will be published.

     Options in instance.data:
@@ -106,7 +108,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     - publishJobState (str, Optional): "Active" or "Suspended"
         This defaults to "Suspended"

-    - expectedFiles (list or dict): explained bellow
+    - expectedFiles (list or dict): explained below

     """

@@ -116,15 +118,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     deadline_plugin = "OpenPype"
     targets = ["local"]

-    hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"]
+    hosts = ["fusion", "max", "maya", "nuke", "houdini",
+             "celaction", "aftereffects", "harmony"]

     families = ["render.farm", "prerender.farm",
-                "renderlayer", "imagesequence", "vrayscene"]
+                "renderlayer", "imagesequence",
+                "vrayscene", "maxrender",
+                "arnold_rop", "mantra_rop",
+                "karma_rop", "vray_rop",
+                "redshift_rop"]

     aov_filter = {"maya": [r".*([Bb]eauty).*"],
                   "aftereffects": [r".*"],  # for everything from AE
                   "harmony": [r".*"],  # for everything from AE
-                  "celaction": [r".*"]}
+                  "celaction": [r".*"],
+                  "max": [r".*"]}

     environ_job_filter = [
         "OPENPYPE_METADATA_FILE"
@@ -136,9 +144,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         "FTRACK_SERVER",
         "AVALON_APP_NAME",
         "OPENPYPE_USERNAME",
-        "OPENPYPE_VERSION"
+        "OPENPYPE_SG_USER"
     ]

+    # Add OpenPype version if we are running from build.
+ if is_running_from_build(): + environ_keys.append("OPENPYPE_VERSION") + # custom deadline attributes deadline_department = "" deadline_pool = "" @@ -150,8 +163,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # regex for finding frame number in string R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') - # mapping of instance properties to be transfered to new instance for every - # specified family + # mapping of instance properties to be transferred to new instance + # for every specified family instance_transfer = { "slate": ["slateFrames", "slate"], "review": ["lutPath"], @@ -187,7 +200,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): metadata_path = os.path.join(output_dir, metadata_filename) # Convert output dir to `{root}/rest/of/path/...` with Anatomy - success, roothless_mtdt_p = self.anatomy.find_root_template_from_path( + success, rootless_mtdt_p = self.anatomy.find_root_template_from_path( metadata_path) if not success: # `rootless_path` is not set to `output_dir` if none of roots match @@ -195,9 +208,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Could not find root path for remapping \"{}\"." " This may cause issues on farm." ).format(output_dir)) - roothless_mtdt_p = metadata_path + rootless_mtdt_p = metadata_path - return metadata_path, roothless_mtdt_p + return metadata_path, rootless_mtdt_p def _submit_deadline_post_job(self, instance, job, instances): """Submit publish job to Deadline. @@ -206,6 +219,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): more universal code. Muster post job is sent directly by Muster submitter, so this type of code isn't necessary for it. + Returns: + (str): deadline_publish_job_id """ data = instance.data.copy() subset = data["subset"] @@ -228,7 +243,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # Transfer the environment from the original job to this dependent # job so they use the same environment - metadata_path, roothless_metadata_path = \ + metadata_path, rootless_metadata_path = \ self._create_metadata_path(instance) environment = { @@ -239,7 +254,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "OPENPYPE_PUBLISH_JOB": "1", "OPENPYPE_RENDER_JOB": "0", "OPENPYPE_REMOTE_JOB": "0", - "OPENPYPE_LOG_NO_COLORS": "1" + "OPENPYPE_LOG_NO_COLORS": "1", + "IS_TEST": str(int(is_in_tests())) } # add environments from self.environ_keys @@ -264,12 +280,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): args = [ "--headless", 'publish', - roothless_metadata_path, + '"{}"'.format(rootless_metadata_path), "--targets", "deadline", "--targets", "farm" ] + if is_in_tests(): + args.append("--automatic-tests") + # Generate the payload for Deadline submission + secondary_pool = ( + self.deadline_pool_secondary or instance.data.get("secondaryPool") + ) payload = { "JobInfo": { "Plugin": self.deadline_plugin, @@ -283,10 +305,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Priority": priority, "Group": self.deadline_group, - "Pool": instance.data.get("primaryPool"), - "SecondaryPool": instance.data.get("secondaryPool"), - - "OutputDirectory0": output_dir + "Pool": self.deadline_pool or instance.data.get("primaryPool"), + "SecondaryPool": secondary_pool, + # ensure the outputdirectory with correct slashes + "OutputDirectory0": output_dir.replace("\\", "/") }, "PluginInfo": { "Version": self.plugin_pype_version, @@ -335,6 +357,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if not response.ok: raise 
+        deadline_publish_job_id = response.json()["_id"]
+
+        return deadline_publish_job_id
+
     def _copy_extend_frames(self, instance, representation):
         """Copy existing frames from latest version.
@@ -377,7 +403,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                 continue
             r_col.indexes.remove(frame)

-        # now we need to translate published names from represenation
+        # now we need to translate published names from representation
         # back. This is tricky, right now we'll just use same naming
         # and only switch frame numbers
         resource_files = []
@@ -394,7 +420,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             assert fn is not None, "padding string wasn't found"
             # list of tuples (source, destination)
             staging = representation.get("stagingDir")
-            staging = self.anatomy.fill_roots(staging)
+            staging = self.anatomy.fill_root(staging)
             resource_files.append(
                 (frame,
                  os.path.join(staging,
@@ -416,7 +442,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         self.log.info(
             "Finished copying %i files" % len(resource_files))

-    def _create_instances_for_aov(self, instance_data, exp_files):
+    def _create_instances_for_aov(
+        self, instance_data, exp_files, additional_data, do_not_add_review
+    ):
         """Create instance for each AOV found.

         This will create new instance for every aov it can detect in expected
         files.

         Args:
             instance_data (pyblish.plugin.Instance): skeleton data for
                 instance (those needed) later by collector
             exp_files (list): list of expected files divided by aovs
+            additional_data (dict): render products and colorspace data
+                used for representations
+            do_not_add_review (bool): explicitly skip review

         Returns:
             list of instances
@@ -491,8 +521,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

         app = os.environ.get("AVALON_APP", "")

-        preview = False
-
         if isinstance(col, list):
             render_file_name = os.path.basename(col[0])
         else:
@@ -503,20 +531,31 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             # toggle preview on if multipart is on
             if instance_data.get("multipartExr"):
+                self.log.debug("Adding preview tag because it's multipartExr")
                 preview = True

         self.log.debug("preview:{}".format(preview))
         new_instance = deepcopy(instance_data)
         new_instance["subset"] = subset_name
         new_instance["subsetGroup"] = group_name
+
+        preview = preview and not do_not_add_review
         if preview:
             new_instance["review"] = True

-        # create represenation
+        # create representation
         if isinstance(col, (list, tuple)):
             files = [os.path.basename(f) for f in col]
         else:
             files = os.path.basename(col)

+        # Copy render product "colorspace" data to representation.
+        colorspace = ""
+        products = additional_data["renderProducts"].layer_data.products
+        for product in products:
+            if product.productName == aov:
+                colorspace = product.colorspace
+                break
+
         rep = {
             "name": ext,
             "ext": ext,
@@ -526,7 +565,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             # If expectedFile are absolute, we need only filenames
             "stagingDir": staging,
             "fps": new_instance.get("fps"),
-            "tags": ["review"] if preview else []
+            "tags": ["review"] if preview else [],
+            "colorspaceData": {
+                "colorspace": colorspace,
+                "config": {
+                    "path": additional_data["colorspaceConfig"],
+                    "template": additional_data["colorspaceTemplate"]
+                },
+                "display": additional_data["display"],
+                "view": additional_data["view"]
+            }
         }

         # support conversion from tiled to scanline
@@ -550,7 +598,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         self.log.debug("instances:{}".format(instances))
         return instances

-    def _get_representations(self, instance, exp_files):
+    def _get_representations(self, instance, exp_files, do_not_add_review):
         """Create representations for file sequences.

         This will return representations of expected files if they are not
@@ -561,6 +609,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             instance (dict): instance data for which we are
                              setting representations
             exp_files (list): list of expected files
+            do_not_add_review (bool): explicitly skip review

         Returns:
             list of representations
@@ -570,7 +619,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         host_name = os.environ.get("AVALON_APP", "")
         collections, remainders = clique.assemble(exp_files)

-        # create representation for every collected sequento ce
+        # create representation for every collected sequence
         for collection in collections:
             ext = collection.tail.lstrip(".")
             preview = False
@@ -582,6 +631,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                 if instance["useSequenceForReview"]:
                     # toggle preview on if multipart is on
                     if instance.get("multipartExr", False):
+                        self.log.debug(
+                            "Adding preview tag because it's multipartExr"
+                        )
                         preview = True
                     else:
                         render_file_name = list(collection)[0]
@@ -607,6 +659,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             if instance.get("slate"):
                 frame_start -= 1

+            preview = preview and not do_not_add_review
             rep = {
                 "name": ext,
                 "ext": ext,
@@ -635,7 +688,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

             self._solve_families(instance, preview)

-        # add reminders as representations
+        # add remainders as representations
         for remainder in remainders:
             ext = remainder.split(".")[-1]
@@ -655,12 +708,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
                 "name": ext,
                 "ext": ext,
                 "files": os.path.basename(remainder),
-                "stagingDir": os.path.dirname(remainder),
+                "stagingDir": staging,
             }

             preview = match_aov_pattern(
                 host_name, self.aov_filter, remainder
             )
+            preview = preview and not do_not_add_review
             if preview:
                 rep.update({
                     "fps": instance.get("fps"),
@@ -689,8 +743,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         if preview:
             if "ftrack" not in families:
                 if os.environ.get("FTRACK_SERVER"):
+                    self.log.debug(
+                        "Adding \"ftrack\" to families because of preview tag."
+                    )
                     families.append("ftrack")
             if "review" not in families:
+                self.log.debug(
+                    "Adding \"review\" to families because of preview tag."
+ ) families.append("review") instance["families"] = families @@ -698,7 +758,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # type: (pyblish.api.Instance) -> None """Process plugin. - Detect type of renderfarm submission and create and post dependend job + Detect type of renderfarm submission and create and post dependent job in case of Deadline. It creates json file with metadata needed for publishing in directory of render. @@ -706,6 +766,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instance (pyblish.api.Instance): Instance data. """ + if not instance.data.get("farm"): + self.log.debug("Skipping local instance.") + return + data = instance.data.copy() context = instance.context self.context = context @@ -761,13 +825,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ).format(source)) family = "render" - if "prerender" in instance.data["families"]: + if ("prerender" in instance.data["families"] or + "prerender.farm" in instance.data["families"]): family = "prerender" families = [family] # pass review to families if marked as review + do_not_add_review = False if data.get("review"): families.append("review") + elif data.get("review") == False: + self.log.debug("Instance has review explicitly disabled.") + do_not_add_review = True instance_skeleton_data = { "family": family, @@ -890,6 +959,28 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # we cannot attach AOVs to other subsets as we consider every # AOV subset of its own. + additional_data = { + "renderProducts": instance.data["renderProducts"], + "colorspaceConfig": instance.data["colorspaceConfig"], + "display": instance.data["colorspaceDisplay"], + "view": instance.data["colorspaceView"] + } + + # Get templated path from absolute config path. + anatomy = instance.context.data["anatomy"] + colorspaceTemplate = instance.data["colorspaceConfig"] + success, rootless_staging_dir = ( + anatomy.find_root_template_from_path(colorspaceTemplate) + ) + if success: + colorspaceTemplate = rootless_staging_dir + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." 
+ ).format(colorspaceTemplate)) + additional_data["colorspaceTemplate"] = colorspaceTemplate + if len(data.get("attachTo")) > 0: assert len(data.get("expectedFiles")[0].keys()) == 1, ( "attaching multiple AOVs or renderable cameras to " @@ -900,7 +991,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # there are multiple renderable cameras in scene) instances = self._create_instances_for_aov( instance_skeleton_data, - data.get("expectedFiles")) + data.get("expectedFiles"), + additional_data, + do_not_add_review + ) self.log.info("got {} instance{}".format( len(instances), "s" if len(instances) > 1 else "")) @@ -908,7 +1002,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: representations = self._get_representations( instance_skeleton_data, - data.get("expectedFiles") + data.get("expectedFiles"), + do_not_add_review ) if "representations" not in instance_skeleton_data.keys(): @@ -919,7 +1014,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instances = [instance_skeleton_data] # if we are attaching to other subsets, create copy of existing - # instances, change data to match thats subset and replace + # instances, change data to match its subset and replace # existing instances with modified data if instance.data.get("attachTo"): self.log.info("Attaching render to subset:") @@ -949,6 +1044,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ''' render_job = None + submission_type = "" if instance.data.get("toBeRenderedOn") == "deadline": render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" @@ -996,7 +1092,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.deadline_url = instance.data.get("deadlineUrl") assert self.deadline_url, "Requires Deadline Webservice URL" - self._submit_deadline_post_job(instance, render_job, instances) + deadline_publish_job_id = \ + self._submit_deadline_post_job(instance, render_job, instances) + + # Inject deadline url to instances. + for inst in instances: + inst["deadlineUrl"] = self.deadline_url # publish job file publish_job = { @@ -1014,6 +1115,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "instances": instances } + if deadline_publish_job_id: + publish_job["deadline_publish_job_id"] = deadline_publish_job_id + # add audio to metadata file if available audio_file = context.data.get("audioFile") if audio_file and os.path.isfile(audio_file): @@ -1028,7 +1132,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } publish_job.update({"ftrack": ftrack}) - metadata_path, roothless_metadata_path = self._create_metadata_path( + metadata_path, rootless_metadata_path = self._create_metadata_path( instance) self.log.info("Writing json file: {}".format(metadata_path)) @@ -1119,10 +1223,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): template_data["family"] = "render" template_data["version"] = version - anatomy_filled = anatomy.format(template_data) - - if "folder" in anatomy.templates["render"]: - publish_folder = anatomy_filled["render"]["folder"] + render_templates = anatomy.templates_obj["render"] + if "folder" in render_templates: + publish_folder = render_templates["folder"].format_strict( + template_data + ) else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy @@ -1132,8 +1237,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " key underneath `publish` (in global of for project `{}`)." 
).format(project_name)) - file_path = anatomy_filled["render"]["path"] - # Directory + file_path = render_templates["path"].format_strict(template_data) publish_folder = os.path.dirname(file_path) return publish_folder diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py index 78eed17c98..e1c0595830 100644 --- a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -17,10 +17,18 @@ class ValidateDeadlinePools(OptionalPyblishPluginMixin, label = "Validate Deadline Pools" order = pyblish.api.ValidatorOrder - families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + families = ["rendering", + "render.farm", + "renderFarm", + "renderlayer", + "maxrender"] optional = True def process(self, instance): + if not instance.data.get("farm"): + self.log.debug("Skipping local instance.") + return + # get default deadline webservice url from deadline module deadline_url = instance.context.data["defaultDeadline"] self.log.info("deadline_url::{}".format(deadline_url)) diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index f0a3ddd246..ff4be677e7 100644 --- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -68,8 +68,15 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin): # files to be in the folder that we might not want to use. missing = expected_files - existing_files if missing: - raise RuntimeError("Missing expected files: {}".format( - sorted(missing))) + raise RuntimeError( + "Missing expected files: {}\n" + "Expected files: {}\n" + "Existing files: {}".format( + sorted(missing), + sorted(expected_files), + sorted(existing_files) + ) + ) def _get_frame_list(self, original_job_id): """Returns list of frame ranges from all render job. 
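
The missing-files check above is plain set arithmetic over file names; a minimal self-contained sketch of the same logic (hypothetical file names, not the plugin code):

    expected_files = {"shot010.0001.exr", "shot010.0002.exr", "shot010.0003.exr"}
    existing_files = {"shot010.0001.exr", "shot010.0003.exr"}

    missing = expected_files - existing_files
    if missing:
        raise RuntimeError(
            "Missing expected files: {}\n"
            "Expected files: {}\n"
            "Existing files: {}".format(
                sorted(missing), sorted(expected_files), sorted(existing_files)
            )
        )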
@@ -91,7 +98,7 @@ class ValidateExpectedFiles(pyblish.api.InstancePlugin):

         for job_id in render_job_ids:
             job_info = self._get_job_info(job_id)
-            frame_list = job_info["Props"]["Frames"]
+            frame_list = job_info["Props"].get("Frames")
             if frame_list:
                 all_frame_lists.extend(frame_list.split(','))

diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
index 40193bac71..15226bb773 100644
--- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
+++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # -*- coding: utf-8 -*-
 import os
 import tempfile
@@ -35,7 +36,7 @@ class OpenPypeVersion:
         self.prerelease = prerelease

         is_valid = True
-        if not major or not minor or not patch:
+        if major is None or minor is None or patch is None:
             is_valid = False
         self.is_valid = is_valid

@@ -157,7 +158,7 @@ def get_openpype_version_from_path(path, build=True):

     # fix path for application bundle on macos
     if platform.system().lower() == "darwin":
-        path = os.path.join(path, "Contents", "MacOS", "lib", "Python")
+        path = os.path.join(path, "MacOS")

     version_file = os.path.join(path, "openpype", "version.py")
     if not os.path.isfile(version_file):
@@ -189,6 +190,11 @@ def get_openpype_executable():
     exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "")
     dir_list = config.GetConfigEntryWithDefault(
         "OpenPypeInstallationDirs", "")
+
+    # clean '\ ' for MacOS pasting
+    if platform.system().lower() == "darwin":
+        exe_list = exe_list.replace("\\ ", " ")
+        dir_list = dir_list.replace("\\ ", " ")
     return exe_list, dir_list

@@ -196,19 +202,21 @@ def get_openpype_versions(dir_list):
     print(">>> Getting OpenPype executable ...")
     openpype_versions = []

-    install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
-    if install_dir:
-        print("--- Looking for OpenPype at: {}".format(install_dir))
-        sub_dirs = [
-            f.path for f in os.scandir(install_dir)
-            if f.is_dir()
-        ]
-        for subdir in sub_dirs:
-            version = get_openpype_version_from_path(subdir)
-            if not version:
-                continue
-            print(" - found: {} - {}".format(version, subdir))
-            openpype_versions.append((version, subdir))
+    # special case of multiple install dirs
+    for dir_list in dir_list.split(","):
+        install_dir = DirectoryUtils.SearchDirectoryList(dir_list)
+        if install_dir:
+            print("--- Looking for OpenPype at: {}".format(install_dir))
+            sub_dirs = [
+                f.path for f in os.scandir(install_dir)
+                if f.is_dir()
+            ]
+            for subdir in sub_dirs:
+                version = get_openpype_version_from_path(subdir)
+                if not version:
+                    continue
+                print(" - found: {} - {}".format(version, subdir))
+                openpype_versions.append((version, subdir))
     return openpype_versions

@@ -218,8 +226,8 @@ def get_requested_openpype_executable(
     requested_version_obj = OpenPypeVersion.from_string(requested_version)
     if not requested_version_obj:
         print((
-            ">>> Requested version does not match version regex \"{}\""
-        ).format(VERSION_REGEX))
+            ">>> Requested version '{}' does not match version regex '{}'"
+        ).format(requested_version, VERSION_REGEX))
         return None

     print((
@@ -272,7 +280,8 @@ def get_requested_openpype_executable(
     # Deadline decide.
exe_list = [ os.path.join(version_dir, "openpype_console.exe"), - os.path.join(version_dir, "openpype_console") + os.path.join(version_dir, "openpype_console"), + os.path.join(version_dir, "MacOS", "openpype_console") ] return FileUtils.SearchFileList(";".join(exe_list)) @@ -333,10 +342,13 @@ def inject_openpype_environment(deadlinePlugin): "app": job.GetJobEnvironmentKeyValue("AVALON_APP_NAME"), "envgroup": "farm" } + + if job.GetJobEnvironmentKeyValue('IS_TEST'): + args.append("--automatic-tests") + if all(add_kwargs.values()): for key, value in add_kwargs.items(): args.extend(["--{}".format(key), value]) - else: raise RuntimeError(( "Missing required env vars: AVALON_PROJECT, AVALON_ASSET," @@ -350,11 +362,11 @@ def inject_openpype_environment(deadlinePlugin): args_str = subprocess.list2cmdline(args) print(">>> Executing: {} {}".format(exe, args_str)) - process = ProcessUtils.SpawnProcess( - exe, args_str, os.path.dirname(exe) + process_exitcode = deadlinePlugin.RunProcess( + exe, args_str, os.path.dirname(exe), -1 ) - ProcessUtils.WaitForExit(process, -1) - if process.ExitCode != 0: + + if process_exitcode != 0: raise RuntimeError( "Failed to run OpenPype process to extract environments." ) diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 6b0f69d98f..6e1b973fb9 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -73,7 +73,7 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): """ # fix path for application bundle on macos if platform.system().lower() == "darwin": - path = os.path.join(path, "Contents", "MacOS", "lib", "Python") + path = os.path.join(path, "MacOS") version_file = os.path.join(path, "openpype", "version.py") if not os.path.isfile(version_file): @@ -107,19 +107,28 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): "Scanning for compatible requested " f"version {requested_version}")) dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") - install_dir = DirectoryUtils.SearchDirectoryList(dir_list) - if dir: - sub_dirs = [ - f.path for f in os.scandir(install_dir) - if f.is_dir() - ] - for subdir in sub_dirs: - version = self.get_openpype_version_from_path(subdir) - if not version: - continue - openpype_versions.append((version, subdir)) + + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + dir_list = dir_list.replace("\\ ", " ") + + for dir_list in dir_list.split(","): + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = self.get_openpype_version_from_path(subdir) + if not version: + continue + openpype_versions.append((version, subdir)) exe_list = self.GetConfigEntry("OpenPypeExecutable") + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + exe_list = exe_list.replace("\\ ", " ") exe = FileUtils.SearchFileList(exe_list) if openpype_versions: # if looking for requested compatible version, @@ -161,7 +170,9 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): os.path.join( compatible_versions[-1][1], "openpype_console.exe"), os.path.join( - compatible_versions[-1][1], "openpype_console") + compatible_versions[-1][1], "openpype_console"), + os.path.join( + compatible_versions[-1][1], "MacOS", "openpype_console") ] exe = 
FileUtils.SearchFileList(";".join(exe_list)) diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py index 625a3f1a28..b51daffbc8 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py @@ -16,6 +16,10 @@ from Deadline.Scripting import ( FileUtils, RepositoryUtils, SystemUtils) +version_major = 1 +version_minor = 0 +version_patch = 0 +version_string = "{}.{}.{}".format(version_major, version_minor, version_patch) STRING_TAGS = { "format" } @@ -204,10 +208,10 @@ def info_about_input(oiiotool_path, filepath): _stdout, _stderr = popen.communicate() output = "" if _stdout: - output += _stdout.decode("utf-8") + output += _stdout.decode("utf-8", errors="backslashreplace") if _stderr: - output += _stderr.decode("utf-8") + output += _stderr.decode("utf-8", errors="backslashreplace") output = output.replace("\r\n", "\n") xml_started = False @@ -264,6 +268,7 @@ class OpenPypeTileAssembler(DeadlinePlugin): def initialize_process(self): """Initialization.""" + self.LogInfo("Plugin version: {}".format(version_string)) self.SingleFramesOnly = True self.StdoutHandling = True self.renderer = self.GetPluginInfoEntryWithDefault( @@ -320,12 +325,7 @@ class OpenPypeTileAssembler(DeadlinePlugin): output_file = data["ImageFileName"] output_file = RepositoryUtils.CheckPathMapping(output_file) output_file = self.process_path(output_file) - """ - _, ext = os.path.splitext(output_file) - if "exr" not in ext: - self.FailRender( - "[{}] Only EXR format is supported for now.".format(ext)) - """ + tile_info = [] for tile in range(int(data["TileCount"])): tile_info.append({ @@ -336,11 +336,6 @@ class OpenPypeTileAssembler(DeadlinePlugin): "width": int(data["Tile{}Width".format(tile)]) }) - # FFMpeg doesn't support tile coordinates at the moment. - # arguments = self.tile_completer_ffmpeg_args( - # int(data["ImageWidth"]), int(data["ImageHeight"]), - # tile_info, output_file) - arguments = self.tile_oiio_args( int(data["ImageWidth"]), int(data["ImageHeight"]), tile_info, output_file) @@ -362,20 +357,20 @@ class OpenPypeTileAssembler(DeadlinePlugin): def pre_render_tasks(self): """Load config file and do remapping.""" self.LogInfo("OpenPype Tile Assembler starting...") - scene_filename = self.GetDataFilename() + config_file = self.GetPluginInfoEntry("ConfigFile") temp_scene_directory = self.CreateTempDirectory( "thread" + str(self.GetThreadNumber())) - temp_scene_filename = Path.GetFileName(scene_filename) + temp_scene_filename = Path.GetFileName(config_file) self.config_file = Path.Combine( temp_scene_directory, temp_scene_filename) if SystemUtils.IsRunningOnWindows(): RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator( - scene_filename, self.config_file, "/", "\\") + config_file, self.config_file, "/", "\\") else: RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator( - scene_filename, self.config_file, "\\", "/") + config_file, self.config_file, "\\", "/") os.chmod(self.config_file, os.stat(self.config_file).st_mode) def post_render_tasks(self): @@ -459,75 +454,3 @@ class OpenPypeTileAssembler(DeadlinePlugin): args.append(output_path) return args - - def tile_completer_ffmpeg_args( - self, output_width, output_height, tiles_info, output_path): - """Generate ffmpeg arguments for tile assembly. 
- - Expected inputs are tiled images. - - Args: - output_width (int): Width of output image. - output_height (int): Height of output image. - tiles_info (list): List of tile items, each item must be - dictionary with `filepath`, `pos_x` and `pos_y` keys - representing path to file and x, y coordinates on output - image where top-left point of tile item should start. - output_path (str): Path to file where should be output stored. - - Returns: - (list): ffmpeg arguments. - - """ - previous_name = "base" - ffmpeg_args = [] - filter_complex_strs = [] - - filter_complex_strs.append("nullsrc=size={}x{}[{}]".format( - output_width, output_height, previous_name - )) - - new_tiles_info = {} - for idx, tile_info in enumerate(tiles_info): - # Add input and store input index - filepath = tile_info["filepath"] - ffmpeg_args.append("-i \"{}\"".format(filepath.replace("\\", "/"))) - - # Prepare initial filter complex arguments - index_name = "input{}".format(idx) - filter_complex_strs.append( - "[{}]setpts=PTS-STARTPTS[{}]".format(idx, index_name) - ) - tile_info["index"] = idx - new_tiles_info[index_name] = tile_info - - # Set frames to 1 - ffmpeg_args.append("-frames 1") - - # Concatenation filter complex arguments - global_index = 1 - total_index = len(new_tiles_info) - for index_name, tile_info in new_tiles_info.items(): - item_str = ( - "[{previous_name}][{index_name}]overlay={pos_x}:{pos_y}" - ).format( - previous_name=previous_name, - index_name=index_name, - pos_x=tile_info["pos_x"], - pos_y=tile_info["pos_y"] - ) - new_previous = "tmp{}".format(global_index) - if global_index != total_index: - item_str += "[{}]".format(new_previous) - filter_complex_strs.append(item_str) - previous_name = new_previous - global_index += 1 - - joined_parts = ";".join(filter_complex_strs) - filter_complex_str = "-filter_complex \"{}\"".format(joined_parts) - - ffmpeg_args.append(filter_complex_str) - ffmpeg_args.append("-y") - ffmpeg_args.append("\"{}\"".format(output_path)) - - return ffmpeg_args diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index ead647b41d..be1d3ff920 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -44,7 +44,7 @@ class AddonSettingsDef(JsonFilesSettingsDef): class ExampleAddon(OpenPypeAddOn, IPluginPaths, ITrayAction): - """This Addon has defined it's settings and interface. + """This Addon has defined its settings and interface. This example has system settings with an enabled option. And use few other interfaces: diff --git a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py index 1ad7a17785..333228c699 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_clone_review_session.py @@ -44,7 +44,7 @@ def clone_review_session(session, entity): class CloneReviewSession(ServerAction): '''Generate Client Review action - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. `varaint` To group actions together, give them the same label and specify a unique variant per action. `identifier` a unique identifier for your action. 
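
The removed tile_completer_ffmpeg_args above built an ffmpeg overlay filter graph; the plugin keeps only the oiiotool route (its tile_oiio_args is not shown in this diff). A rough sketch of how the tile metadata could map to oiiotool arguments, assuming oiiotool's --create and --paste operators; the real plugin's argument list may differ:

    def tile_oiio_args_sketch(width, height, tiles_info, output_path):
        # blank RGBA canvas at final resolution, then paste each tile
        # at its x/y offset on the canvas
        args = ["--create", "{}x{}".format(width, height), "4"]
        for tile in tiles_info:
            args.extend([
                tile["filepath"],
                "--paste", "+{}+{}".format(tile["pos_x"], tile["pos_y"]),
            ])
        args.extend(["-o", output_path])
        return args

    print(tile_oiio_args_sketch(
        3840, 2160,
        [{"filepath": "tile0.exr", "pos_x": 0, "pos_y": 0},
         {"filepath": "tile1.exr", "pos_x": 1920, "pos_y": 0}],
        "assembled.exr",
    ))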
diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py index 21382007a0..42a279e333 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -230,7 +230,7 @@ class CreateDailyReviewSessionServerAction(ServerAction): if not today_session_name: continue - # Find matchin review session + # Find matching review session project_review_sessions = review_sessions_by_project_id[project_id] todays_session = None yesterdays_session = None diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 332648cd02..02231cbe3c 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -124,7 +124,7 @@ class PrepareProjectServer(ServerAction): root_items.append({ "type": "label", "value": ( - "
<br/><br/> NOTE: Roots are crutial for path filling"
+                    "<br/><br/> NOTE: Roots are crucial for path filling"
                     " (and creating folder structure).<br/><br/>
" ) }) diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 1209375f82..a698195c59 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -9,7 +9,7 @@ from openpype_modules.ftrack.lib import ( class PushHierValuesToNonHier(ServerAction): - """Action push hierarchical custom attribute values to non hierarchical. + """Action push hierarchical custom attribute values to non-hierarchical. Hierarchical value is also pushed to their task entities. @@ -119,17 +119,109 @@ class PushHierValuesToNonHier(ServerAction): self.join_query_keys(object_ids) )).all() - output = {} + attrs_by_obj_id = collections.defaultdict(list) hiearchical = [] for attr in attrs: if attr["is_hierarchical"]: hiearchical.append(attr) continue obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = [] - output[obj_id].append(attr) - return output, hiearchical + attrs_by_obj_id[obj_id].append(attr) + return attrs_by_obj_id, hiearchical + + def query_attr_value( + self, + session, + hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, + non_task_entity_ids, + parent_id_by_entity_id + ): + all_non_task_ids_with_parents = set() + for entity_id in non_task_entity_ids: + all_non_task_ids_with_parents.add(entity_id) + _entity_id = entity_id + while True: + parent_id = parent_id_by_entity_id.get(_entity_id) + if ( + parent_id is None + or parent_id in all_non_task_ids_with_parents + ): + break + all_non_task_ids_with_parents.add(parent_id) + _entity_id = parent_id + + all_entity_ids = ( + set(all_non_task_ids_with_parents) + | set(task_entity_ids) + ) + attr_ids = {attr["id"] for attr in hier_attrs} + for obj_id in dst_object_type_ids: + attrs = attrs_by_obj_id.get(obj_id) + if attrs is not None: + for attr in attrs: + attr_ids.add(attr["id"]) + + real_values_by_entity_id = { + entity_id: {} + for entity_id in all_entity_ids + } + + attr_values = query_custom_attributes( + session, attr_ids, all_entity_ids, True + ) + for item in attr_values: + entity_id = item["entity_id"] + attr_id = item["configuration_id"] + real_values_by_entity_id[entity_id][attr_id] = item["value"] + + # Fill hierarchical values + hier_attrs_key_by_id = { + hier_attr["id"]: hier_attr + for hier_attr in hier_attrs + } + hier_values_per_entity_id = {} + for entity_id in all_non_task_ids_with_parents: + real_values = real_values_by_entity_id[entity_id] + hier_values_per_entity_id[entity_id] = {} + for attr_id, attr in hier_attrs_key_by_id.items(): + key = attr["key"] + hier_values_per_entity_id[entity_id][key] = ( + real_values.get(attr_id) + ) + + output = {} + for entity_id in non_task_entity_ids: + output[entity_id] = {} + for attr in hier_attrs_key_by_id.values(): + key = attr["key"] + value = hier_values_per_entity_id[entity_id][key] + tried_ids = set() + if value is None: + tried_ids.add(entity_id) + _entity_id = entity_id + while value is None: + parent_id = parent_id_by_entity_id.get(_entity_id) + if not parent_id: + break + value = hier_values_per_entity_id[parent_id][key] + if value is not None: + break + _entity_id = parent_id + tried_ids.add(parent_id) + + if value is None: + value = attr["default"] + + if value is not None: + for ent_id in tried_ids: + hier_values_per_entity_id[ent_id][key] = value + + output[entity_id][key] = value + + return 
real_values_by_entity_id, output def propagate_values(self, session, event, selected_entities): ftrack_settings = self.get_ftrack_settings( @@ -156,29 +248,24 @@ class PushHierValuesToNonHier(ServerAction): } task_object_type = object_types_by_low_name["task"] - destination_object_types = [task_object_type] + dst_object_type_ids = {task_object_type["id"]} for ent_type in interest_entity_types: obj_type = object_types_by_low_name.get(ent_type) - if obj_type and obj_type not in destination_object_types: - destination_object_types.append(obj_type) - - destination_object_type_ids = set( - obj_type["id"] - for obj_type in destination_object_types - ) + if obj_type: + dst_object_type_ids.add(obj_type["id"]) interest_attributes = action_settings["interest_attributes"] # Find custom attributes definitions attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, destination_object_type_ids, interest_attributes + session, dst_object_type_ids, interest_attributes ) # Filter destination object types if they have any object specific # custom attribute - for obj_id in tuple(destination_object_type_ids): + for obj_id in tuple(dst_object_type_ids): if obj_id not in attrs_by_obj_id: - destination_object_type_ids.remove(obj_id) + dst_object_type_ids.remove(obj_id) - if not destination_object_type_ids: + if not dst_object_type_ids: # TODO report that there are not matching custom attributes return { "success": True, @@ -192,14 +279,14 @@ class PushHierValuesToNonHier(ServerAction): session, selected_ids, project_entity, - destination_object_type_ids + dst_object_type_ids ) self.log.debug("Preparing whole project hierarchy by ids.") entities_by_obj_id = { obj_id: [] - for obj_id in destination_object_type_ids + for obj_id in dst_object_type_ids } self.log.debug("Filtering Task entities.") @@ -223,10 +310,16 @@ class PushHierValuesToNonHier(ServerAction): "message": "Nothing to do in your selection." 
} - self.log.debug("Getting Hierarchical custom attribute values parents.") - hier_values_by_entity_id = self.get_hier_values( + self.log.debug("Getting Custom attribute values.") + ( + real_values_by_entity_id, + hier_values_by_entity_id + ) = self.query_attr_value( session, hier_attrs, + attrs_by_obj_id, + dst_object_type_ids, + task_entity_ids, non_task_entity_ids, parent_id_by_entity_id ) @@ -237,7 +330,8 @@ class PushHierValuesToNonHier(ServerAction): hier_attrs, task_entity_ids, hier_values_by_entity_id, - parent_id_by_entity_id + parent_id_by_entity_id, + real_values_by_entity_id ) self.log.debug("Setting values to entities themselves.") @@ -245,7 +339,8 @@ class PushHierValuesToNonHier(ServerAction): session, entities_by_obj_id, attrs_by_obj_id, - hier_values_by_entity_id + hier_values_by_entity_id, + real_values_by_entity_id ) return True @@ -322,112 +417,64 @@ class PushHierValuesToNonHier(ServerAction): return parent_id_by_entity_id, filtered_entities - def get_hier_values( - self, - session, - hier_attrs, - focus_entity_ids, - parent_id_by_entity_id - ): - all_ids_with_parents = set() - for entity_id in focus_entity_ids: - all_ids_with_parents.add(entity_id) - _entity_id = entity_id - while True: - parent_id = parent_id_by_entity_id.get(_entity_id) - if ( - not parent_id - or parent_id in all_ids_with_parents - ): - break - all_ids_with_parents.add(parent_id) - _entity_id = parent_id - - hier_attr_ids = tuple(hier_attr["id"] for hier_attr in hier_attrs) - hier_attrs_key_by_id = { - hier_attr["id"]: hier_attr["key"] - for hier_attr in hier_attrs - } - - values_per_entity_id = {} - for entity_id in all_ids_with_parents: - values_per_entity_id[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - values_per_entity_id[entity_id][key] = None - - values = query_custom_attributes( - session, hier_attr_ids, all_ids_with_parents, True - ) - for item in values: - entity_id = item["entity_id"] - key = hier_attrs_key_by_id[item["configuration_id"]] - - values_per_entity_id[entity_id][key] = item["value"] - - output = {} - for entity_id in focus_entity_ids: - output[entity_id] = {} - for key in hier_attrs_key_by_id.values(): - value = values_per_entity_id[entity_id][key] - tried_ids = set() - if value is None: - tried_ids.add(entity_id) - _entity_id = entity_id - while value is None: - parent_id = parent_id_by_entity_id.get(_entity_id) - if not parent_id: - break - value = values_per_entity_id[parent_id][key] - if value is not None: - break - _entity_id = parent_id - tried_ids.add(parent_id) - - if value is not None: - for ent_id in tried_ids: - values_per_entity_id[ent_id][key] = value - - output[entity_id][key] = value - return output - def set_task_attr_values( self, session, hier_attrs, task_entity_ids, hier_values_by_entity_id, - parent_id_by_entity_id + parent_id_by_entity_id, + real_values_by_entity_id ): hier_attr_id_by_key = { attr["key"]: attr["id"] for attr in hier_attrs } + filtered_task_ids = set() for task_id in task_entity_ids: - parent_id = parent_id_by_entity_id.get(task_id) or {} + parent_id = parent_id_by_entity_id.get(task_id) parent_values = hier_values_by_entity_id.get(parent_id) - if not parent_values: - continue + if parent_values: + filtered_task_ids.add(task_id) + if not filtered_task_ids: + return + + for task_id in filtered_task_ids: + parent_id = parent_id_by_entity_id[task_id] + parent_values = hier_values_by_entity_id[parent_id] hier_values_by_entity_id[task_id] = {} + real_task_attr_values = real_values_by_entity_id[task_id] for key, value in 
parent_values.items():
                 hier_values_by_entity_id[task_id][key] = value
+                if value is None:
+                    continue
+
                 configuration_id = hier_attr_id_by_key[key]
                 _entity_key = collections.OrderedDict([
                     ("configuration_id", configuration_id),
                     ("entity_id", task_id)
                 ])
-                session.recorded_operations.push(
-                    ftrack_api.operation.UpdateEntityOperation(
-                        "ContextCustomAttributeValue",
+                op = None
+                if configuration_id not in real_task_attr_values:
+                    op = ftrack_api.operation.CreateEntityOperation(
+                        "CustomAttributeValue",
+                        _entity_key,
+                        {"value": value}
+                    )
+                elif real_task_attr_values[configuration_id] != value:
+                    op = ftrack_api.operation.UpdateEntityOperation(
+                        "CustomAttributeValue",
                         _entity_key,
                         "value",
-                        ftrack_api.symbol.NOT_SET,
+                        real_task_attr_values[configuration_id],
                         value
                     )
-                )
-                if len(session.recorded_operations) > 100:
-                    session.commit()
+
+                if op is not None:
+                    session.recorded_operations.push(op)
+                    if len(session.recorded_operations) > 100:
+                        session.commit()
         session.commit()

@@ -436,39 +483,68 @@ class PushHierValuesToNonHier(ServerAction):
         session,
         entities_by_obj_id,
         attrs_by_obj_id,
-        hier_values_by_entity_id
+        hier_values_by_entity_id,
+        real_values_by_entity_id
     ):
+        """Push values from hierarchical custom attributes to non-hierarchical.
+
+        Args:
+            session (ftrack_api.Session): Session which queried entities,
+                values and which is used for change propagation.
+            entities_by_obj_id (dict[str, list[str]]): Lists of TypedContext
+                ftrack entity ids, grouped by object type id, to which the
+                attribute values are propagated.
+            attrs_by_obj_id (dict[str, ftrack_api.Entity]): Objects of
+                'CustomAttributeConfiguration' by their ids.
+            hier_values_by_entity_id (dict[str, dict[str, Any]]): Attribute
+                values by entity id and by their keys.
+            real_values_by_entity_id (dict[str, dict[str, Any]]): Real
+                attribute values of entities.
+ """ + for object_id, entity_ids in entities_by_obj_id.items(): attrs = attrs_by_obj_id.get(object_id) if not attrs or not entity_ids: continue - for attr in attrs: - for entity_id in entity_ids: - value = ( - hier_values_by_entity_id - .get(entity_id, {}) - .get(attr["key"]) - ) + for entity_id in entity_ids: + real_values = real_values_by_entity_id.get(entity_id) + hier_values = hier_values_by_entity_id.get(entity_id) + if hier_values is None: + continue + + for attr in attrs: + attr_id = attr["id"] + attr_key = attr["key"] + value = hier_values.get(attr_key) if value is None: continue _entity_key = collections.OrderedDict([ - ("configuration_id", attr["id"]), + ("configuration_id", attr_id), ("entity_id", entity_id) ]) - session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + op = None + if attr_id not in real_values: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + _entity_key, + {"value": value} + ) + elif real_values[attr_id] != value: + op = ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", _entity_key, "value", - ftrack_api.symbol.NOT_SET, + real_values[attr_id], value ) - ) - if len(session.recorded_operations) > 100: - session.commit() + + if op is not None: + session.recorded_operations.push(op) + if len(session.recorded_operations) > 100: + session.commit() session.commit() diff --git a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py index d160b7200d..f6899843a3 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py +++ b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py @@ -12,7 +12,7 @@ from openpype_modules.ftrack.lib.avalon_sync import create_chunks class TransferHierarchicalValues(ServerAction): - """Transfer values across hierarhcical attributes. + """Transfer values across hierarchical attributes. Aalso gives ability to convert types meanwhile. That is limited to conversions between numbers and strings @@ -67,7 +67,7 @@ class TransferHierarchicalValues(ServerAction): "type": "label", "value": ( "Didn't found custom attributes" - " that can be transfered." + " that can be transferred." 
                 )
             }]
         }
diff --git a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
index a65ae46545..a100c34f67 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
+++ b/openpype/modules/ftrack/event_handlers_server/event_next_task_update.py
@@ -279,7 +279,7 @@ class NextTaskUpdate(BaseEvent):
                 except Exception:
                     session.rollback()
                     self.log.warning(
-                        "\"{}\" status couldnt be set to \"{}\"".format(
+                        "\"{}\" status couldn't be set to \"{}\"".format(
                             ent_path, new_status["name"]
                         ),
                         exc_info=True
diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
index dc76920a57..ed630ad59d 100644
--- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
+++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py
@@ -1,6 +1,6 @@
 import collections
-import datetime
 import copy
+from typing import Any

 import ftrack_api
 from openpype_modules.ftrack.lib import (
@@ -9,13 +9,30 @@ from openpype_modules.ftrack.lib import (
 )

-class PushFrameValuesToTaskEvent(BaseEvent):
+class PushHierValuesToNonHierEvent(BaseEvent):
+    """Push value changes between hierarchical and non-hierarchical attributes.
+
+    Changes of non-hierarchical attributes are pushed to hierarchical and back.
+    Both attributes must share the same custom attribute definition.
+
+    The handler does not track changes of hierarchical parents. So if an
+    entity does not have an explicitly set value of a hierarchical attribute
+    and any parent changes it, the change is not propagated.
+
+    The handler also pushes the value to the task entity on task creation
+    and movement. To push values between hierarchical & non-hierarchical
+    add 'Task' to entity types in settings.
+
+    Todos:
+        It should be possible to enable pushing task attribute values on
+        create/move via settings.
+    """
+
     # Ignore event handler by default
     cust_attrs_query = (
         "select id, key, object_type_id, is_hierarchical, default"
         " from CustomAttributeConfiguration"
-        " where key in ({}) and"
-        " (object_type_id in ({}) or is_hierarchical is true)"
+        " where key in ({})"
     )

     _cached_task_object_id = None
@@ -26,35 +43,35 @@ class PushHierValuesToNonHierEvent(BaseEvent):

     settings_key = "sync_hier_entity_attributes"

-    def session_user_id(self, session):
-        if self._cached_user_id is None:
-            user = session.query(
-                "User where username is \"{}\"".format(session.api_user)
-            ).one()
-            self._cached_user_id = user["id"]
-        return self._cached_user_id
+    def filter_entities_info(
+        self, event: ftrack_api.event.base.Event
+    ) -> dict[str, list[dict[str, Any]]]:
+        """Basic filtering of the entity change info we care about.

-    def launch(self, session, event):
-        filtered_entities_info = self.filter_entities_info(event)
-        if not filtered_entities_info:
-            return
+        This filtering is the first of many filters. It does not query
+        anything from ftrack nor use settings.

-        for project_id, entities_info in filtered_entities_info.items():
-            self.process_by_project(session, event, project_id, entities_info)
+        Args:
+            event (ftrack_api.event.base.Event): Ftrack event with update
+                information.
+
+        Returns:
+            dict[str, list[dict[str, Any]]]: Filtered entity changes by
+                project id.
+        """

-    def filter_entities_info(self, event):
         # Filter if event contain relevant data
         entities_info = event["data"].get("entities")
         if not entities_info:
             return

-        entities_info_by_project_id = {}
+        entities_info_by_project_id = collections.defaultdict(list)
         for entity_info in entities_info:
-            # Care only about tasks
-            if entity_info.get("entityType") != "task":
+            # Ignore removed entities
+            if entity_info.get("action") == "remove":
                 continue

-            # Care only about changes of status
+            # Care only about information with changes of entities
             changes = entity_info.get("changes")
             if not changes:
                 continue
@@ -69,367 +86,287 @@ class PushHierValuesToNonHierEvent(BaseEvent):
             if project_id is None:
                 continue

-            # Skip `Task` entity type if parent didn't change
-            if entity_info["entity_type"].lower() == "task":
-                if (
-                    "parent_id" not in changes
-                    or changes["parent_id"]["new"] is None
-                ):
-                    continue
-
-            if project_id not in entities_info_by_project_id:
-                entities_info_by_project_id[project_id] = []
             entities_info_by_project_id[project_id].append(entity_info)

         return entities_info_by_project_id

-    def process_by_project(self, session, event, project_id, entities_info):
-        project_name = self.get_project_name_from_event(
+    def _get_attrs_configurations(self, session, interest_attributes):
+        """Get custom attribute configurations by name.
+
+        Args:
+            session (ftrack_api.Session): Ftrack session.
+            interest_attributes (list[str]): Names of custom attributes
+                that should be synchronized.
+
+        Returns:
+            tuple[dict[str, list], list]: Attributes by object id and
+                hierarchical attributes.
+        """
+
+        attrs = session.query(self.cust_attrs_query.format(
+            self.join_query_keys(interest_attributes)
+        )).all()
+
+        attrs_by_obj_id = collections.defaultdict(list)
+        hier_attrs = []
+        for attr in attrs:
+            if attr["is_hierarchical"]:
+                hier_attrs.append(attr)
+                continue
+            obj_id = attr["object_type_id"]
+            attrs_by_obj_id[obj_id].append(attr)
+        return attrs_by_obj_id, hier_attrs
+
+    def _get_handler_project_settings(
+        self,
+        session: ftrack_api.Session,
+        event: ftrack_api.event.base.Event,
+        project_id: str
+    ) -> tuple[set[str], set[str]]:
+        """Get handler settings based on the project.
+
+        Args:
+            session (ftrack_api.Session): Ftrack session.
+            event (ftrack_api.event.base.Event): Ftrack event which triggered
+                the changes.
+            project_id (str): Project id where the current changes are handled.
+
+        Returns:
+            tuple[set[str], set[str]]: Attribute names we care about and
+                entity types we care about.
+        """
+
+        project_name: str = self.get_project_name_from_event(
             session, event, project_id
         )
         # Load settings
-        project_settings = self.get_project_settings_from_event(
-            event, project_name
+        project_settings: dict[str, Any] = (
+            self.get_project_settings_from_event(event, project_name)
         )
         # Load status mapping from presets
-        event_settings = (
+        event_settings: dict[str, Any] = (
             project_settings
             ["ftrack"]
             ["events"]
-            ["sync_hier_entity_attributes"]
+            [self.settings_key]
         )
         # Skip if event is not enabled
         if not event_settings["enabled"]:
             self.log.debug("Project \"{}\" has disabled {}".format(
                 project_name, self.__class__.__name__
             ))
-            return
+            return set(), set()

-        interest_attributes = event_settings["interest_attributes"]
+        interest_attributes: list[str] = event_settings["interest_attributes"]
         if not interest_attributes:
             self.log.info((
                 "Project \"{}\" does not have filled 'interest_attributes',"
                 " skipping."
)) - return - interest_entity_types = event_settings["interest_entity_types"] + + interest_entity_types: list[str] = ( + event_settings["interest_entity_types"]) if not interest_entity_types: self.log.info(( "Project \"{}\" does not have filled 'interest_entity_types'," " skipping." )) - return - interest_attributes = set(interest_attributes) - interest_entity_types = set(interest_entity_types) + # Unify possible issues from settings ('Asset Build' -> 'assetbuild') + interest_entity_types: set[str] = { + entity_type.replace(" ", "").lower() + for entity_type in interest_entity_types + } + return set(interest_attributes), interest_entity_types - # Separate value changes and task parent changes - _entities_info = [] - added_entities = [] - added_entity_ids = set() - task_parent_changes = [] + def _entities_filter_by_settings( + self, + entities_info: list[dict[str, Any]], + interest_attributes: set[str], + interest_entity_types: set[str] + ): + new_entities_info = [] for entity_info in entities_info: - if entity_info["entity_type"].lower() == "task": - task_parent_changes.append(entity_info) - elif entity_info.get("action") == "add": - added_entities.append(entity_info) - added_entity_ids.add(entity_info["entityId"]) - else: - _entities_info.append(entity_info) - entities_info = _entities_info + entity_type_low = entity_info["entity_type"].lower() - # Filter entities info with changes - interesting_data, changed_keys_by_object_id = self.filter_changes( - session, event, entities_info, interest_attributes - ) - self.interesting_data_for_added( - session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ) - if not interesting_data and not task_parent_changes: - return + changes = entity_info["changes"] + # SPECIAL CASE: Capture changes of task created/moved under + # interested entity type + if ( + entity_type_low == "task" + and "parent_id" in changes + ): + # Direct parent is always second item in 'parents' and 'Task' + # must have at least one parent + parent_info = entity_info["parents"][1] + parent_entity_type = ( + parent_info["entity_type"] + .replace(" ", "") + .lower() + ) + if parent_entity_type in interest_entity_types: + new_entities_info.append(entity_info) + continue - # Prepare object types - object_types = session.query("select id, name from ObjectType").all() - object_types_by_name = {} - for object_type in object_types: - name_low = object_type["name"].lower() - object_types_by_name[name_low] = object_type + # Skip if entity type is not enabled for attr value sync + if entity_type_low not in interest_entity_types: + continue - # NOTE it would be nice to check if `interesting_data` do not contain - # value changs of tasks that were created or moved - # - it is a complex way how to find out - if interesting_data: - self.process_attribute_changes( - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids - ) + valid_attr_change = entity_info.get("action") == "add" + for attr_key in interest_attributes: + if valid_attr_change: + break - if task_parent_changes: - self.process_task_parent_change( - session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes - ) + if attr_key not in changes: + continue - def process_task_parent_change( + if changes[attr_key]["new"] is not None: + valid_attr_change = True + + if not valid_attr_change: + continue + + new_entities_info.append(entity_info) + + return new_entities_info 
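
A toy run of the filtering above (hypothetical event payload, not the handler itself): an update is kept when a watched attribute receives a non-None value on an entity type of interest; task creation/movement under such a type is the special case handled separately:

    entity_info = {
        "entity_type": "Shot",
        "action": "update",
        "changes": {"frameStart": {"new": "1001", "old": None}},
    }
    interest_attributes = {"frameStart", "frameEnd"}
    interest_entity_types = {"shot", "assetbuild"}

    keep = (
        entity_info["entity_type"].lower() in interest_entity_types
        and any(
            entity_info["changes"].get(key, {}).get("new") is not None
            for key in interest_attributes
        )
    )
    print(keep)  # True: frameStart changed to a non-None value on a Shot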
+ + def propagate_attribute_changes( self, session, - object_types_by_name, - task_parent_changes, - interest_entity_types, - interest_attributes + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, ): - """Push custom attribute values if task parent has changed. + hier_attr_ids_by_key = { + attr["key"]: attr["id"] + for attr in hier_attrs + } + filtered_interest_attributes = { + attr_name + for attr_name in interest_attributes + if attr_name in hier_attr_ids_by_key + } + attrs_keys_by_obj_id = {} + for obj_id, attrs in attrs_by_obj_id.items(): + attrs_keys_by_obj_id[obj_id] = { + attr["key"]: attr["id"] + for attr in attrs + } - Parent is changed if task is created or if is moved under different - entity. We don't care about all task changes only about those that - have it's parent in interest types (from settings). + op_changes = [] + for entity_info in entities_info: + entity_id = entity_info["entityId"] + obj_id = entity_info["objectTypeId"] + # Skip attributes sync if does not have object specific custom + # attribute + if obj_id not in attrs_keys_by_obj_id: + continue + attr_keys = attrs_keys_by_obj_id[obj_id] + real_values = real_values_by_entity_id[entity_id] + hier_values = hier_values_by_entity_id[entity_id] - Tasks hierarchical value should be unset or set based on parents - real hierarchical value and non hierarchical custom attribute value - should be set to hierarchical value. - """ - - # Store task ids which were created or moved under parent with entity - # type defined in settings (interest_entity_types). - task_ids = set() - # Store parent ids of matching task ids - matching_parent_ids = set() - # Store all entity ids of all entities to be able query hierarchical - # values. - whole_hierarchy_ids = set() - # Store parent id of each entity id - parent_id_by_entity_id = {} - for entity_info in task_parent_changes: - # Ignore entities with less parents than 2 - # NOTE entity itself is also part of "parents" value - parents = entity_info.get("parents") or [] - if len(parents) < 2: + changes = copy.deepcopy(entity_info["changes"]) + obj_id_attr_keys = { + attr_key + for attr_key in filtered_interest_attributes + if attr_key in attr_keys + } + if not obj_id_attr_keys: continue - parent_info = parents[1] - # Check if parent has entity type we care about. 
- if parent_info["entity_type"] not in interest_entity_types: - continue + value_by_key = {} + is_new_entity = entity_info.get("action") == "add" + for attr_key in obj_id_attr_keys: + if ( + attr_key in changes + and changes[attr_key]["new"] is not None + ): + value_by_key[attr_key] = changes[attr_key]["new"] - task_ids.add(entity_info["entityId"]) - matching_parent_ids.add(parent_info["entityId"]) - - # Store whole hierarchi of task entity - prev_id = None - for item in parents: - item_id = item["entityId"] - whole_hierarchy_ids.add(item_id) - - if prev_id is None: - prev_id = item_id + if not is_new_entity: continue - parent_id_by_entity_id[prev_id] = item_id - if item["entityType"] == "show": - break - prev_id = item_id + hier_attr_id = hier_attr_ids_by_key[attr_key] + attr_id = attr_keys[attr_key] + if hier_attr_id in real_values or attr_id in real_values: + continue - # Just skip if nothing is interesting for our settings - if not matching_parent_ids: - return + value_by_key[attr_key] = hier_values[hier_attr_id] - # Query object type ids of parent ids for custom attribute - # definitions query - entities = session.query( - "select object_type_id from TypedContext where id in ({})".format( - self.join_query_keys(matching_parent_ids) - ) - ) + for key, new_value in value_by_key.items(): + if new_value is None: + continue - # Prepare task object id - task_object_id = object_types_by_name["task"]["id"] + hier_id = hier_attr_ids_by_key[key] + std_id = attr_keys[key] + real_hier_value = real_values.get(hier_id) + real_std_value = real_values.get(std_id) + hier_value = hier_values[hier_id] + # Get right type of value for conversion + # - values in event are strings + type_value = real_hier_value + if type_value is None: + type_value = real_std_value + if type_value is None: + type_value = hier_value + # Skip if current values are not set + if type_value is None: + continue - # All object ids for which we're querying custom attribute definitions - object_type_ids = set() - object_type_ids.add(task_object_id) - for entity in entities: - object_type_ids.add(entity["object_type_id"]) + try: + new_value = type(type_value)(new_value) + except Exception: + self.log.warning(( + "Couldn't convert from {} to {}." + " Skipping update values." 
+ ).format(type(new_value), type(type_value))) + continue - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, object_type_ids, interest_attributes - ) + real_std_value_is_same = new_value == real_std_value + real_hier_value_is_same = new_value == real_hier_value + # New value does not match anything in current entity values + if ( + not is_new_entity + and not real_std_value_is_same + and not real_hier_value_is_same + ): + continue - # Skip if all task attributes are not available - task_attrs = attrs_by_obj_id.get(task_object_id) - if not task_attrs: - return + if not real_std_value_is_same: + op_changes.append(( + std_id, + entity_id, + new_value, + real_values.get(std_id), + std_id in real_values + )) - # Skip attributes that is not in both hierarchical and nonhierarchical - # TODO be able to push values if hierarchical is available - for key in interest_attributes: - if key not in hier_attrs: - task_attrs.pop(key, None) + if not real_hier_value_is_same: + op_changes.append(( + hier_id, + entity_id, + new_value, + real_values.get(hier_id), + hier_id in real_values + )) - elif key not in task_attrs: - hier_attrs.pop(key) + for change in op_changes: + ( + attr_id, + entity_id, + new_value, + old_value, + do_update + ) = change - # Skip if nothing remained - if not task_attrs: - return - - # Do some preparations for custom attribute values query - attr_key_by_id = {} - nonhier_id_by_key = {} - hier_attr_ids = [] - for key, attr_id in hier_attrs.items(): - attr_key_by_id[attr_id] = key - hier_attr_ids.append(attr_id) - - conf_ids = list(hier_attr_ids) - task_conf_ids = [] - for key, attr_id in task_attrs.items(): - attr_key_by_id[attr_id] = key - nonhier_id_by_key[key] = attr_id - conf_ids.append(attr_id) - task_conf_ids.append(attr_id) - - # Query custom attribute values - # - result does not contain values for all entities only result of - # query callback to ftrack server - result = query_custom_attributes( - session, list(hier_attr_ids), whole_hierarchy_ids, True - ) - result.extend( - query_custom_attributes( - session, task_conf_ids, whole_hierarchy_ids, False - ) - ) - - # Prepare variables where result will be stored - # - hierachical values should not contain attribute with value by - # default - hier_values_by_entity_id = { - entity_id: {} - for entity_id in whole_hierarchy_ids - } - # - real values of custom attributes - values_by_entity_id = { - entity_id: { - attr_id: None - for attr_id in conf_ids - } - for entity_id in whole_hierarchy_ids - } - for item in result: - attr_id = item["configuration_id"] - entity_id = item["entity_id"] - value = item["value"] - - values_by_entity_id[entity_id][attr_id] = value - - if attr_id in hier_attr_ids and value is not None: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare values for all task entities - # - going through all parents and storing first value value - # - store None to those that are already known that do not have set - # value at all - for task_id in tuple(task_ids): - for attr_id in hier_attr_ids: - entity_ids = [] - value = None - entity_id = task_id - while value is None: - entity_value = hier_values_by_entity_id[entity_id] - if attr_id in entity_value: - value = entity_value[attr_id] - if value is None: - break - - if value is None: - entity_ids.append(entity_id) - - entity_id = parent_id_by_entity_id.get(entity_id) - if entity_id is None: - break - - for entity_id in entity_ids: - hier_values_by_entity_id[entity_id][attr_id] = value - - # Prepare changes to commit - changes = [] - for 
task_id in tuple(task_ids): - parent_id = parent_id_by_entity_id[task_id] - for attr_id in hier_attr_ids: - attr_key = attr_key_by_id[attr_id] - nonhier_id = nonhier_id_by_key[attr_key] - - # Real value of hierarchical attribute on parent - # - If is none then should be unset - real_parent_value = values_by_entity_id[parent_id][attr_id] - # Current hierarchical value of a task - # - Will be compared to real parent value - hier_value = hier_values_by_entity_id[task_id][attr_id] - - # Parent value that can be inherited from it's parent entity - parent_value = hier_values_by_entity_id[parent_id][attr_id] - # Task value of nonhierarchical custom attribute - nonhier_value = values_by_entity_id[task_id][nonhier_id] - - if real_parent_value != hier_value: - changes.append({ - "new_value": real_parent_value, - "attr_id": attr_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - if parent_value != nonhier_value: - changes.append({ - "new_value": parent_value, - "attr_id": nonhier_id, - "entity_id": task_id, - "attr_key": attr_key - }) - - self._commit_changes(session, changes) - - def _commit_changes(self, session, changes): - uncommited_changes = False - for idx, item in enumerate(changes): - new_value = item["new_value"] - old_value = item["old_value"] - attr_id = item["attr_id"] - entity_id = item["entity_id"] - attr_key = item["attr_key"] - - entity_key = collections.OrderedDict(( + entity_key = collections.OrderedDict([ ("configuration_id", attr_id), ("entity_id", entity_id) - )) - self._cached_changes.append({ - "attr_key": attr_key, - "entity_id": entity_id, - "value": new_value, - "time": datetime.datetime.now() - }) - old_value_is_set = ( - old_value is not ftrack_api.symbol.NOT_SET - and old_value is not None - ) - if new_value is None: - if not old_value_is_set: - continue - op = ftrack_api.operation.DeleteEntityOperation( - "CustomAttributeValue", - entity_key - ) - - elif old_value_is_set: + ]) + if do_update: op = ftrack_api.operation.UpdateEntityOperation( "CustomAttributeValue", entity_key, @@ -446,449 +383,116 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) session.recorded_operations.push(op) - self.log.info(( - "Changing Custom Attribute \"{}\" to value" - " \"{}\" on entity: {}" - ).format(attr_key, new_value, entity_id)) - - if (idx + 1) % 20 == 0: - uncommited_changes = False - try: - session.commit() - except Exception: - session.rollback() - self.log.warning( - "Changing of values failed.", exc_info=True - ) - else: - uncommited_changes = True - if uncommited_changes: - try: + if len(session.recorded_operations) > 100: session.commit() - except Exception: - session.rollback() - self.log.warning("Changing of values failed.", exc_info=True) + session.commit() - def process_attribute_changes( + def process_by_project( self, - session, - object_types_by_name, - interesting_data, - changed_keys_by_object_id, - interest_entity_types, - interest_attributes, - added_entity_ids + session: ftrack_api.Session, + event: ftrack_api.event.base.Event, + project_id: str, + entities_info: list[dict[str, Any]] ): - # Prepare task object id - task_object_id = object_types_by_name["task"]["id"] + """Process changes in single project. 
- # Collect object type ids based on settings - interest_object_ids = [] - for entity_type in interest_entity_types: - _entity_type = entity_type.lower() - object_type = object_types_by_name.get(_entity_type) - if not object_type: - self.log.warning("Couldn't find object type \"{}\"".format( - entity_type - )) + Args: + session (ftrack_api.Session): Ftrack session. + event (ftrack_api.event.base.Event): Event which has all changes + information. + project_id (str): Project id related to changes. + entities_info (list[dict[str, Any]]): Changes of entities. + """ - interest_object_ids.append(object_type["id"]) - - # Query entities by filtered data and object ids - entities = self.get_entities( - session, interesting_data, interest_object_ids - ) - if not entities: + ( + interest_attributes, + interest_entity_types + ) = self._get_handler_project_settings(session, event, project_id) + if not interest_attributes or not interest_entity_types: return - # Pop not found entities from interesting data - entity_ids = set( - entity["id"] - for entity in entities + entities_info: list[dict[str, Any]] = ( + self._entities_filter_by_settings( + entities_info, + interest_attributes, + interest_entity_types + ) ) - for entity_id in tuple(interesting_data.keys()): - if entity_id not in entity_ids: - interesting_data.pop(entity_id) - - # Add task object type to list - attr_obj_ids = list(interest_object_ids) - attr_obj_ids.append(task_object_id) - - attrs_by_obj_id, hier_attrs = self.attrs_configurations( - session, attr_obj_ids, interest_attributes - ) - - task_attrs = attrs_by_obj_id.get(task_object_id) - - changed_keys = set() - # Skip keys that are not both in hierachical and type specific - for object_id, keys in changed_keys_by_object_id.items(): - changed_keys |= set(keys) - object_id_attrs = attrs_by_obj_id.get(object_id) - for key in keys: - if key not in hier_attrs: - attrs_by_obj_id[object_id].pop(key) - continue - - if ( - (not object_id_attrs or key not in object_id_attrs) - and (not task_attrs or key not in task_attrs) - ): - hier_attrs.pop(key) - - # Clean up empty values - for key, value in tuple(attrs_by_obj_id.items()): - if not value: - attrs_by_obj_id.pop(key) - - if not attrs_by_obj_id: - self.log.warning(( - "There is not created Custom Attributes {} " - " for entity types: {}" - ).format( - self.join_query_keys(interest_attributes), - self.join_query_keys(interest_entity_types) - )) + if not entities_info: return - # Prepare task entities - task_entities = [] - # If task entity does not contain changed attribute then skip - if task_attrs: - task_entities = self.get_task_entities(session, interesting_data) - - task_entity_ids = set() - parent_id_by_task_id = {} - for task_entity in task_entities: - task_id = task_entity["id"] - task_entity_ids.add(task_id) - parent_id_by_task_id[task_id] = task_entity["parent_id"] - - self.finalize_attribute_changes( - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ) - - def finalize_attribute_changes( - self, - session, - interesting_data, - changed_keys, - attrs_by_obj_id, - hier_attrs, - task_entity_ids, - parent_id_by_task_id, - added_entity_ids - ): - attr_id_to_key = {} - for attr_confs in attrs_by_obj_id.values(): - for key in changed_keys: - custom_attr_id = attr_confs.get(key) - if custom_attr_id: - attr_id_to_key[custom_attr_id] = key - - for key in changed_keys: - custom_attr_id = hier_attrs.get(key) - if custom_attr_id: - 
attr_id_to_key[custom_attr_id] = key - - entity_ids = ( - set(interesting_data.keys()) | task_entity_ids - ) - attr_ids = set(attr_id_to_key.keys()) - - current_values_by_id = self.get_current_values( - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs - ) - - changes = [] - for entity_id, current_values in current_values_by_id.items(): - parent_id = parent_id_by_task_id.get(entity_id) - if not parent_id: - parent_id = entity_id - values = interesting_data[parent_id] - - added_entity = entity_id in added_entity_ids - for attr_id, old_value in current_values.items(): - if added_entity and attr_id in hier_attrs: - continue - - attr_key = attr_id_to_key.get(attr_id) - if not attr_key: - continue - - # Convert new value from string - new_value = values.get(attr_key) - new_value_is_valid = ( - old_value is not ftrack_api.symbol.NOT_SET - and new_value is not None - ) - if added_entity and not new_value_is_valid: - continue - - if new_value is not None and new_value_is_valid: - try: - new_value = type(old_value)(new_value) - except Exception: - self.log.warning(( - "Couldn't convert from {} to {}." - " Skipping update values." - ).format(type(new_value), type(old_value))) - if new_value == old_value: - continue - - changes.append({ - "new_value": new_value, - "attr_id": attr_id, - "old_value": old_value, - "entity_id": entity_id, - "attr_key": attr_key - }) - self._commit_changes(session, changes) - - def filter_changes( - self, session, event, entities_info, interest_attributes - ): - session_user_id = self.session_user_id(session) - user_data = event["data"].get("user") - changed_by_session = False - if user_data and user_data.get("userid") == session_user_id: - changed_by_session = True - - current_time = datetime.datetime.now() - - interesting_data = {} - changed_keys_by_object_id = {} - - for entity_info in entities_info: - # Care only about changes if specific keys - entity_changes = {} - changes = entity_info["changes"] - for key in interest_attributes: - if key in changes: - entity_changes[key] = changes[key]["new"] - - entity_id = entity_info["entityId"] - if changed_by_session: - for key, new_value in tuple(entity_changes.items()): - for cached in tuple(self._cached_changes): - if ( - cached["entity_id"] != entity_id - or cached["attr_key"] != key - ): - continue - - cached_value = cached["value"] - try: - new_value = type(cached_value)(new_value) - except Exception: - pass - - if cached_value == new_value: - self._cached_changes.remove(cached) - entity_changes.pop(key) - break - - delta = (current_time - cached["time"]).seconds - if delta > self._max_delta: - self._cached_changes.remove(cached) - - if not entity_changes: - continue - - entity_id = entity_info["entityId"] - object_id = entity_info["objectTypeId"] - interesting_data[entity_id] = entity_changes - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) - - return interesting_data, changed_keys_by_object_id - - def interesting_data_for_added( - self, - session, - added_entities, - interest_attributes, - interesting_data, - changed_keys_by_object_id - ): - if not added_entities or not interest_attributes: - return - - object_type_ids = set() - entity_ids = set() - all_entity_ids = set() - object_id_by_entity_id = {} - project_id = None - entity_ids_by_parent_id = collections.defaultdict(set) - for entity_info in added_entities: - object_id = entity_info["objectTypeId"] - entity_id = 
entity_info["entityId"] - object_type_ids.add(object_id) - entity_ids.add(entity_id) - object_id_by_entity_id[entity_id] = object_id - - for item in entity_info["parents"]: - entity_id = item["entityId"] - all_entity_ids.add(entity_id) - parent_id = item["parentId"] - if not parent_id: - project_id = entity_id - else: - entity_ids_by_parent_id[parent_id].add(entity_id) - - hier_attrs = self.get_hierarchical_configurations( + attrs_by_obj_id, hier_attrs = self._get_attrs_configurations( session, interest_attributes ) - if not hier_attrs: + # Skip if attributes are not available + # - there is nothing to sync + if not attrs_by_obj_id or not hier_attrs: return - hier_attrs_key_by_id = { - attr_conf["id"]: attr_conf["key"] - for attr_conf in hier_attrs - } - default_values_by_key = { - attr_conf["key"]: attr_conf["default"] - for attr_conf in hier_attrs - } + entity_ids_by_parent_id = collections.defaultdict(set) + all_entity_ids = set() + for entity_info in entities_info: + entity_id = None + for item in entity_info["parents"]: + item_id = item["entityId"] + all_entity_ids.add(item_id) + if entity_id is not None: + entity_ids_by_parent_id[item_id].add(entity_id) + entity_id = item_id - values = query_custom_attributes( - session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + attr_ids = {attr["id"] for attr in hier_attrs} + for attrs in attrs_by_obj_id.values(): + attr_ids |= {attr["id"] for attr in attrs} + + # Query real custom attribute values + # - we have to know what are the real values, if are set and to what + # value + value_items = query_custom_attributes( + session, attr_ids, all_entity_ids, True ) - values_per_entity_id = {} - for entity_id in all_entity_ids: - values_per_entity_id[entity_id] = {} - for attr_name in interest_attributes: - values_per_entity_id[entity_id][attr_name] = None - - for item in values: - entity_id = item["entity_id"] - key = hier_attrs_key_by_id[item["configuration_id"]] - values_per_entity_id[entity_id][key] = item["value"] - - fill_queue = collections.deque() - fill_queue.append((project_id, default_values_by_key)) - while fill_queue: - item = fill_queue.popleft() - entity_id, values_by_key = item - entity_values = values_per_entity_id[entity_id] - new_values_by_key = copy.deepcopy(values_by_key) - for key, value in values_by_key.items(): - current_value = entity_values[key] - if current_value is None: - entity_values[key] = value - else: - new_values_by_key[key] = current_value - - for child_id in entity_ids_by_parent_id[entity_id]: - fill_queue.append((child_id, new_values_by_key)) - - for entity_id in entity_ids: - entity_changes = {} - for key, value in values_per_entity_id[entity_id].items(): - if value is not None: - entity_changes[key] = value - - if not entity_changes: - continue - - interesting_data[entity_id] = entity_changes - object_id = object_id_by_entity_id[entity_id] - if object_id not in changed_keys_by_object_id: - changed_keys_by_object_id[object_id] = set() - changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) - - def get_current_values( - self, - session, - attr_ids, - entity_ids, - task_entity_ids, - hier_attrs - ): - current_values_by_id = {} - if not attr_ids or not entity_ids: - return current_values_by_id - - for entity_id in entity_ids: - current_values_by_id[entity_id] = {} - for attr_id in attr_ids: - current_values_by_id[entity_id][attr_id] = ( - ftrack_api.symbol.NOT_SET - ) - - values = query_custom_attributes( - session, attr_ids, entity_ids, True - ) - - for item in values: + 
real_values_by_entity_id = collections.defaultdict(dict) + for item in value_items: entity_id = item["entity_id"] attr_id = item["configuration_id"] - if entity_id in task_entity_ids and attr_id in hier_attrs: - continue + real_values_by_entity_id[entity_id][attr_id] = item["value"] - if entity_id not in current_values_by_id: - current_values_by_id[entity_id] = {} - current_values_by_id[entity_id][attr_id] = item["value"] - return current_values_by_id + hier_values_by_entity_id = {} + default_values = { + attr["id"]: attr["default"] + for attr in hier_attrs + } + hier_queue = collections.deque() + hier_queue.append((default_values, [project_id])) + while hier_queue: + parent_values, entity_ids = hier_queue.popleft() + for entity_id in entity_ids: + entity_values = copy.deepcopy(parent_values) + real_values = real_values_by_entity_id[entity_id] + for attr_id, value in real_values.items(): + entity_values[attr_id] = value + hier_values_by_entity_id[entity_id] = entity_values + hier_queue.append( + (entity_values, entity_ids_by_parent_id[entity_id]) + ) - def get_entities(self, session, interesting_data, interest_object_ids): - return session.query(( - "select id from TypedContext" - " where id in ({}) and object_type_id in ({})" - ).format( - self.join_query_keys(interesting_data.keys()), - self.join_query_keys(interest_object_ids) - )).all() - - def get_task_entities(self, session, interesting_data): - return session.query( - "select id, parent_id from Task where parent_id in ({})".format( - self.join_query_keys(interesting_data.keys()) - ) - ).all() - - def attrs_configurations(self, session, object_ids, interest_attributes): - attrs = session.query(self.cust_attrs_query.format( - self.join_query_keys(interest_attributes), - self.join_query_keys(object_ids) - )).all() - - output = {} - hiearchical = {} - for attr in attrs: - if attr["is_hierarchical"]: - hiearchical[attr["key"]] = attr["id"] - continue - obj_id = attr["object_type_id"] - if obj_id not in output: - output[obj_id] = {} - output[obj_id][attr["key"]] = attr["id"] - return output, hiearchical - - def get_hierarchical_configurations(self, session, interest_attributes): - hier_attr_query = ( - "select id, key, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where key in ({}) and is_hierarchical is true" + self.propagate_attribute_changes( + session, + interest_attributes, + entities_info, + attrs_by_obj_id, + hier_attrs, + real_values_by_entity_id, + hier_values_by_entity_id, ) - if not interest_attributes: - return [] - return list(session.query(hier_attr_query.format( - self.join_query_keys(interest_attributes), - )).all()) + + def launch(self, session, event): + filtered_entities_info = self.filter_entities_info(event) + if not filtered_entities_info: + return + + for project_id, entities_info in filtered_entities_info.items(): + self.process_by_project(session, event, project_id, entities_info) def register(session): - PushFrameValuesToTaskEvent(session).register() + PushHierValuesToNonHierEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py index 99ad3aec37..358a8d2310 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py +++ b/openpype/modules/ftrack/event_handlers_server/event_radio_buttons.py @@ -7,7 +7,7 @@ class RadioButtons(BaseEvent): ignore_me = True def launch(self, session, event): - '''Provides a readio button behaviour to any 
bolean attribute in + '''Provides a radio button behaviour to any boolean attribute in radio_button group.''' # start of event procedure ---------------------------------- diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index e549de7ed0..0aa0b9f9f5 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -787,7 +787,7 @@ class SyncToAvalonEvent(BaseEvent): # Filter updates where name is changing for ftrack_id, ent_info in updated.items(): ent_keys = ent_info["keys"] - # Seprate update info from rename + # Separate update info from rename if "name" not in ent_keys: continue @@ -827,7 +827,7 @@ class SyncToAvalonEvent(BaseEvent): # 5.) Process updated self.process_updated() time_6 = time.time() - # 6.) Process changes in hierarchy or hier custom attribues + # 6.) Process changes in hierarchy or hier custom attributes self.process_hier_cleanup() time_7 = time.time() self.process_task_updates() @@ -973,7 +973,7 @@ class SyncToAvalonEvent(BaseEvent): except Exception: # TODO logging # TODO report - self.process_session.rolback() + self.process_session.rollback() ent_path_items = [self.cur_project["full_name"]] ent_path_items.extend([ par for par in avalon_entity["data"]["parents"] @@ -1016,7 +1016,7 @@ class SyncToAvalonEvent(BaseEvent): except Exception: # TODO logging # TODO report - self.process_session.rolback() + self.process_session.rollback() error_msg = ( "Couldn't update custom attributes after recreation" " of entity in Ftrack" @@ -1094,7 +1094,7 @@ class SyncToAvalonEvent(BaseEvent): def check_names_synchronizable(self, names): """Check if entities with specific names are importable. - This check should happend after removing entity or renaming entity. + This check should happen after removing entity or renaming entity. When entity was removed or renamed then it's name is possible to sync. """ joined_passed_names = ", ".join( @@ -1338,7 +1338,7 @@ class SyncToAvalonEvent(BaseEvent): try: self.process_session.commit() except Exception: - self.process_session.rolback() + self.process_session.rollback() # TODO logging # TODO report error_msg = ( @@ -1743,7 +1743,7 @@ class SyncToAvalonEvent(BaseEvent): def process_moved(self): """ - Handles moved entities to different place in hiearchy. + Handles moved entities to different place in hierarchy. (Not tasks - handled separately.) """ if not self.ftrack_moved: @@ -1792,7 +1792,7 @@ class SyncToAvalonEvent(BaseEvent): self.log.warning("{} <{}>".format(error_msg, ent_path)) continue - # THIS MUST HAPPEND AFTER CREATING NEW ENTITIES !!!! + # THIS MUST HAPPEN AFTER CREATING NEW ENTITIES !!!! # - because may be moved to new created entity if "data" not in self.updates[mongo_id]: self.updates[mongo_id]["data"] = {} @@ -2323,7 +2323,7 @@ class SyncToAvalonEvent(BaseEvent): items.append("{} - \"{}\"".format(ent_path, value)) self.report_items["error"][fps_msg] = items - # Get dictionary with not None hierarchical values to pull to childs + # Get dictionary with not None hierarchical values to pull to children project_values = {} for key, value in ( entities_dict[ftrack_project_id]["hier_attrs"].items() @@ -2460,7 +2460,7 @@ class SyncToAvalonEvent(BaseEvent): def update_entities(self): """ Update Avalon entities by mongo bulk changes. 
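The `update_entities` docstring in this hunk describes collecting per-entity field changes and flushing them through a Mongo bulk write, where each change set becomes the `$set` part of an update command. A minimal sketch of that pattern with a hypothetical `updates` mapping — not the actual `SyncToAvalonEvent` code:

```python
import pymongo
from bson.objectid import ObjectId


def flush_updates(collection, updates):
    """Flush accumulated changes as one bulk write.

    `updates` maps a mongo id to a {dotted.field: value} dict; each entry
    is transferred to the "$set" part of an update command.
    """
    operations = [
        pymongo.UpdateOne({"_id": ObjectId(entity_id)}, {"$set": changes})
        for entity_id, changes in updates.items()
        if changes
    ]
    if operations:
        collection.bulk_write(operations)
```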
- Expects self.updates which are transfered to $set part of update + Expects self.updates which are transferred to $set part of update command. Resets self.updates afterwards. """ diff --git a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py index a0e039926e..25fa3b0535 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py +++ b/openpype/modules/ftrack/event_handlers_server/event_task_to_parent_status.py @@ -291,7 +291,7 @@ class TaskStatusToParent(BaseEvent): except Exception: session.rollback() self.log.warning( - "\"{}\" status couldnt be set to \"{}\"".format( + "\"{}\" status couldn't be set to \"{}\"".format( ent_path, new_status["name"] ), exc_info=True @@ -399,7 +399,7 @@ class TaskStatusToParent(BaseEvent): # For cases there are multiple tasks in changes # - task status which match any new status item by order in the - # list `single_match` is preffered + # list `single_match` is preferred best_order = len(single_match) best_order_status = None for task_entity in task_entities: diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index c4e48b92f0..9539a34f5e 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -10,11 +10,11 @@ from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY class UserAssigmentEvent(BaseEvent): """ - This script will intercept user assigment / de-assigment event and + This script will intercept user assignment / de-assignment event and run shell script, providing as much context as possible. It expects configuration file ``presets/ftrack/user_assigment_event.json``. 
- In it, you define paths to scripts to be run for user assigment event and + In it, you define paths to scripts to be run for user assignment event and for user-deassigment:: { "add": [ diff --git a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py index e36c3eecd9..fb40fd6417 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py +++ b/openpype/modules/ftrack/event_handlers_server/event_version_to_task_statuses.py @@ -102,7 +102,7 @@ class VersionToTaskStatus(BaseEvent): asset_version_entities.append(asset_version) task_ids.add(asset_version["task_id"]) - # Skipt if `task_ids` are empty + # Skip if `task_ids` are empty if not task_ids: return diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py index 102f04c956..30399b463d 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ b/openpype/modules/ftrack/event_handlers_user/action_applications.py @@ -124,6 +124,11 @@ class AppplicationsAction(BaseAction): if not avalon_project_apps: return False + settings = self.get_project_settings_from_event( + event, avalon_project_doc["name"]) + + only_available = settings["applications"]["only_available"] + items = [] for app_name in avalon_project_apps: app = self.application_manager.applications.get(app_name) @@ -133,6 +138,10 @@ class AppplicationsAction(BaseAction): if app.group.name in CUSTOM_LAUNCH_APP_GROUPS: continue + # Skip applications without valid executables + if only_available and not app.find_executable(): + continue + app_icon = app.icon if app_icon and self.icon_url: try: diff --git a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py index c7fb1af98b..06d572601d 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py +++ b/openpype/modules/ftrack/event_handlers_user/action_batch_task_creation.py @@ -7,7 +7,7 @@ from openpype_modules.ftrack.lib import BaseAction, statics_icon class BatchTasksAction(BaseAction): '''Batch Tasks action - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. `varaint` To group actions together, give them the same label and specify a unique variant per action. `identifier` a unique identifier for your action. 
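The `action_applications.py` hunk above introduces an `only_available` project setting that hides launcher entries whose executables cannot be located. A reduced sketch of that filter, with hypothetical stand-ins for the application manager objects (only the skip logic mirrors the diff):

```python
# Hypothetical minimal stand-ins; real App objects come from the
# application manager and search configured executable paths.
class App:
    def __init__(self, name, executable):
        self.name = name
        self._executable = executable

    def find_executable(self):
        return self._executable


def launcher_items(apps, only_available):
    items = []
    for app in apps:
        # Skip applications without valid executables when the
        # "only_available" setting is enabled.
        if only_available and not app.find_executable():
            continue
        items.append(app.name)
    return items


apps = [App("maya/2023", "/usr/autodesk/maya2023/bin/maya"),
        App("nuke/14-0", None)]
print(launcher_items(apps, only_available=True))  # ['maya/2023']
```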
diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index c19cfd1502..471a8c4182 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -82,9 +82,9 @@ config (dictionary) write_security_roles/read_security_roles (array of strings) - default: ["ALL"] - strings should be role names (e.g.: ["API", "Administrator"]) - - if set to ["ALL"] - all roles will be availabled + - if set to ["ALL"] - all roles will be available - if first is 'except' - roles will be set to all except roles in array - - Warning: Be carefull with except - roles can be different by company + - Warning: Be careful with except - roles can be different by company - example: write_security_roles = ["except", "User"] read_security_roles = ["ALL"] # (User is can only read) @@ -500,7 +500,7 @@ class CustomAttributes(BaseAction): data = {} # Get key, label, type data.update(self.get_required(cust_attr_data)) - # Get hierachical/ entity_type/ object_id + # Get hierarchical/ entity_type/ object_id data.update(self.get_entity_type(cust_attr_data)) # Get group, default, security roles data.update(self.get_optional(cust_attr_data)) diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index 9806f83773..cbeff5343f 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -51,7 +51,7 @@ class CreateFolders(BaseAction): }, { "type": "label", - "value": "With all chilren entities" + "value": "With all children entities" }, { "name": "children_included", diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index 03d029b0c1..72a5efbcfe 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -18,7 +18,7 @@ class DeleteAssetSubset(BaseAction): # Action label. label = "Delete Asset/Subsets" # Action description. - description = "Removes from Avalon with all childs and asset from Ftrack" + description = "Removes from Avalon with all children and asset from Ftrack" icon = statics_icon("ftrack", "action_icons", "DeleteAsset.svg") settings_key = "delete_asset_subset" diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index c543dc8834..ec14c6918b 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -27,7 +27,7 @@ class DeleteOldVersions(BaseAction): variant = "- Delete old versions" description = ( "Delete files from older publishes so project can be" - " archived with only lates versions." + " archived with only latest versions." 
+        " archived with only latest versions."
) icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") @@ -307,7 +307,7 @@ class DeleteOldVersions(BaseAction): file_path, seq_path = self.path_from_represenation(repre, anatomy) if file_path is None: self.log.warning(( - "Could not format path for represenation \"{}\"" + "Could not format path for representation \"{}\"" ).format(str(repre))) continue diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index a400c8f5f0..559de3a24d 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -601,7 +601,7 @@ class Delivery(BaseAction): return self.report(report_items) def report(self, report_items): - """Returns dict with final status of delivery (succes, fail etc.).""" + """Returns dict with final status of delivery (success, fail etc.).""" items = [] for msg, _items in report_items.items(): diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index fb1cdf340e..36d29db96b 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -246,7 +246,7 @@ class FillWorkfileAttributeAction(BaseAction): project_name = project_entity["full_name"] - # Find matchin asset documents and map them by ftrack task entities + # Find matching asset documents and map them by ftrack task entities # - result stored to 'asset_docs_with_task_entities' is list with # tuple `(asset document, [task entitis, ...])` # Quety all asset documents diff --git a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py index f489c0c54c..dd68c75f84 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_job_killer.py +++ b/openpype/modules/ftrack/event_handlers_user/action_job_killer.py @@ -54,14 +54,14 @@ class JobKiller(BaseAction): for job in jobs: try: data = json.loads(job["data"]) - desctiption = data["description"] + description = data["description"] except Exception: - desctiption = "*No description*" + description = "*No description*" user_id = job["user_id"] username = usernames_by_id.get(user_id) or "Unknown user" created = job["created_at"].strftime('%d.%m.%Y %H:%M:%S') label = "{} - {} - {}".format( - username, desctiption, created + username, description, created ) item_label = { "type": "label", diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index e825198180..19d5701e08 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -24,7 +24,7 @@ class PrepareProjectLocal(BaseAction): settings_key = "prepare_project" - # Key to store info about trigerring create folder structure + # Key to store info about triggering create folder structure create_project_structure_key = "create_folder_structure" create_project_structure_identifier = "create.project.structure" item_splitter = {"type": "label", "value": "---"} @@ -146,7 +146,7 @@ class PrepareProjectLocal(BaseAction): root_items.append({ "type": "label", "value": ( - "
<br/><br/> NOTE: Roots are crutial for path filling"
+                    "<br/><br/> NOTE: Roots are crucial for path filling"
                     " (and creating folder structure).<br/><br/>
" ) }) diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index d05f0c47f6..39cf33d605 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -66,7 +66,7 @@ class RVAction(BaseAction): def get_components_from_entity(self, session, entity, components): """Get components from various entity types. - The components dictionary is modifid in place, so nothing is returned. + The components dictionary is modified in place, so nothing is returned. Args: entity (Ftrack entity) diff --git a/openpype/modules/ftrack/event_handlers_user/action_seed.py b/openpype/modules/ftrack/event_handlers_user/action_seed.py index 4021d70c0a..657cd07a9f 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_seed.py +++ b/openpype/modules/ftrack/event_handlers_user/action_seed.py @@ -325,8 +325,8 @@ class SeedDebugProject(BaseAction): ): index = 0 - self.log.debug("*** Commiting Assets") - self.log.debug("Commiting entities. {}/{}".format( + self.log.debug("*** Committing Assets") + self.log.debug("Committing entities. {}/{}".format( created_entities, to_create_length )) self.session.commit() @@ -414,8 +414,8 @@ class SeedDebugProject(BaseAction): ): index = 0 - self.log.debug("*** Commiting Shots") - self.log.debug("Commiting entities. {}/{}".format( + self.log.debug("*** Committing Shots") + self.log.debug("Committing entities. {}/{}".format( created_entities, to_create_length )) self.session.commit() @@ -423,7 +423,7 @@ class SeedDebugProject(BaseAction): def temp_commit(self, index, created_entities, to_create_length): if index < self.max_entities_created_at_one_commit: return False - self.log.debug("Commiting {} entities. {}/{}".format( + self.log.debug("Committing {} entities. {}/{}".format( index, created_entities, to_create_length )) self.session.commit() diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 8748f426bd..c9e0901623 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -184,7 +184,7 @@ class StoreThumbnailsToAvalon(BaseAction): self.db_con.install() for entity in entities: - # Skip if entity is not AssetVersion (never should happend, but..) + # Skip if entity is not AssetVersion (should never happen, but..) if entity.entity_type.lower() != "assetversion": continue diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py index 6f14f8428d..d61b5f0b26 100644 --- a/openpype/modules/ftrack/ftrack_module.py +++ b/openpype/modules/ftrack/ftrack_module.py @@ -64,6 +64,16 @@ class FtrackModule( self._timers_manager_module = None def get_ftrack_url(self): + """Resolved ftrack url. + + Resolving is trying to fill missing information in url and tried to + connect to the server. + + Returns: + Union[str, None]: Final variant of url or None if url could not be + reached. + """ + if self._ftrack_url is _URL_NOT_SET: self._ftrack_url = resolve_ftrack_url( self._settings_ftrack_url, @@ -73,8 +83,19 @@ class FtrackModule( ftrack_url = property(get_ftrack_url) + @property + def settings_ftrack_url(self): + """Ftrack url from settings in a format as it is. + + Returns: + str: Ftrack url from settings. 
+ """ + + return self._settings_ftrack_url + def get_global_environments(self): """Ftrack's global environments.""" + return { "FTRACK_SERVER": self.ftrack_url } @@ -510,7 +531,10 @@ def resolve_ftrack_url(url, logger=None): url = "https://" + url ftrack_url = None - if not url.endswith("ftrackapp.com"): + if url and _check_ftrack_url(url): + ftrack_url = url + + if not ftrack_url and not url.endswith("ftrackapp.com"): ftrackapp_url = url + ".ftrackapp.com" if _check_ftrack_url(ftrackapp_url): ftrack_url = ftrackapp_url diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 25ebad6658..77f479ee20 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -33,7 +33,7 @@ class MongoPermissionsError(Exception): """Is used when is created multiple objects of same RestApi class.""" def __init__(self, message=None): if not message: - message = "Exiting because have issue with acces to MongoDB" + message = "Exiting because have issue with access to MongoDB" super().__init__(message) @@ -316,7 +316,7 @@ def main_loop(ftrack_url): statuser_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not statuser_thread.isAlive(): + elif not statuser_thread.is_alive(): statuser_thread.join() statuser_thread = None ftrack_accessible = False @@ -340,7 +340,7 @@ def main_loop(ftrack_url): return 1 # ====== STORER ======= - # Run backup thread which does not requeire mongo to work + # Run backup thread which does not require mongo to work if storer_thread is None: if storer_failed_count < max_fail_count: storer_thread = socket_thread.SocketThread( @@ -359,7 +359,7 @@ def main_loop(ftrack_url): storer_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not storer_thread.isAlive(): + elif not storer_thread.is_alive(): if storer_thread.mongo_error: raise MongoPermissionsError() storer_thread.join() @@ -396,10 +396,10 @@ def main_loop(ftrack_url): processor_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not processor_thread.isAlive(): + elif not processor_thread.is_alive(): if processor_thread.mongo_error: raise Exception( - "Exiting because have issue with acces to MongoDB" + "Exiting because have issue with access to MongoDB" ) processor_thread.join() processor_thread = None diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index eb64063fab..2226c85ef9 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -196,7 +196,7 @@ class ProcessEventHub(SocketBaseEventHub): {"pype_data.is_processed": False} ).sort( [("pype_data.stored", pymongo.ASCENDING)] - ) + ).limit(100) found = False for event_data in not_processed_events: diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 0341c25717..8b4c4619a1 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -891,7 +891,7 @@ class SyncEntitiesFactory: parent_dict = self.entities_dict.get(parent_id, {}) for child_id in parent_dict.get("children", []): - # keep original `remove` value for all childs + # keep original `remove` value for all children _remove = (remove is True) if not _remove: if self.entities_dict[child_id]["avalon_attrs"].get( @@ -1191,8 +1191,8 @@ class SyncEntitiesFactory: avalon_hier = [] for item in items: 
value = item["value"] - # WARNING It is not possible to propage enumerate hierachical - # attributes with multiselection 100% right. Unseting all values + # WARNING It is not possible to propagate enumerate hierarchical + # attributes with multiselection 100% right. Unsetting all values # will cause inheritance from parent. if ( value is None @@ -1231,7 +1231,7 @@ class SyncEntitiesFactory: items.append("{} - \"{}\"".format(ent_path, value)) self.report_items["error"][fps_msg] = items - # Get dictionary with not None hierarchical values to pull to childs + # Get dictionary with not None hierarchical values to pull to children top_id = self.ft_project_id project_values = {} for key, value in self.entities_dict[top_id]["hier_attrs"].items(): @@ -1749,7 +1749,7 @@ class SyncEntitiesFactory: # TODO logging ent_path = self.get_ent_path(ftrack_id) msg = ( - " It is not possible" + " It is not possible" " to change the hierarchy of an entity or it's parents," " if it already contained published data." ) @@ -2584,8 +2584,8 @@ class SyncEntitiesFactory: # # ent_dict = self.entities_dict[found_by_name_id] - # TODO report - CRITICAL entity with same name alread exists in - # different hierarchy - can't recreate entity + # TODO report - CRITICAL entity with same name already exists + # in different hierarchy - can't recreate entity continue _vis_parent = deleted_entity["data"]["visualParent"] diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py index 2f53815368..3e40bb02f2 100644 --- a/openpype/modules/ftrack/lib/custom_attributes.py +++ b/openpype/modules/ftrack/lib/custom_attributes.py @@ -65,7 +65,7 @@ def get_openpype_attr(session, split_hierarchical=True, query_keys=None): cust_attrs_query = ( "select {}" " from CustomAttributeConfiguration" - # Kept `pype` for Backwards Compatiblity + # Kept `pype` for Backwards Compatibility " where group.name in (\"pype\", \"{}\")" ).format(", ".join(query_keys), CUST_ATTR_GROUP) all_avalon_attr = session.query(cust_attrs_query).all() diff --git a/openpype/modules/ftrack/lib/ftrack_action_handler.py b/openpype/modules/ftrack/lib/ftrack_action_handler.py index b24fe5f12a..1be4353b26 100644 --- a/openpype/modules/ftrack/lib/ftrack_action_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_action_handler.py @@ -12,7 +12,7 @@ def statics_icon(*icon_statics_file_parts): class BaseAction(BaseHandler): '''Custom Action base class - `label` a descriptive string identifing your action. + `label` a descriptive string identifying your action. `varaint` To group actions together, give them the same label and specify a unique variant per action. @@ -234,6 +234,10 @@ class BaseAction(BaseHandler): if not settings_roles: return default + user_roles = { + role_name.lower() + for role_name in user_roles + } for role_name in settings_roles: if role_name.lower() in user_roles: return True @@ -264,8 +268,15 @@ class BaseAction(BaseHandler): return user_entity @classmethod - def get_user_roles_from_event(cls, session, event): - """Query user entity from event.""" + def get_user_roles_from_event(cls, session, event, lower=True): + """Get user roles based on data in event. + + Args: + session (ftrack_api.Session): Prepared ftrack session. + event (ftrack_api.event.Event): Event which is processed. + lower (Optional[bool]): Lower the role names. Default 'True'. 
+ """ + not_set = object() user_roles = event["data"].get("user_roles", not_set) @@ -273,7 +284,10 @@ class BaseAction(BaseHandler): user_roles = [] user_entity = cls.get_user_entity_from_event(session, event) for role in user_entity["user_security_roles"]: - user_roles.append(role["security_role"]["name"].lower()) + role_name = role["security_role"]["name"] + if lower: + role_name = role_name.lower() + user_roles.append(role_name) event["data"]["user_roles"] = user_roles return user_roles @@ -322,7 +336,8 @@ class BaseAction(BaseHandler): if not settings.get(self.settings_enabled_key, True): return False - user_role_list = self.get_user_roles_from_event(session, event) + user_role_list = self.get_user_roles_from_event( + session, event, lower=False) if not self.roles_check(settings.get("role_list"), user_role_list): return False return True diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py index c0b03f8a41..55400c22ab 100644 --- a/openpype/modules/ftrack/lib/ftrack_base_handler.py +++ b/openpype/modules/ftrack/lib/ftrack_base_handler.py @@ -30,7 +30,7 @@ class PreregisterException(Exception): class BaseHandler(object): '''Custom Action base class -