diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000000..b30f3b2499 --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,326 @@ +{ + "projectName": "OpenPype", + "projectOwner": "pypeclub", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "README.md" + ], + "imageSize": 100, + "commit": true, + "commitConvention": "none", + "contributors": [ + { + "login": "mkolar", + "name": "Milan Kolar", + "avatar_url": "https://avatars.githubusercontent.com/u/3333008?v=4", + "profile": "http://pype.club/", + "contributions": [ + "code", + "doc", + "infra", + "business", + "content", + "fundingFinding", + "maintenance", + "projectManagement", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jakubjezek001", + "name": "Jakub Ježek", + "avatar_url": "https://avatars.githubusercontent.com/u/40640033?v=4", + "profile": "https://www.linkedin.com/in/jakubjezek79", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "antirotor", + "name": "Ondřej Samohel", + "avatar_url": "https://avatars.githubusercontent.com/u/33513211?v=4", + "profile": "https://github.com/antirotor", + "contributions": [ + "code", + "doc", + "infra", + "content", + "review", + "maintenance", + "mentoring", + "projectManagement", + "question" + ] + }, + { + "login": "iLLiCiTiT", + "name": "Jakub Trllo", + "avatar_url": "https://avatars.githubusercontent.com/u/43494761?v=4", + "profile": "https://github.com/iLLiCiTiT", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "kalisp", + "name": "Petr Kalis", + "avatar_url": "https://avatars.githubusercontent.com/u/4457962?v=4", + "profile": "https://github.com/kalisp", + "contributions": [ + "code", + "doc", + "infra", + "review", + "maintenance", + "question" + ] + }, + { + "login": "64qam", + "name": "64qam", + "avatar_url": "https://avatars.githubusercontent.com/u/26925793?v=4", + "profile": "https://github.com/64qam", + "contributions": [ + "code", + "review", + "doc", + "infra", + "projectManagement", + "maintenance", + "content", + "userTesting" + ] + }, + { + "login": "BigRoy", + "name": "Roy Nieterau", + "avatar_url": "https://avatars.githubusercontent.com/u/2439881?v=4", + "profile": "http://www.colorbleed.nl/", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "tokejepsen", + "name": "Toke Jepsen", + "avatar_url": "https://avatars.githubusercontent.com/u/1860085?v=4", + "profile": "https://github.com/tokejepsen", + "contributions": [ + "code", + "doc", + "review", + "mentoring", + "question" + ] + }, + { + "login": "jrsndl", + "name": "Jiri Sindelar", + "avatar_url": "https://avatars.githubusercontent.com/u/45896205?v=4", + "profile": "https://github.com/jrsndl", + "contributions": [ + "code", + "review", + "doc", + "content", + "tutorial", + "userTesting" + ] + }, + { + "login": "simonebarbieri", + "name": "Simone Barbieri", + "avatar_url": "https://avatars.githubusercontent.com/u/1087869?v=4", + "profile": "https://barbierisimone.com/", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "karimmozilla", + "name": "karimmozilla", + "avatar_url": "https://avatars.githubusercontent.com/u/82811760?v=4", + "profile": "http://karimmozilla.xyz/", + "contributions": [ + "code" + ] + }, + { + "login": "Allan-I", + "name": "Allan I. 
A.", + "avatar_url": "https://avatars.githubusercontent.com/u/76656700?v=4", + "profile": "https://github.com/Allan-I", + "contributions": [ + "code" + ] + }, + { + "login": "m-u-r-p-h-y", + "name": "murphy", + "avatar_url": "https://avatars.githubusercontent.com/u/352795?v=4", + "profile": "https://www.linkedin.com/in/mmuurrpphhyy/", + "contributions": [ + "code", + "review", + "userTesting", + "doc", + "projectManagement" + ] + }, + { + "login": "aardschok", + "name": "Wijnand Koreman", + "avatar_url": "https://avatars.githubusercontent.com/u/26920875?v=4", + "profile": "https://github.com/aardschok", + "contributions": [ + "code" + ] + }, + { + "login": "zhoub", + "name": "Bo Zhou", + "avatar_url": "https://avatars.githubusercontent.com/u/1798206?v=4", + "profile": "http://jedimaster.cnblogs.com/", + "contributions": [ + "code" + ] + }, + { + "login": "ClementHector", + "name": "Clément Hector", + "avatar_url": "https://avatars.githubusercontent.com/u/7068597?v=4", + "profile": "https://www.linkedin.com/in/clementhector/", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "davidlatwe", + "name": "David Lai", + "avatar_url": "https://avatars.githubusercontent.com/u/3357009?v=4", + "profile": "https://twitter.com/davidlatwe", + "contributions": [ + "code", + "review" + ] + }, + { + "login": "2-REC", + "name": "Derek ", + "avatar_url": "https://avatars.githubusercontent.com/u/42170307?v=4", + "profile": "https://github.com/2-REC", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "gabormarinov", + "name": "Gábor Marinov", + "avatar_url": "https://avatars.githubusercontent.com/u/8620515?v=4", + "profile": "https://github.com/gabormarinov", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "icyvapor", + "name": "icyvapor", + "avatar_url": "https://avatars.githubusercontent.com/u/1195278?v=4", + "profile": "https://github.com/icyvapor", + "contributions": [ + "code", + "doc" + ] + }, + { + "login": "jlorrain", + "name": "Jérôme LORRAIN", + "avatar_url": "https://avatars.githubusercontent.com/u/7955673?v=4", + "profile": "https://github.com/jlorrain", + "contributions": [ + "code" + ] + }, + { + "login": "dmo-j-cube", + "name": "David Morris-Oliveros", + "avatar_url": "https://avatars.githubusercontent.com/u/89823400?v=4", + "profile": "https://github.com/dmo-j-cube", + "contributions": [ + "code" + ] + }, + { + "login": "BenoitConnan", + "name": "BenoitConnan", + "avatar_url": "https://avatars.githubusercontent.com/u/82808268?v=4", + "profile": "https://github.com/BenoitConnan", + "contributions": [ + "code" + ] + }, + { + "login": "Malthaldar", + "name": "Malthaldar", + "avatar_url": "https://avatars.githubusercontent.com/u/33671694?v=4", + "profile": "https://github.com/Malthaldar", + "contributions": [ + "code" + ] + }, + { + "login": "svenneve", + "name": "Sven Neve", + "avatar_url": "https://avatars.githubusercontent.com/u/2472863?v=4", + "profile": "http://www.svenneve.com/", + "contributions": [ + "code" + ] + }, + { + "login": "zafrs", + "name": "zafrs", + "avatar_url": "https://avatars.githubusercontent.com/u/26890002?v=4", + "profile": "https://github.com/zafrs", + "contributions": [ + "code" + ] + }, + { + "login": "Tilix4", + "name": "Félix David", + "avatar_url": "https://avatars.githubusercontent.com/u/22875539?v=4", + "profile": "http://felixdavid.com/", + "contributions": [ + "code", + "doc" + ] + } + ], + "contributorsPerLine": 7, + "skipCi": true +} diff --git a/.github/ISSUE_TEMPLATE/bug_report.md 
b/.github/ISSUE_TEMPLATE/bug_report.md index 6ed6ae428c..96e768e420 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -6,6 +6,8 @@ labels: bug assignees: '' --- +**Running version** +[e.g. 3.14.1-nightly.2] **Describe the bug** A clear and concise description of what the bug is. diff --git a/.github/workflows/automate-projects.yml b/.github/workflows/automate-projects.yml deleted file mode 100644 index b605071c2d..0000000000 --- a/.github/workflows/automate-projects.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Automate Projects - -on: - issues: - types: [opened, labeled] -env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - -jobs: - assign_one_project: - runs-on: ubuntu-latest - name: Assign to One Project - steps: - - name: Assign NEW bugs to triage - uses: srggrs/assign-one-project-github-action@1.2.0 - if: contains(github.event.issue.labels.*.name, 'bug') - with: - project: 'https://github.com/pypeclub/pype/projects/2' - column_name: 'Needs triage' diff --git a/.github/workflows/milestone_assign.yml b/.github/workflows/milestone_assign.yml new file mode 100644 index 0000000000..3cbee51472 --- /dev/null +++ b/.github/workflows/milestone_assign.yml @@ -0,0 +1,28 @@ +name: Milestone - assign to PRs + +on: + pull_request_target: + types: [closed] + +jobs: + run_if_release: + if: startsWith(github.base_ref, 'release/') + runs-on: ubuntu-latest + steps: + - name: 'Assign Milestone [next-minor]' + if: github.event.pull_request.milestone == null + uses: zoispag/action-assign-milestone@v1 + with: + repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" + milestone: 'next-minor' + + run_if_develop: + if: ${{ github.base_ref == 'develop' }} + runs-on: ubuntu-latest + steps: + - name: 'Assign Milestone [next-patch]' + if: github.event.pull_request.milestone == null + uses: zoispag/action-assign-milestone@v1 + with: + repo-token: "${{ secrets.YNPUT_BOT_TOKEN }}" + milestone: 'next-patch' diff --git a/.github/workflows/milestone_create.yml b/.github/workflows/milestone_create.yml new file mode 100644 index 0000000000..632704e64a --- /dev/null +++ b/.github/workflows/milestone_create.yml @@ -0,0 +1,62 @@ +name: Milestone - create default + +on: + milestone: + types: [closed, edited] + +jobs: + generate-next-patch: + runs-on: ubuntu-latest + steps: + - name: 'Get Milestones' + uses: "WyriHaximus/github-action-get-milestones@master" + id: milestones + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" + + - run: echo "number=$(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')" >> $GITHUB_OUTPUT + id: querymilestone + env: + MILESTONES: ${{ steps.milestones.outputs.milestones }} + MILESTONE: "next-patch" + + - name: Read output + run: | + echo "${{ steps.querymilestone.outputs.number }}" + + - name: 'Create `next-patch` milestone' + if: steps.querymilestone.outputs.number == '' + id: createmilestone + uses: "WyriHaximus/github-action-create-milestone@v1" + with: + title: 'next-patch' + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" + + generate-next-minor: + runs-on: ubuntu-latest + steps: + - name: 'Get Milestones' + uses: "WyriHaximus/github-action-get-milestones@master" + id: milestones + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" + + - run: echo "number=$(printenv MILESTONES | jq --arg MILESTONE $(printenv MILESTONE) '.[] | select(.title == $MILESTONE) | .number')" >> $GITHUB_OUTPUT + id: querymilestone + env: + MILESTONES: ${{ steps.milestones.outputs.milestones }} + MILESTONE: "next-minor" + + - name: 
Read output + run: | + echo "${{ steps.querymilestone.outputs.number }}" + + - name: 'Create `next-minor` milestone' + if: steps.querymilestone.outputs.number == '' + id: createmilestone + uses: "WyriHaximus/github-action-create-milestone@v1" + with: + title: 'next-minor' + env: + GITHUB_TOKEN: "${{ secrets.YNPUT_BOT_TOKEN }}" diff --git a/.github/workflows/nightly_merge.yml b/.github/workflows/nightly_merge.yml index 1d36c89cc7..1776d7a464 100644 --- a/.github/workflows/nightly_merge.yml +++ b/.github/workflows/nightly_merge.yml @@ -14,10 +14,10 @@ jobs: - name: 🚛 Checkout Code uses: actions/checkout@v2 - - name: 🔨 Merge develop to main + - name: 🔨 Merge develop to main uses: everlytic/branch-merge@1.1.0 with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'develop' target_branch: 'main' commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' @@ -26,4 +26,4 @@ jobs: uses: benc-uk/workflow-dispatch@v1 with: workflow: Nightly Prerelease - token: ${{ secrets.ADMIN_TOKEN }} \ No newline at end of file + token: ${{ secrets.YNPUT_BOT_TOKEN }} diff --git a/.github/workflows/prerelease.yml b/.github/workflows/prerelease.yml index d9b4d8089c..571b0339e1 100644 --- a/.github/workflows/prerelease.yml +++ b/.github/workflows/prerelease.yml @@ -17,7 +17,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.9 - name: Install Python requirements run: pip install gitpython semver PyGithub @@ -25,43 +25,15 @@ jobs: - name: 🔎 Determine next version type id: version_type run: | - TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.GITHUB_TOKEN }}) - - echo ::set-output name=type::$TYPE + TYPE=$(python ./tools/ci_tools.py --bump --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) + echo "type=${TYPE}" >> $GITHUB_OUTPUT - name: 💉 Inject new version into files id: version if: steps.version_type.outputs.type != 'skip' run: | - RESULT=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.GITHUB_TOKEN }}) - - echo ::set-output name=next_tag::$RESULT - - - name: "✏️ Generate full changelog" - if: steps.version_type.outputs.type != 'skip' - id: generate-full-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: "3.0.0" - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - unreleasedLabel: ${{ steps.version.outputs.next_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" - - - name: "🖨️ Print changelog to console" - if: steps.version_type.outputs.type != 'skip' - run: cat CHANGELOG.md + NEW_VERSION_TAG=$(python ./tools/ci_tools.py --nightly --github_token ${{ secrets.YNPUT_BOT_TOKEN }}) + echo "next_tag=${NEW_VERSION_TAG}" >> $GITHUB_OUTPUT - name: 💾 Commit and Tag id: git_commit 
@@ -69,29 +41,27 @@ jobs: run: | git config user.email ${{ secrets.CI_EMAIL }} git config user.name ${{ secrets.CI_USER }} - cd repos/avalon-core git checkout main git pull - cd ../.. git add . git commit -m "[Automated] Bump version" tag_name="CI/${{ steps.version.outputs.next_tag }}" echo $tag_name git tag -a $tag_name -m "nightly build" - + - name: Push to protected main branch - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} branch: main tags: true unprotect_reviews: true - - name: 🔨 Merge main back to develop + - name: 🔨 Merge main back to develop uses: everlytic/branch-merge@1.1.0 if: steps.version_type.outputs.type != 'skip' with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'main' target_branch: 'develop' - commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' \ No newline at end of file + commit_message_template: '[Automated] Merged {source_ref} into {target_branch}' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 917e6c884c..0b4c8af2c7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: Stable Release on: release: - types: + types: - prereleased jobs: @@ -13,47 +13,25 @@ jobs: steps: - name: 🚛 Checkout Code uses: actions/checkout@v2 - with: + with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v2 with: - python-version: 3.7 + python-version: 3.9 - name: Install Python requirements run: pip install gitpython semver PyGithub - name: 💉 Inject new version into files id: version run: | - echo ::set-output name=current_version::${GITHUB_REF#refs/*/} - RESULT=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/}) - LASTRELEASE=$(python ./tools/ci_tools.py --lastversion release) + NEW_VERSION=$(python ./tools/ci_tools.py --finalize ${GITHUB_REF#refs/*/}) + LAST_VERSION=$(python ./tools/ci_tools.py --lastversion release) - echo ::set-output name=last_release::$LASTRELEASE - echo ::set-output name=release_tag::$RESULT - - - name: "✏️ Generate full changelog" - if: steps.version.outputs.release_tag != 'skip' - id: generate-full-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: "3.0.0" - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - futureRelease: ${{ steps.version.outputs.release_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" + echo "current_version=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT + echo "last_release=${LAST_VERSION}" >> $GITHUB_OUTPUT + echo "release_tag=${NEW_VERSION}" >> $GITHUB_OUTPUT - name: 💾 Commit and Tag id: git_commit @@ -68,45 +46,19 @@ jobs: - name: 🔏 Push to protected main branch 
if: steps.version.outputs.release_tag != 'skip' - uses: CasperWA/push-protected@v2 + uses: CasperWA/push-protected@v2.10.0 with: - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} branch: main tags: true - unprotect_reviews: true - - - name: "✏️ Generate last changelog" - if: steps.version.outputs.release_tag != 'skip' - id: generate-last-changelog - uses: heinrichreimer/github-changelog-generator-action@v2.2 - with: - token: ${{ secrets.ADMIN_TOKEN }} - addSections: '{"documentation":{"prefix":"### 📖 Documentation","labels":["type: documentation"]},"tests":{"prefix":"### ✅ Testing","labels":["tests"]},"feature":{"prefix":"**🆕 New features**", "labels":["type: feature"]},"breaking":{"prefix":"**💥 Breaking**", "labels":["breaking"]},"enhancements":{"prefix":"**🚀 Enhancements**", "labels":["type: enhancement"]},"bugs":{"prefix":"**🐛 Bug fixes**", "labels":["type: bug"]},"deprecated":{"prefix":"**⚠️ Deprecations**", "labels":["depreciated"]}, "refactor":{"prefix":"**🔀 Refactored code**", "labels":["refactor"]}}' - issues: false - issuesWoLabels: false - sinceTag: ${{ steps.version.outputs.last_release }} - maxIssues: 100 - pullRequests: true - prWoLabels: false - author: false - unreleased: true - compareLink: true - stripGeneratorNotice: true - verbose: true - futureRelease: ${{ steps.version.outputs.release_tag }} - excludeTagsRegex: "CI/.+" - releaseBranch: "main" - stripHeaders: true - base: 'none' - + unprotect_reviews: true - name: 🚀 Github Release if: steps.version.outputs.release_tag != 'skip' uses: ncipollo/release-action@v1 with: - body: ${{ steps.generate-last-changelog.outputs.changelog }} tag: ${{ steps.version.outputs.release_tag }} - token: ${{ secrets.ADMIN_TOKEN }} + token: ${{ secrets.YNPUT_BOT_TOKEN }} - name: ☠ Delete Pre-release if: steps.version.outputs.release_tag != 'skip' @@ -114,11 +66,11 @@ jobs: with: tag: "${{ steps.version.outputs.current_version }}" - - name: 🔁 Merge main back to develop + - name: 🔁 Merge main back to develop if: steps.version.outputs.release_tag != 'skip' uses: everlytic/branch-merge@1.1.0 with: - github_token: ${{ secrets.ADMIN_TOKEN }} + github_token: ${{ secrets.YNPUT_BOT_TOKEN }} source_ref: 'main' target_branch: 'develop' - commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' \ No newline at end of file + commit_message_template: '[Automated] Merged release {source_ref} into {target_branch}' diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml index ac7279117a..064a4d47e0 100644 --- a/.github/workflows/test_build.yml +++ b/.github/workflows/test_build.yml @@ -18,7 +18,7 @@ jobs: runs-on: windows-latest strategy: matrix: - python-version: [3.7] + python-version: [3.9] steps: - name: 🚛 Checkout Code @@ -28,7 +28,7 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - + - name: 🧵 Install Requirements shell: pwsh run: | @@ -45,7 +45,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7] + python-version: [3.9] steps: - name: 🚛 Checkout Code @@ -64,27 +64,3 @@ jobs: run: | export SKIP_THIRD_PARTY_VALIDATION="1" ./tools/build.sh - - # MacOS-latest: - - # runs-on: macos-latest - # strategy: - # matrix: - # python-version: [3.7] - - # steps: - # - name: 🚛 Checkout Code - # uses: actions/checkout@v2 - - # - name: Set up Python - # uses: actions/setup-python@v2 - # with: - # python-version: ${{ matrix.python-version }} - - # - name: 🧵 Install Requirements - # run: | - # ./tools/create_env.sh - - # - name: 
🔨 Build - # run: | - # ./tools/build.sh \ No newline at end of file diff --git a/.gitignore b/.gitignore index fa3fae1ad2..18e7cd7bf2 100644 --- a/.gitignore +++ b/.gitignore @@ -70,6 +70,8 @@ coverage.xml ################## node_modules package-lock.json +package.json +yarn.lock openpype/premiere/ppro/js/debug.log @@ -100,3 +102,13 @@ website/.docusaurus .poetry/ .python-version +.editorconfig +.pre-commit-config.yaml +mypy.ini + +tools/run_eventserver.* + +# Developer tools +tools/dev_* + +.github_changelog_generator diff --git a/.gitmodules b/.gitmodules index 9920ceaad6..fe93791c4e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,7 @@ -[submodule "repos/avalon-core"] - path = repos/avalon-core - url = https://github.com/pypeclub/avalon-core.git \ No newline at end of file +[submodule "tools/modules/powershell/BurntToast"] + path = tools/modules/powershell/BurntToast + url = https://github.com/Windos/BurntToast.git + +[submodule "tools/modules/powershell/PSWriteColor"] + path = tools/modules/powershell/PSWriteColor + url = https://github.com/EvotecIT/PSWriteColor.git \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..eec388924e --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,12 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + - id: no-commit-to-branch + args: [ '--pattern', '^(?!((release|enhancement|feature|bugfix|documentation|tests|local|chore)\/[a-zA-Z0-9\-_]+)$).*' ] diff --git a/CHANGELOG.md b/CHANGELOG.md index abe9eaa3ce..0da167763b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,1193 @@ # Changelog -## [3.9.2-nightly.2](https://github.com/pypeclub/OpenPype/tree/HEAD) +## [3.15.0](https://github.com/ynput/OpenPype/tree/HEAD) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...HEAD) +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...HEAD) + +**Deprecated:** + +- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245) ### 📖 Documentation -- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) +- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342) +- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301) +- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979) + +**🆕 New features** + +- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284) +- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195) +- Nuke: refactor to latest publisher workfow [\#4006](https://github.com/ynput/OpenPype/pull/4006) +- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546) **🚀 Enhancements** +- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356) +- General: Loader and Creator plugins can be disabled [\#4310](https://github.com/ynput/OpenPype/pull/4310) +- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306) +- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295) +- Git: add pre-commit 
hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289) +- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263) +- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255) +- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208) +- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158) +- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149) +- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108) +- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092) +- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056) +- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051) +- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046) +- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989) +- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906) +- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667) + +**🐛 Bug fixes** + +- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354) +- PushToProject: Fix hierarchy of project change [\#4350](https://github.com/ynput/OpenPype/pull/4350) +- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347) +- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341) +- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339) +- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334) +- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333) +- General: Use class constants of QMessageBox [\#4332](https://github.com/ynput/OpenPype/pull/4332) +- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328) +- Extract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325) +- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322) +- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320) +- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318) +- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315) +- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312) +- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308) +- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303) +- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296) +- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247) +- Bug: Fixing version detection and filtering in Igniter [\#3914](https://github.com/ynput/OpenPype/pull/3914) +- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903) + +**🔀 Refactored code** + +- Remove redundant export\_alembic method. 
[\#4293](https://github.com/ynput/OpenPype/pull/4293) +- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237) + +**Merged pull requests:** + +- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346) +- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336) +- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437) + +## [3.14.10](https://github.com/ynput/OpenPype/tree/3.14.10) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10) + +**🆕 New features** + +- Global | Nuke: Creator placeholders in workfile template builder [\#4266](https://github.com/ynput/OpenPype/pull/4266) +- Slack: Added dynamic message [\#4265](https://github.com/ynput/OpenPype/pull/4265) +- Blender: Workfile Loader [\#4234](https://github.com/ynput/OpenPype/pull/4234) +- Unreal: Publishing and Loading for UAssets [\#4198](https://github.com/ynput/OpenPype/pull/4198) +- Publish: register publishes without copying them [\#4157](https://github.com/ynput/OpenPype/pull/4157) + +**🚀 Enhancements** + +- General: Added install method with docstring to HostBase [\#4298](https://github.com/ynput/OpenPype/pull/4298) +- Traypublisher: simple editorial multiple edl [\#4248](https://github.com/ynput/OpenPype/pull/4248) +- General: Extend 'IPluginPaths' to have more available methods [\#4214](https://github.com/ynput/OpenPype/pull/4214) +- Refactorization of folder coloring [\#4211](https://github.com/ynput/OpenPype/pull/4211) +- Flame - loading multilayer with controlled layer names [\#4204](https://github.com/ynput/OpenPype/pull/4204) + +**🐛 Bug fixes** + +- Unreal: fix missing `maintained_selection` call [\#4300](https://github.com/ynput/OpenPype/pull/4300) +- Ftrack: Fix receive of host ip on MacOs [\#4288](https://github.com/ynput/OpenPype/pull/4288) +- SiteSync: sftp connection failing when shouldn't be tested [\#4278](https://github.com/ynput/OpenPype/pull/4278) +- Deadline: fix default value for passing mongo url [\#4275](https://github.com/ynput/OpenPype/pull/4275) +- Scene Manager: Fix variable name [\#4268](https://github.com/ynput/OpenPype/pull/4268) +- Slack: notification fails because of missing published path [\#4264](https://github.com/ynput/OpenPype/pull/4264) +- hiero: creator gui with min max [\#4257](https://github.com/ynput/OpenPype/pull/4257) +- NiceCheckbox: Fix checker positioning in Python 2 [\#4253](https://github.com/ynput/OpenPype/pull/4253) +- Publisher: Fix 'CreatorType' not equal for Python 2 DCCs [\#4249](https://github.com/ynput/OpenPype/pull/4249) +- Deadline: fix dependencies [\#4242](https://github.com/ynput/OpenPype/pull/4242) +- Houdini: hotfix instance data access [\#4236](https://github.com/ynput/OpenPype/pull/4236) +- bugfix/image plane load error [\#4222](https://github.com/ynput/OpenPype/pull/4222) +- Hiero: thumbnail from multilayer exr [\#4209](https://github.com/ynput/OpenPype/pull/4209) + +**🔀 Refactored code** + +- Resolve: Use qtpy in Resolve [\#4254](https://github.com/ynput/OpenPype/pull/4254) +- Houdini: Use qtpy in Houdini [\#4252](https://github.com/ynput/OpenPype/pull/4252) +- Max: Use qtpy in Max [\#4251](https://github.com/ynput/OpenPype/pull/4251) +- Maya: Use qtpy in Maya [\#4250](https://github.com/ynput/OpenPype/pull/4250) +- Hiero: Use qtpy in Hiero [\#4240](https://github.com/ynput/OpenPype/pull/4240) +- Nuke: Use qtpy in Nuke 
[\#4239](https://github.com/ynput/OpenPype/pull/4239) +- Flame: Use qtpy in flame [\#4238](https://github.com/ynput/OpenPype/pull/4238) +- General: Legacy io not used in global plugins [\#4134](https://github.com/ynput/OpenPype/pull/4134) + +**Merged pull requests:** + +- Bump json5 from 1.0.1 to 1.0.2 in /website [\#4292](https://github.com/ynput/OpenPype/pull/4292) +- Maya: Fix validate frame range repair + fix create render with deadline disabled [\#4279](https://github.com/ynput/OpenPype/pull/4279) + + +## [3.14.9](https://github.com/pypeclub/OpenPype/tree/3.14.9) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.8...3.14.9) + +### 📖 Documentation + +- Documentation: Testing on Deadline [\#4185](https://github.com/pypeclub/OpenPype/pull/4185) +- Consistent Python version [\#4160](https://github.com/pypeclub/OpenPype/pull/4160) + +**🆕 New features** + +- Feature/op 4397 gl tf extractor for maya [\#4192](https://github.com/pypeclub/OpenPype/pull/4192) +- Maya: Extractor for Unreal SkeletalMesh [\#4174](https://github.com/pypeclub/OpenPype/pull/4174) +- 3dsmax: integration [\#4168](https://github.com/pypeclub/OpenPype/pull/4168) +- Blender: Extract Alembic Animations [\#4128](https://github.com/pypeclub/OpenPype/pull/4128) +- Unreal: Load Alembic Animations [\#4127](https://github.com/pypeclub/OpenPype/pull/4127) + +**🚀 Enhancements** + +- Houdini: Use new interface class name for publish host [\#4220](https://github.com/pypeclub/OpenPype/pull/4220) +- General: Default command for headless mode is interactive [\#4203](https://github.com/pypeclub/OpenPype/pull/4203) +- Maya: Enhanced ASS publishing [\#4196](https://github.com/pypeclub/OpenPype/pull/4196) +- Feature/op 3924 implement ass extractor [\#4188](https://github.com/pypeclub/OpenPype/pull/4188) +- File transactions: Source path is destination path [\#4184](https://github.com/pypeclub/OpenPype/pull/4184) +- Deadline: improve environment processing [\#4182](https://github.com/pypeclub/OpenPype/pull/4182) +- General: Comment per instance in Publisher [\#4178](https://github.com/pypeclub/OpenPype/pull/4178) +- Ensure Mongo database directory exists in Windows. [\#4166](https://github.com/pypeclub/OpenPype/pull/4166) +- Note about unrestricted execution on Windows. [\#4161](https://github.com/pypeclub/OpenPype/pull/4161) +- Maya: Enable thumbnail transparency on extraction. [\#4147](https://github.com/pypeclub/OpenPype/pull/4147) +- Maya: Disable viewport Pan/Zoom on playblast extraction. 
[\#4146](https://github.com/pypeclub/OpenPype/pull/4146) +- Maya: Optional viewport refresh on pointcache extraction [\#4144](https://github.com/pypeclub/OpenPype/pull/4144) +- CelAction: refactor integration to current openpype [\#4140](https://github.com/pypeclub/OpenPype/pull/4140) +- Maya: create and publish bounding box geometry [\#4131](https://github.com/pypeclub/OpenPype/pull/4131) +- Changed the UOpenPypePublishInstance to use the UDataAsset class [\#4124](https://github.com/pypeclub/OpenPype/pull/4124) +- General: Collection Audio speed up [\#4110](https://github.com/pypeclub/OpenPype/pull/4110) +- Maya: keep existing AOVs when creating render instance [\#4087](https://github.com/pypeclub/OpenPype/pull/4087) +- General: Oiio conversion multipart fix [\#4060](https://github.com/pypeclub/OpenPype/pull/4060) + +**🐛 Bug fixes** + +- Publisher: Signal type issues in Python 2 DCCs [\#4230](https://github.com/pypeclub/OpenPype/pull/4230) +- Blender: Fix Layout Family Versioning [\#4228](https://github.com/pypeclub/OpenPype/pull/4228) +- Blender: Fix Create Camera "Use selection" [\#4226](https://github.com/pypeclub/OpenPype/pull/4226) +- TrayPublisher - join needs list [\#4224](https://github.com/pypeclub/OpenPype/pull/4224) +- General: Event callbacks pass event to callbacks as expected [\#4210](https://github.com/pypeclub/OpenPype/pull/4210) +- Build: Revert .toml update of Gazu [\#4207](https://github.com/pypeclub/OpenPype/pull/4207) +- Nuke: fixed imageio node overrides subset filter [\#4202](https://github.com/pypeclub/OpenPype/pull/4202) +- Maya: pointcache [\#4201](https://github.com/pypeclub/OpenPype/pull/4201) +- Unreal: Support for Unreal Engine 5.1 [\#4199](https://github.com/pypeclub/OpenPype/pull/4199) +- General: Integrate thumbnail looks for thumbnail to multiple places [\#4181](https://github.com/pypeclub/OpenPype/pull/4181) +- Various minor bugfixes [\#4172](https://github.com/pypeclub/OpenPype/pull/4172) +- Nuke/Hiero: Remove tkinter library paths before launch [\#4171](https://github.com/pypeclub/OpenPype/pull/4171) +- Flame: vertical alignment of layers [\#4169](https://github.com/pypeclub/OpenPype/pull/4169) +- Nuke: correct detection of viewer and display [\#4165](https://github.com/pypeclub/OpenPype/pull/4165) +- Settings UI: Don't create QApplication if already exists [\#4156](https://github.com/pypeclub/OpenPype/pull/4156) +- General: Extract review handle start offset of sequences [\#4152](https://github.com/pypeclub/OpenPype/pull/4152) +- Maya: Maintain time connections on Alembic update. 
[\#4143](https://github.com/pypeclub/OpenPype/pull/4143) + +**🔀 Refactored code** + +- General: Use qtpy in modules and hosts UIs which are running in OpenPype process [\#4225](https://github.com/pypeclub/OpenPype/pull/4225) +- Tools: Use qtpy instead of Qt in standalone tools [\#4223](https://github.com/pypeclub/OpenPype/pull/4223) +- General: Use qtpy in settings UI [\#4215](https://github.com/pypeclub/OpenPype/pull/4215) + +**Merged pull requests:** + +- layout publish more than one container issue [\#4098](https://github.com/pypeclub/OpenPype/pull/4098) + +## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8) + +**🚀 Enhancements** + +- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139) +- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137) +- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129) +- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126) +- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115) +- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046) +- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148) + +**🐛 Bug fixes** + +- Ftrack: Fix occasional double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153) +- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136) +- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135) +- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117) + +## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) + +**🆕 New features** + +- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) + +**🚀 Enhancements** + +- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) +- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) +- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) +- Ftrack: Event server status give more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) +- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) +- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) +- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) +- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) +- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) +- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) +- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) +- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) +- Publisher: Better reset defer [\#4048](https://github.com/pypeclub/OpenPype/pull/4048) +- 
Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) + +**🐛 Bug fixes** + +- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) +- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) +- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) +- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) +- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) +- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) +- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) +- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) +- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) +- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) +- hiero: creator from settings with set maximum [\#4077](https://github.com/pypeclub/OpenPype/pull/4077) +- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) +- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) +- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) +- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) +- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) +- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) + +**🔀 Refactored code** + +- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) +- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) + +**Merged pull requests:** + +- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) +- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) +- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) +- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) +- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) + +## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) + +### 📖 Documentation + +- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) + +**🆕 New features** + +- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) + +**🚀 Enhancements** + +- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) +- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) +- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) +- Publisher: Convertors for legacy instances [\#4020](https://github.com/pypeclub/OpenPype/pull/4020) +- workflows: adding milestone creator and assigner 
[\#4018](https://github.com/pypeclub/OpenPype/pull/4018) +- Publisher: Catch creator errors [\#4015](https://github.com/pypeclub/OpenPype/pull/4015) + +**🐛 Bug fixes** + +- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) +- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) +- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) +- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) +- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) +- Experimental Tools: Fix publisher import [\#4027](https://github.com/pypeclub/OpenPype/pull/4027) +- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) + +**🔀 Refactored code** + +- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) + +## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) + +**🚀 Enhancements** + +- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) +- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) +- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) +- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) +- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) + +**🐛 Bug fixes** + +- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) +- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) + +**🔀 Refactored code** + +- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) +- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) +- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) +- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) +- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) + +**Merged pull requests:** + +- Unreal: Remove redundant Creator stub [\#4012](https://github.com/pypeclub/OpenPype/pull/4012) +- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) +- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) +- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) + +## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) + +**🆕 New features** + +- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) +- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) + +**🚀 Enhancements** + +- Publisher: Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) +- TrayPublisher: Original Basename cause crash 
too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) +- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) +- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) +- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) +- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) +- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) +- Maya: Define preferred Qt bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) +- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) +- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) +- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) +- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) +- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) +- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) +- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) +- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) + +**🐛 Bug fixes** + +- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) +- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) +- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) +- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) +- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) +- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) +- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) +- Houdini: Fix validate workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) +- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) +- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) +- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) +- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) +- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) +- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) +- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) +- Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) + +**🔀 Refactored code** + +- Flame: Import 
lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) +- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) +- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) +- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) +- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) +- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) +- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) + +**Merged pull requests:** + +- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) +- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) +- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) + +## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) + +**🚀 Enhancements** + +- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) + +**🐛 Bug fixes** + +- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) +- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) +- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) +- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) + +**🔀 Refactored code** + +- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) +- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) +- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) +- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) +- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) +- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) + +**Merged pull requests:** + +- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences [\#3923](https://github.com/pypeclub/OpenPype/pull/3923) + +## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) + +### 📖 Documentation + +- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) + +**🆕 New features** + +- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) +- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) +- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) + +**🚀 Enhancements** + +- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) +- Flame: OpenPype submenu to batch and media manager [\#3825](https://github.com/pypeclub/OpenPype/pull/3825) +- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) +- Photoshop: attempt to speed up ExtractImage 
[\#3793](https://github.com/pypeclub/OpenPype/pull/3793) +- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) +- Kitsu: Drop 'entities root' setting. [\#3739](https://github.com/pypeclub/OpenPype/pull/3739) +- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) +- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) +- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) +- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) +- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) + +**🐛 Bug fixes** + +- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) +- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) +- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) +- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) +- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) +- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) +- nuke: validate write node is not failing due wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) +- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) +- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) +- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) +- Maya: `containerise` don't skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) + +**🔀 Refactored code** + +- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) +- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) +- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) +- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) +- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) +- General: Move queries of asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) +- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) +- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) +- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) +- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) +- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) +- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) +- Houdini: Define houdini as addon [\#3735](https://github.com/pypeclub/OpenPype/pull/3735) +- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) +- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) +- 
Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) + +**Merged pull requests:** + +- Standalone Publisher: Ignore empty labels, then still use name like other asset models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) +- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) + +## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) + +### 📖 Documentation + +- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) +- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) + +**🆕 New features** + +- Webpublisher: change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) +- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) + +**🚀 Enhancements** + +- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) +- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) +- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) +- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) +- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) +- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) +- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) +- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) +- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) + +**🐛 Bug fixes** + +- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) +- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) +- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) +- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) +- Nuke: color-space settings from anatomy are working [\#3721](https://github.com/pypeclub/OpenPype/pull/3721) +- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) +- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) +- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) +- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) +- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) +- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) +- Flame: retime is working on clip publishing [\#3684](https://github.com/pypeclub/OpenPype/pull/3684) +- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) + +**🔀 Refactored code** + +- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) +- General: Host 
addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) +- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) +- Photoshop: Defined photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) +- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) +- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) +- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) +- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) +- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) +- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) +- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) +- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) +- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) +- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) +- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) +- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) +- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) +- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) +- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) + +**Merged pull requests:** + +- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) +- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) +- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) +- Nuke: Validation refactor to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) + +## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) + +**🆕 New features** + +- Maya: Build workfile by template [\#3578](https://github.com/pypeclub/OpenPype/pull/3578) +- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) +- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) + +**🚀 Enhancements** + +- Ftrack: Additional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) +- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) +- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) +- Maya: Disable rendering of all lights for render instances submitted through Deadline. 
[\#3661](https://github.com/pypeclub/OpenPype/pull/3661) +- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) + +**🐛 Bug fixes** + +- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) +- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) +- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) +- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Nuke: color settings for render write node are working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**🔀 Refactored code** + +- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) +- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) +- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) +- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) + +**Merged pull requests:** + +- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) +- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) +- fix the bug of failing to extract look when UDIMs format is used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) + +**🆕 New features** + +- Support for multiple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) +- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) + +**🚀 Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) +- Maya: Render Creator has configurable options. 
[\#3097](https://github.com/pypeclub/OpenPype/pull/3097) + +**🐛 Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle newly created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesn't work for multi frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) + +### 📖 Documentation + +- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) +- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) + +**🚀 Enhancements** + +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) +- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) +- General: Interactive console in cli 
[\#3526](https://github.com/pypeclub/OpenPype/pull/3526) +- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) +- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) +- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) +- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) +- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) +- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) +- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) +- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) +- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) +- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) +- Enhance powershell build scripts [\#1827](https://github.com/pypeclub/OpenPype/pull/1827) + +**🐛 Bug fixes** + +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Maya: Fix animated attributes \(i.e. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) +- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) +- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) +- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) +- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) +- Workfiles tool: Show of tool and its flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) +- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) +- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) +- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) +- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) +- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) +- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) +- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) +- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) +- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) +- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) + +**🔀 Refactored code** + +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) +- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) +- General: Client docstrings cleanup 
[\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) +- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) +- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) +- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) +- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) +- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) + +## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1) + +### 📖 Documentation + +- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) + +**🆕 New features** + +- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) + +**🚀 Enhancements** + +- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) +- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) +- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) +- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) +- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) +- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) +- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) +- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) +- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) +- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) +- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384) +- Maya: Ability to set resolution for playblasts from asset, and override through review instance. 
[\#3360](https://github.com/pypeclub/OpenPype/pull/3360) +- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197) +- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100) + +**🐛 Bug fixes** + +- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) +- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) +- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) +- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) +- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) +- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) +- Kitsu: bugfix with sync-service and publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) +- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) +- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) +- Resolve: removed a few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) +- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) +- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) +- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) +- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) +- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) +- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) +- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386) +- Maya: Handle excluding `model` family from frame range validator. 
[\#3370](https://github.com/pypeclub/OpenPype/pull/3370) + +**🔀 Refactored code** + +- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) +- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) +- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) +- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) +- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) +- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) +- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) +- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) +- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) +- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) +- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380) +- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379) +- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337) + +## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0) + +### 📖 Documentation + +- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) +- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) + +**🆕 New features** + +- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921) + +**🚀 Enhancements** + +- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) +- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) +- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) +- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) +- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) +- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304) +- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755) + +**🐛 Bug fixes** + +- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) +- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) +- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414) +- Houdini: fix loading and updating vdb/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) +- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) +- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) +- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) +- TVPaint: Make sure exit code 
is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) +- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) +- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) +- Harmony: added unc path to zipfile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) +- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) +- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328) +- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245) +- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128) + +**🔀 Refactored code** + +- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) +- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) +- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) +- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) +- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) +- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) +- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) +- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) +- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) +- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) +- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) +- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340) +- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339) +- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330) + +**Merged pull requests:** + +- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) +- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) +- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196) + +## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1) + +**🆕 New features** + +- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) +- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) + +**🚀 Enhancements** + +- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) +- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) +- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) +- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320) +- Enhancement: More control over thumbnail processing. 
[\#3259](https://github.com/pypeclub/OpenPype/pull/3259) + +**🐛 Bug fixes** + +- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) +- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) +- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) +- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) +- AE- fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) +- deadline: fixing misidentification of reviewables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) +- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) +- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) +- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) +- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336) +- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306) +- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198) + +**🔀 Refactored code** + +- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333) + +## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0) + +### 📖 Documentation + +- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299) +- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285) +- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650) + +**🆕 New features** + +- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178) + +**🚀 Enhancements** + +- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323) +- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316) +- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310) +- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309) +- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307) +- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298) +- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284) +- Maya: better handling of legacy review subsets names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269) +- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268) +- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267) +- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264) +- TVPaint: Init file for TVPaint worker also handle guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250) +- Nuke: Change default icon path in settings [\#3247](https://github.com/pypeclub/OpenPype/pull/3247) +- 
Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225) +- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208) +- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186) +- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172) +- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122) + +**🐛 Bug fixes** + +- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) +- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322) +- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321) +- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314) +- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305) +- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303) +- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301) +- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300) +- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297) +- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286) +- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281) +- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279) +- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278) +- Maya: Fix udim support for e.g. 
uppercase \<UDIM\> tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266) +- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261) +- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260) +- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255) +- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240) +- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239) +- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238) +- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224) +- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162) +- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929) + +**🔀 Refactored code** + +- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331) +- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288) + +**Merged pull requests:** + +- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318) +- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274) + +## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0) + +### 📖 Documentation + +- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) +- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) + +**🆕 New features** + +- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) +- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) +- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) +- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) +- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) +- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917) +- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838) +- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410) + +**🚀 Enhancements** + +- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253) +- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246) +- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226) +- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) +- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199) +- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190) +- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) +- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168) +- Maya: add maya 2023 to default applications 
[\#3167](https://github.com/pypeclub/OpenPype/pull/3167) +- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) +- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) +- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147) +- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) +- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) +- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) +- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) +- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) +- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) +- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) +- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) +- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) +- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) +- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) +- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) +- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104) +- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087) +- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) +- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079) +- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070) +- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063) +- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) +- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) +- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611) + +**🐛 Bug fixes** + +- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254) +- Ftrack: Chunk sizes for queries have minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244) +- Maya: renderman displays need to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242) +- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237) +- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236) +- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228) +- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222) +- Project Manager: Fix persistent editors on project change 
[\#3218](https://github.com/pypeclub/OpenPype/pull/3218) +- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214) +- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) +- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) +- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) +- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) +- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) +- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) +- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) +- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) +- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) +- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) +- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) +- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) +- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144) +- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) +- General: Missing version in headless mode crashes properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) +- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) +- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) +- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) +- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) +- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099) +- General: Collect loaded versions skips non-existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) +- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) +- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) +- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088) +- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) +- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) +- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) +- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) +- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) +- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006) + +**🔀 Refactored code** + +- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193) +- General: 
Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) +- General: Move mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066) +- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) + +**Merged pull requests:** + +- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257) +- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249) +- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) +- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) +- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) +- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) +- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) +- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966) +- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933) +- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880) +- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856) + +## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) + +## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) + +## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) + +## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4) + +### 📖 Documentation + +- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) +- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) +- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) + +**🆕 New features** + +- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) +- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928) + +**🚀 Enhancements** + +- TVPaint: Added init file for worker to trigger missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) +- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) +- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) + +**🐛 Bug fixes** + +- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064) +- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) +- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) +- General: Extract Review handle 
invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) +- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) +- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) +- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) +- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) +- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) +- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) +- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) +- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) +- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915) + +**Merged pull requests:** + +- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) +- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) + +### 📖 Documentation + +- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) +- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) +- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) +- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896) + +**🆕 New features** + +- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) +- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) +- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) + +**🚀 Enhancements** + +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) +- Console Interpreter: Changed how console splitter sizes are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) +- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) +- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup 
[\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) - Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) - NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) +- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) +- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) - Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) +- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) - CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) - Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) - Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) @@ -22,19 +1197,48 @@ **🐛 Bug fixes** +- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- Settings UI: Version column can be extended so versions are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) +- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) +- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) +- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) +- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) +- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) +- Ftrack: multiple reviewable components [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) +- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) +- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) +- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) +- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) +- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) +- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) +- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) +- Settings UI: Fix version completer on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) +- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) +- Slack: Added default for review\_upload\_limit for Slack 
[\#2965](https://github.com/pypeclub/OpenPype/pull/2965) +- General: OIIO conversion for ffmpeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) +- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) +- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) - LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) +- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) - General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) - SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) - Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) - General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) - General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) - Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) +- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) - General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) - Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924) +- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) - Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) +- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) **🔀 Refactored code** +- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) - General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) - General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) - General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) @@ -43,11 +1247,19 @@ **Merged pull requests:** -- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) +- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) +- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) +- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) +- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) + +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2) ## [3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.1-nightly.3...3.9.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1) **🚀 Enhancements** @@ -68,6 +1280,8 @@ - General: Avoid circular import 
- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874)
- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826)
+- Flame Babypublisher optimization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806)
+- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618)

**🔀 Refactored code**

@@ -76,34 +1290,118 @@

## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.9.0-nightly.9...3.9.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0)

**Deprecated:**

+- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779)
+- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706)
- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845)

### 📖 Documentation

+- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799)
+- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785)
+- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772)
+- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760)
+- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726)
+- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669)
+- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639)
+- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621)
- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878)

+**🆕 New features**
+
+- Flame: loading clips to reels [\#2622](https://github.com/pypeclub/OpenPype/pull/2622)
+- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570)
+

**🚀 Enhancements**

+- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
+- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803)
+- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780)
+- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778)
+- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775)
+- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770)
+- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758)
+- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751)
+- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746)
+- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734)
+- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733)
+- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732)
+- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727)
+- dropbox handle big file [\#2718](https://github.com/pypeclub/OpenPype/pull/2718)
+- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716)
+- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700)
+- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691)
+- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680)
+- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679)
+- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678)
+- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670)
+- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658)
+- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649)
+- General: FFmpeg conversion also check attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635)
+- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606)
+- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605)
+- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603)
+- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556)
+- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445)
+- TVPaint: Use new publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435)
+- Harmony: Added new style validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434)
+- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430)
+- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390)
- General: Subset name filtering in ExtractReview outputs [\#2872](https://github.com/pypeclub/OpenPype/pull/2872)
- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867)
- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863)
- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859)
-- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837)
- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836)
- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822)
- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817)
- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812)
- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811)
+- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805)
+- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747)
+- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630)

**🐛 Bug fixes**

+- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810)
+- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802)
+- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800)
+- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798)
+- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783)
+- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768)
+- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767)
+- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765)
+- Maya: Fix `unique_namespace` when in a namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759)
+- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757)
+- Harmony: Rendering in Deadline didn't work on machines other than the submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754)
+- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748)
+- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745)
+- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744)
+- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741)
+- Loader UI: Multiple asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731)
+- General: Fix loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729)
+- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725)
+- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715)
+- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711)
+- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710)
+- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709)
+- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704)
+- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701)
+- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693)
+- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692)
+- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685)
+- Houdini: Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676)
+- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673)
+- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671)
+- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667)
+- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659)
+- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633)
+- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631)
+- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508)
- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877)
-- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875)
- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868)
- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866)
- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864)

@@ -130,6 +1428,10 @@

**🔀 Refactored code**

+- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792)
+- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791)
+- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790)
+- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789)
- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876)
- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854)
- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848)

@@ -137,66 +1439,751 @@

- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839)
- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829)
- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823)
+- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766)
+
+**Merged pull requests:**
+
+- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713)
+- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705)
+- Webpublisher: Photoshop create a beauty png [\#2689](https://github.com/pypeclub/OpenPype/pull/2689)
+- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682)
+- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661)
+- Harmony: move to Openpype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657)
+- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642)
+- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616)

## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.2-nightly.3...3.8.2)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2)
+
+### 📖 Documentation
+
+- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617)
+
+**🚀 Enhancements**
+
+- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638)
+- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629)
+- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627)
+- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602)
+- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576)
+
+**🐛 Bug fixes**
+
+- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628)
+- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590)
+
+**Merged pull requests:**
+
+- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641)
+- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613)

## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.1-nightly.3...3.8.1)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1)
+
+**🚀 Enhancements**
+
+- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600)
+- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541)
+- Launcher: Added context menu to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536)
+- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066)
+
+**🐛 Bug fixes**
+
+- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619)
+- Settings: Enum does not store empty string if has single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615)
+- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594)
+- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589)
+- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586)
+- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580)
+- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568)
+- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484)
+
+**Merged pull requests:**
+
+- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595)
+- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591)
+- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523)

## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.8.0-nightly.7...3.8.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0)
+
+### 📖 Documentation
+
+- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546)
+
+**🆕 New features**
+
+- Flame: extracting segments with trans-coding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547)
+- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544)
+- Maya to Unreal: Extended static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537)
+- Flame: collecting publishable instances [\#2519](https://github.com/pypeclub/OpenPype/pull/2519)
+- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495)
+- Flame: OpenTimelineIO Export Module [\#2398](https://github.com/pypeclub/OpenPype/pull/2398)
+
+**🚀 Enhancements**
+
+- Webpublisher: Moved error to the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559)
+- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558)
+- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555)
+- Settings: PathInput strip passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550)
+- Global: Extract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548)
+- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542)
+- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529)
+- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525)
+- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521)
+- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510)
+- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501)
+- Slack: notifications are sent with Openpype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499)
+- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498)
+- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496)
+- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489)
+- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486)
+- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482)
+- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475)
+- Publish pype: Reduce publish process deferring [\#2464](https://github.com/pypeclub/OpenPype/pull/2464)
+- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460)
+- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459)
+- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458)
+- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457)
+- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455)
+- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450)
+- Maya: Improve lib.polyConstraint performance when Select tool is not the active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447)
+- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425)
+- Maya : add option to not group reference in ReferenceLoader [\#2383](https://github.com/pypeclub/OpenPype/pull/2383)
+
+**🐛 Bug fixes**
+
+- AfterEffects: Fix - removed obsolete import [\#2577](https://github.com/pypeclub/OpenPype/pull/2577)
+- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575)
+- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563)
+- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560)
+- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557)
+- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553)
+- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551)
+- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549)
+- Houdini: vdbcache family preserve frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535)
+- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533)
+- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532)
+- Maya: namespaced context go back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531)
+- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522)
+- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518)
+- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513)
+- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506)
+- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505)
+- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494)
+- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493)
+- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492)
+- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491)
+- Workfiles tool: Files widget show files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488)
+- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483)
+- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480)
+- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465)
+- Maya: Validate Shape Zero do not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456)
+
+**Merged pull requests:**
+
+- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543)
+- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540)
+- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website [\#2538](https://github.com/pypeclub/OpenPype/pull/2538)
+- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534)
+- Nuke: Merge avalon's implementation into OpenPype [\#2514](https://github.com/pypeclub/OpenPype/pull/2514)
+- Maya: Vray fix proxies look assignment [\#2392](https://github.com/pypeclub/OpenPype/pull/2392)
+- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297)

## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.7.0-nightly.14...3.7.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0)
+
+**Deprecated:**
+
+- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368)
+
+### 📖 Documentation
+
+- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324)
+
+**🆕 New features**
+
+- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296)
+- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192)
+- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157)
+- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140)
+- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980)
+
+**🚀 Enhancements**
+
+- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462)
+- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429)
+- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424)
+- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422)
+- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420)
+- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419)
+- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418)
+- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408)
+- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404)
+- Enhancement: Global cleanup plugin that explicitly remove paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402)
+- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401)
+- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385)
+- Centos 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384)
+- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382)
+- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377)
+- Maya add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375)
+- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367)
+- Hiero: python3 compatibility [\#2365](https://github.com/pypeclub/OpenPype/pull/2365)
+- Burnins: Be able to recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361)
+- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356)
+- Local settings: Copyable studio paths [\#2349](https://github.com/pypeclub/OpenPype/pull/2349)
+- Assets Widget: Clear model on project change [\#2345](https://github.com/pypeclub/OpenPype/pull/2345)
+- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338)
+- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336)
+- General: FFprobe error exception contains original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328)
+- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325)
+- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323)
+- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322)
+- General: Don't validate vendor bin with executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317)
+- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315)
+- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309)
+- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305)
+- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304)
+- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298)
+- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287)
+- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277)
+- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271)
+- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269)
+- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268)
+- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267)
+- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248)
+- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209)
+- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186)
+
+**🐛 Bug fixes**
+
+- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471)
+- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428)
+- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417)
+- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416)
+- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412)
+- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406)
+- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403)
+- Fix: Placeholder Input color set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399)
+- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396)
+- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381)
+- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379)
+- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378)
+- Nuke: fixing menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374)
+- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373)
+- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369)
+- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362)
+- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359)
+- Launcher: Minimize button on MacOs [\#2355](https://github.com/pypeclub/OpenPype/pull/2355)
+- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354)
+- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350)
+- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347)
+- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343)
+- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342)
+- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340)
+- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339)
+- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334)
+- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331)
+- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330)
+- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326)
+- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314)
+- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312)
+- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311)
+- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306)
+- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302)
+- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295)
+- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291)
+- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285)
+- Alternate site for site sync doesn't work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284)
+- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281)
+- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278)
+- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274)
+- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272)
+
+**Merged pull requests:**
+
+- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405)
+- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394)
+- Add validator to check correct version of extension for PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387)
+- Maya: configurable model top level validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321)
+- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270)

## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.7.0-nightly.1...3.6.4)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4)
+
+**🐛 Bug fixes**
+
+- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294)

## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.3-nightly.1...3.6.3)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3)
+
+**🐛 Bug fixes**
+
+- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280)

## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.2-nightly.2...3.6.2)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2)
+
+**🚀 Enhancements**
+
+- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265)
+- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262)
+- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256)
+- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255)
+- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251)
+- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244)
+- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221)
+
+**🐛 Bug fixes**
+
+- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266)
+- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264)
+- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261)
+- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260)
+- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259)
+- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258)
+- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247)
+- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197)
+- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195)

## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.1-nightly.1...3.6.1)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1)
+
+**🐛 Bug fixes**
+
+- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254)

## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15)
-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.6.0-nightly.6...3.6.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0)
+
+### 📖 Documentation
+
+- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206)
+- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188)
+
+**🆕 New features**
+
+- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176)
+- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170)
+- Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168)
+- Flame: a host basic integration [\#2165](https://github.com/pypeclub/OpenPype/pull/2165)
+- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072)
+- Basic Royal Render Integration ✨ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061)
+- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988)
+- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744)
+
+**🚀 Enhancements**
+
+- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243)
+- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239)
+- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238)
+- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235)
+- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225)
+- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224)
+- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222)
+- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220)
+- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219)
+- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212)
+- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211)
+- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208)
+- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204)
+- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200)
+- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199)
+- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198)
+- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196)
+- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193)
+- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185)
+- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184)
+- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179)
+- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167)
+- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166)
+- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149)
+- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142)
+- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139)
+
+**🐛 Bug fixes**
+
+- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250)
+- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245)
+- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237)
+- Python 2: Unicode to string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236)
+- Fix - enum for color coding in PS [\#2234](https://github.com/pypeclub/OpenPype/pull/2234)
+- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232)
+- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214)
+- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210)
+- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207)
+- Tools: Workfiles tool don't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205)
+- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203)
+- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191)
+- StandalonePublisher: Source validator don't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190)
+- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183)
+- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177)
+- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175)
+- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174)
+- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163)
+- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161)
+- Maya: Collect render - fix UNC path support 🐛 [\#2158](https://github.com/pypeclub/OpenPype/pull/2158)
+- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151)
+- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150)
+- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147)
+- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138)
+
+**Merged pull requests:**
+
+- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162)
+- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059)

## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.5.0-nightly.8...3.5.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0)
+
+**Deprecated:**
+
+- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106)
+
+**🆕 New features**
+
+- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131)
+- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124)
+- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114)
+- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091)
+- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073)
+- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068)
+- Maya: Enable publishing render attrib sets \(e.g. V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955)
+
+**🚀 Enhancements**
+
+- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137)
+- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132)
+- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128)
+- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104)
+- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093)
+- Added choosing different dirmap mapping if workfile synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088)
+- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084)
+- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080)
+- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079)
+- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078)
+- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070)
+- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069)
+- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064)
+- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062)
+- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051)
+- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979)
+- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897)
+- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896)
+- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887)
+
+**🐛 Bug fixes**
+
+- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130)
+- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129)
+- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120)
+- Hiero: publishing effect first time makes wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115)
+- Add startup script for Houdini Core. [\#2110](https://github.com/pypeclub/OpenPype/pull/2110)
+- TVPaint: Behavior name of loop also accept repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109)
+- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103)
+- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101)
+- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100)
+- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097)
+- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096)
+- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095)
+- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087)
+- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085)
+- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083)
+- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082)
+- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081)
+- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077)
+- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065)
+- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063)
+
+**Merged pull requests:**
+
+- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086)

## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.1-nightly.1...3.4.1)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1)
+
+**🆕 New features**
+
+- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008)
+
+**🚀 Enhancements**
+
+- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054)
+- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052)
+- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049)
+- Settings UI: Deferred set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044)
+- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043)
+- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042)
+- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039)
+- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038)
+- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030)
+- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028)
+- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024)
+- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018)
+
+**🐛 Bug fixes**
+
+- Timers manager: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058)
+- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057)
+- Differentiate jpg sequences from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056)
+- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046)
+- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045)
+
+**Merged pull requests:**
+
+- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050)

## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17)

-[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.4.0-nightly.6...3.4.0)
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0)
+
+### 📖 Documentation
+
+- Documentation: Ftrack launch arguments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014)
+- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952)
+- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821)
+
+**🆕 New features**
+
+- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003)
+- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947)
+- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876)
+- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615)
+
+**🚀 Enhancements**
+
+- Added possibility to configure synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041)
+- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036)
+- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022)
+- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019)
+- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017)
+- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015)
+- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009)
+- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001)
+- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996)
+- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987)
+- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986)
+- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982)
+- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978)
+- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966)
+- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964)
+- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963)
+- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962)
+- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959)
+- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954)
+- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949)
+- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948)
+- Add face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942)
+- OpenPype: Add version validation and `--headless` mode and update progress 🔄 [\#1939](https://github.com/pypeclub/OpenPype/pull/1939)
+- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915)
+- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910)
+- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888)
+- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872)
+
+**🐛 Bug fixes**
+
+- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040)
+- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037)
+- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034)
+- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033)
+- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032)
+- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016)
+- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006)
+- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992)
+- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990)
+- nuke, resolve, hiero: precollector order less than 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984)
+- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981)
+- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977)
+- Stop timer was within validator order range. [\#1975](https://github.com/pypeclub/OpenPype/pull/1975)
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) +- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) +- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) +- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) +- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) +- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) +- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) + +**Merged pull requests:** + +- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) +- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) ## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.1-nightly.1...3.3.1) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) + +**🐛 Bug fixes** + +- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) +- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) +- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) +- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) ## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.3.0-nightly.11...3.3.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) + +### 📖 Documentation + +- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) + +**🆕 New features** + +- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) +- Maya: Scene patching 🩹on submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) +- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) + +**🚀 Enhancements** + +- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) +- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) +- Check for missing ✨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) +- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) +- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) +- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) +- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) +- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) +- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) +- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) +- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) +- Expose stop timer through rest api. 
[\#1886](https://github.com/pypeclub/OpenPype/pull/1886) +- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) +- Allow Multiple Notes to run on tasks. [\#1882](https://github.com/pypeclub/OpenPype/pull/1882) +- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) +- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) +- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) +- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) +- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) +- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) +- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) +- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) +- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) +- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) +- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) +- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) +- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) +- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) +- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) +- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) +- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) + +**🐛 Bug fixes** + +- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) +- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) +- Maya: Validate Model Name - repair accidental deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) +- Nuke: submit to farm failed due to `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) +- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) +- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) +- Nuke: update video file crashing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) +- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) +- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) +- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) +- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) +- Burnins: Use input's bitrate in h264 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) +- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) +- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) +- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) +- Normalize path
returned from Workfiles. [\#1880](https://github.com/pypeclub/OpenPype/pull/1880) +- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) +- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) +- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) +- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) +- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) +- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) +- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) +- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) +- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) +- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) +- Houdini collector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) +- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) +- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) +- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) + +**Merged pull requests:** + +- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) +- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) +- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) +- Maya: expected files -\> render products ⚙️ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) +- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) ## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.7...3.2.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) + +### 📖 Documentation + +- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) +- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) +- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) + +**🚀 Enhancements** + +- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) +- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) +- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) +- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) +- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) +- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) +- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) +- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) +- Application executables with environment variables
[\#1757](https://github.com/pypeclub/OpenPype/pull/1757) +- Deadline: Nuke submission additional attributes [\#1756](https://github.com/pypeclub/OpenPype/pull/1756) +- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) +- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) +- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) +- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) +- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) +- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) +- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) +- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) + +**🐛 Bug fixes** + +- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) +- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) +- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) +- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) +- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) +- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) +- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) +- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) +- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) +- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) +- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) +- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) +- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) +- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) +- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) +- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) +- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) +- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) +- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) +- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) +- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) +- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) +- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) +- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) +- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) +- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) +- 
Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) + +**Merged pull requests:** + +- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773) +- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755) ## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24) @@ -204,7 +2191,7 @@ ## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.2.0-nightly.2...2.18.3) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3) ## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16) @@ -212,9 +2199,47 @@ ## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15) -[Full Changelog](https://github.com/pypeclub/OpenPype/compare/CI/3.1.0-nightly.4...3.1.0) +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0) -# Changelog +### 📖 Documentation + +- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657) + +**🚀 Enhancements** + +- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703) +- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702) +- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694) +- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689) +- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683) +- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682) +- Project Manager: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669) +- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667) +- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663) +- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662) +- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659) +- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653) +- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649) + +**🐛 Bug fixes** + +- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707) +- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705) +- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691) +- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687) +- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675) +- Settings list race condition and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671) +- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660) +- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652) +- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648) +- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646) +- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645) +- New project anatomy
values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644) +- Farm publishing: check if published items do exist [\#1573](https://github.com/pypeclub/OpenPype/pull/1573) + +**Merged pull requests:** + +- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686) ## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0) @@ -227,12 +2252,12 @@ - Easy to add Application versions. - Per Project Environment and plugin management. - Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family. -- Configurable publish plugins. +- Configurable publish plugins. - Options to make any validator or extractor, optional or disabled. - Color Management is now unified under anatomy settings. - Subset naming and grouping is fully configurable. - All project attributes can now be set directly in OpenPype settings. -- Studio Setting can be locked to prevent unwanted artist changes. +- Studio Setting can be locked to prevent unwanted artist changes. - You can now add per project and per task type templates for workfile initialization in most hosts. - Too many other individual configurable option to list in this changelog :) @@ -990,8 +3015,6 @@ - Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) -# Changelog - ## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15) [Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) @@ -1781,9 +3804,4 @@ A large cleanup release. Most of the change are under the hood. - _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner - - - - - - +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/Dockerfile b/Dockerfile index 7232223c3c..46dd9e5c0a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ # Build Pype docker image FROM ubuntu:focal AS builder -ARG OPENPYPE_PYTHON_VERSION=3.7.12 +ARG OPENPYPE_PYTHON_VERSION=3.9.12 ARG BUILD_DATE ARG VERSION diff --git a/Dockerfile.centos7 b/Dockerfile.centos7 index be3db58b62..5eb2f478ea 100644 --- a/Dockerfile.centos7 +++ b/Dockerfile.centos7 @@ -1,6 +1,6 @@ # Build Pype docker image FROM centos:7 AS builder -ARG OPENPYPE_PYTHON_VERSION=3.7.12 +ARG OPENPYPE_PYTHON_VERSION=3.9.12 LABEL org.opencontainers.image.name="pypeclub/openpype" LABEL org.opencontainers.image.title="OpenPype Docker Image" @@ -96,11 +96,11 @@ RUN source $HOME/.bashrc \ RUN source $HOME/.bashrc \ && bash ./tools/build.sh -RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.7/lib \ - && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.7/lib \ - && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.7/lib \ - && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.7/lib \ - && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.7/vendor/python/PySide2/Qt/lib +RUN cp /usr/lib64/libffi* ./build/exe.linux-x86_64-3.9/lib \ + && cp /usr/lib64/libssl* ./build/exe.linux-x86_64-3.9/lib \ + && cp /usr/lib64/libcrypto* ./build/exe.linux-x86_64-3.9/lib \ + && cp /root/.pyenv/versions/${OPENPYPE_PYTHON_VERSION}/lib/libpython* ./build/exe.linux-x86_64-3.9/lib \ + && cp /usr/lib64/libxcb* ./build/exe.linux-x86_64-3.9/vendor/python/PySide2/Qt/lib RUN cd /opt/openpype \ rm -rf ./vendor/bin diff --git a/Dockerfile.debian b/Dockerfile.debian new file mode 100644 index 0000000000..a53b5aa769 
--- /dev/null +++ b/Dockerfile.debian @@ -0,0 +1,81 @@ +# Build Pype docker image +FROM debian:bullseye AS builder +ARG OPENPYPE_PYTHON_VERSION=3.9.12 +ARG BUILD_DATE +ARG VERSION + +LABEL maintainer="info@openpype.io" +LABEL description="Docker Image to build and run OpenPype under Debian Bullseye" +LABEL org.opencontainers.image.name="pypeclub/openpype" +LABEL org.opencontainers.image.title="OpenPype Docker Image" +LABEL org.opencontainers.image.url="https://openpype.io/" +LABEL org.opencontainers.image.source="https://github.com/pypeclub/OpenPype" +LABEL org.opencontainers.image.documentation="https://openpype.io/docs/system_introduction" +LABEL org.opencontainers.image.created=$BUILD_DATE +LABEL org.opencontainers.image.version=$VERSION + +USER root + +ARG DEBIAN_FRONTEND=noninteractive + +# update base +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + ca-certificates \ + bash \ + git \ + cmake \ + make \ + curl \ + wget \ + build-essential \ + libssl-dev \ + zlib1g-dev \ + libbz2-dev \ + libreadline-dev \ + libsqlite3-dev \ + llvm \ + libncursesw5-dev \ + xz-utils \ + tk-dev \ + libxml2-dev \ + libxmlsec1-dev \ + libffi-dev \ + liblzma-dev \ + patchelf + +SHELL ["/bin/bash", "-c"] + + +RUN mkdir /opt/openpype + +# download and install pyenv +RUN curl https://pyenv.run | bash \ + && echo 'export PATH="$HOME/.pyenv/bin:$PATH"'>> $HOME/init_pyenv.sh \ + && echo 'eval "$(pyenv init -)"' >> $HOME/init_pyenv.sh \ + && echo 'eval "$(pyenv virtualenv-init -)"' >> $HOME/init_pyenv.sh \ + && echo 'eval "$(pyenv init --path)"' >> $HOME/init_pyenv.sh + +# install python with pyenv +RUN source $HOME/init_pyenv.sh \ + && pyenv install ${OPENPYPE_PYTHON_VERSION} + +COPY . /opt/openpype/ + +RUN chmod +x /opt/openpype/tools/create_env.sh && chmod +x /opt/openpype/tools/build.sh + +WORKDIR /opt/openpype + +# set local python version +RUN cd /opt/openpype \ + && source $HOME/init_pyenv.sh \ + && pyenv local ${OPENPYPE_PYTHON_VERSION} + +# fetch third party tools/libraries +RUN source $HOME/init_pyenv.sh \ + && ./tools/create_env.sh \ + && ./tools/fetch_thirdparty_libs.sh + +# build openpype +RUN source $HOME/init_pyenv.sh \ + && bash ./tools/build.sh diff --git a/HISTORY.md b/HISTORY.md index 032f876aa3..543cf11513 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -1,5 +1,2247 @@ # Changelog +## [3.15.0](https://github.com/ynput/OpenPype/tree/3.15.0) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.10...3.15.0) + +**Deprecated:** + +- General: Fill default values of new publish template profiles [\#4245](https://github.com/ynput/OpenPype/pull/4245) + +### 📖 Documentation + +- documentation: Split tools into separate entries [\#4342](https://github.com/ynput/OpenPype/pull/4342) +- Documentation: Fix harmony docs [\#4301](https://github.com/ynput/OpenPype/pull/4301) +- Remove staging logic set by OpenPype version [\#3979](https://github.com/ynput/OpenPype/pull/3979) + +**🆕 New features** + +- General: Push to studio library [\#4284](https://github.com/ynput/OpenPype/pull/4284) +- Colorspace Management and Distribution [\#4195](https://github.com/ynput/OpenPype/pull/4195) +- Nuke: refactor to latest publisher workflow [\#4006](https://github.com/ynput/OpenPype/pull/4006) +- Update to Python 3.9 [\#3546](https://github.com/ynput/OpenPype/pull/3546) + +**🚀 Enhancements** + +- Unreal: Don't use mongo queries in 'ExistingLayoutLoader' [\#4356](https://github.com/ynput/OpenPype/pull/4356) +- General: Loader and Creator plugins can be disabled
[\#4310](https://github.com/ynput/OpenPype/pull/4310) +- General: Unbind poetry version [\#4306](https://github.com/ynput/OpenPype/pull/4306) +- General: Enhanced enum def items [\#4295](https://github.com/ynput/OpenPype/pull/4295) +- Git: add pre-commit hooks [\#4289](https://github.com/ynput/OpenPype/pull/4289) +- Tray Publisher: Improve Online family functionality [\#4263](https://github.com/ynput/OpenPype/pull/4263) +- General: Update MacOs to PySide6 [\#4255](https://github.com/ynput/OpenPype/pull/4255) +- Build: update to Gazu in toml [\#4208](https://github.com/ynput/OpenPype/pull/4208) +- Global: adding imageio to settings [\#4158](https://github.com/ynput/OpenPype/pull/4158) +- Blender: added project settings for validator no colons in name [\#4149](https://github.com/ynput/OpenPype/pull/4149) +- Dockerfile for Debian Bullseye [\#4108](https://github.com/ynput/OpenPype/pull/4108) +- AfterEffects: publish multiple compositions [\#4092](https://github.com/ynput/OpenPype/pull/4092) +- AfterEffects: make new publisher default [\#4056](https://github.com/ynput/OpenPype/pull/4056) +- Photoshop: make new publisher default [\#4051](https://github.com/ynput/OpenPype/pull/4051) +- Feature/multiverse [\#4046](https://github.com/ynput/OpenPype/pull/4046) +- Tests: add support for deadline for automatic tests [\#3989](https://github.com/ynput/OpenPype/pull/3989) +- Add version to shortcut name [\#3906](https://github.com/ynput/OpenPype/pull/3906) +- TrayPublisher: Removed from experimental tools [\#3667](https://github.com/ynput/OpenPype/pull/3667) + +**🐛 Bug fixes** + +- change 3.7 to 3.9 in folder name [\#4354](https://github.com/ynput/OpenPype/pull/4354) +- PushToProject: Fix hierarchy of project change [\#4350](https://github.com/ynput/OpenPype/pull/4350) +- Fix photoshop workfile save-as [\#4347](https://github.com/ynput/OpenPype/pull/4347) +- Nuke Input process node sourcing improvements [\#4341](https://github.com/ynput/OpenPype/pull/4341) +- New publisher: Some validation plugin tweaks [\#4339](https://github.com/ynput/OpenPype/pull/4339) +- Harmony: fix unable to change workfile on Mac [\#4334](https://github.com/ynput/OpenPype/pull/4334) +- Global: fixing in-place source publishing for editorial [\#4333](https://github.com/ynput/OpenPype/pull/4333) +- General: Use class constants of QMessageBox [\#4332](https://github.com/ynput/OpenPype/pull/4332) +- TVPaint: Fix plugin for TVPaint 11.7 [\#4328](https://github.com/ynput/OpenPype/pull/4328) +- Extract OTIO review has improved quality [\#4325](https://github.com/ynput/OpenPype/pull/4325) +- Ftrack: fix typos causing bugs in sync [\#4322](https://github.com/ynput/OpenPype/pull/4322) +- General: Python 2 compatibility of instance collector [\#4320](https://github.com/ynput/OpenPype/pull/4320) +- Slack: user groups speedup [\#4318](https://github.com/ynput/OpenPype/pull/4318) +- Maya: Bug - Multiverse extractor executed on plain animation family [\#4315](https://github.com/ynput/OpenPype/pull/4315) +- Fix run\_documentation.ps1 [\#4312](https://github.com/ynput/OpenPype/pull/4312) +- Nuke: new creators fixes [\#4308](https://github.com/ynput/OpenPype/pull/4308) +- General: missing comment on standalone and tray publisher [\#4303](https://github.com/ynput/OpenPype/pull/4303) +- AfterEffects: Fix for audio from mp4 layer [\#4296](https://github.com/ynput/OpenPype/pull/4296) +- General: Update gazu in poetry lock [\#4247](https://github.com/ynput/OpenPype/pull/4247) +- Bug: Fixing version detection and filtering in Igniter
[\#3914](https://github.com/ynput/OpenPype/pull/3914) +- Bug: Create missing version dir [\#3903](https://github.com/ynput/OpenPype/pull/3903) + +**🔀 Refactored code** + +- Remove redundant export\_alembic method. [\#4293](https://github.com/ynput/OpenPype/pull/4293) +- Igniter: Use qtpy modules instead of Qt [\#4237](https://github.com/ynput/OpenPype/pull/4237) + +**Merged pull requests:** + +- Sort families by alphabetical order in the Create plugin [\#4346](https://github.com/ynput/OpenPype/pull/4346) +- Global: Validate unique subsets [\#4336](https://github.com/ynput/OpenPype/pull/4336) +- Maya: Collect instances preserve handles even if frameStart + frameEnd matches context [\#3437](https://github.com/ynput/OpenPype/pull/3437) + + +## [3.14.10](https://github.com/ynput/OpenPype/tree/3.14.10) + +[Full Changelog](https://github.com/ynput/OpenPype/compare/3.14.9...3.14.10) + +**🆕 New features** + +- Global | Nuke: Creator placeholders in workfile template builder [\#4266](https://github.com/ynput/OpenPype/pull/4266) +- Slack: Added dynamic message [\#4265](https://github.com/ynput/OpenPype/pull/4265) +- Blender: Workfile Loader [\#4234](https://github.com/ynput/OpenPype/pull/4234) +- Unreal: Publishing and Loading for UAssets [\#4198](https://github.com/ynput/OpenPype/pull/4198) +- Publish: register publishes without copying them [\#4157](https://github.com/ynput/OpenPype/pull/4157) + +**🚀 Enhancements** + +- General: Added install method with docstring to HostBase [\#4298](https://github.com/ynput/OpenPype/pull/4298) +- Traypublisher: simple editorial multiple edl [\#4248](https://github.com/ynput/OpenPype/pull/4248) +- General: Extend 'IPluginPaths' to have more available methods [\#4214](https://github.com/ynput/OpenPype/pull/4214) +- Refactorization of folder coloring [\#4211](https://github.com/ynput/OpenPype/pull/4211) +- Flame - loading multilayer with controlled layer names [\#4204](https://github.com/ynput/OpenPype/pull/4204) + +**🐛 Bug fixes** + +- Unreal: fix missing `maintained_selection` call [\#4300](https://github.com/ynput/OpenPype/pull/4300) +- Ftrack: Fix receive of host ip on MacOs [\#4288](https://github.com/ynput/OpenPype/pull/4288) +- SiteSync: sftp connection failing when shouldn't be tested [\#4278](https://github.com/ynput/OpenPype/pull/4278) +- Deadline: fix default value for passing mongo url [\#4275](https://github.com/ynput/OpenPype/pull/4275) +- Scene Manager: Fix variable name [\#4268](https://github.com/ynput/OpenPype/pull/4268) +- Slack: notification fails because of missing published path [\#4264](https://github.com/ynput/OpenPype/pull/4264) +- hiero: creator gui with min max [\#4257](https://github.com/ynput/OpenPype/pull/4257) +- NiceCheckbox: Fix checker positioning in Python 2 [\#4253](https://github.com/ynput/OpenPype/pull/4253) +- Publisher: Fix 'CreatorType' not equal for Python 2 DCCs [\#4249](https://github.com/ynput/OpenPype/pull/4249) +- Deadline: fix dependencies [\#4242](https://github.com/ynput/OpenPype/pull/4242) +- Houdini: hotfix instance data access [\#4236](https://github.com/ynput/OpenPype/pull/4236) +- bugfix/image plane load error [\#4222](https://github.com/ynput/OpenPype/pull/4222) +- Hiero: thumbnail from multilayer exr [\#4209](https://github.com/ynput/OpenPype/pull/4209) + +**🔀 Refactored code** + +- Resolve: Use qtpy in Resolve [\#4254](https://github.com/ynput/OpenPype/pull/4254) +- Houdini: Use qtpy in Houdini [\#4252](https://github.com/ynput/OpenPype/pull/4252) +- Max: Use qtpy in Max
[\#4251](https://github.com/ynput/OpenPype/pull/4251) +- Maya: Use qtpy in Maya [\#4250](https://github.com/ynput/OpenPype/pull/4250) +- Hiero: Use qtpy in Hiero [\#4240](https://github.com/ynput/OpenPype/pull/4240) +- Nuke: Use qtpy in Nuke [\#4239](https://github.com/ynput/OpenPype/pull/4239) +- Flame: Use qtpy in flame [\#4238](https://github.com/ynput/OpenPype/pull/4238) +- General: Legacy io not used in global plugins [\#4134](https://github.com/ynput/OpenPype/pull/4134) + +**Merged pull requests:** + +- Bump json5 from 1.0.1 to 1.0.2 in /website [\#4292](https://github.com/ynput/OpenPype/pull/4292) +- Maya: Fix validate frame range repair + fix create render with deadline disabled [\#4279](https://github.com/ynput/OpenPype/pull/4279) + + +## [3.14.9](https://github.com/pypeclub/OpenPype/tree/3.14.9) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.8...3.14.9) + +### 📖 Documentation + +- Documentation: Testing on Deadline [\#4185](https://github.com/pypeclub/OpenPype/pull/4185) +- Consistent Python version [\#4160](https://github.com/pypeclub/OpenPype/pull/4160) + +**🆕 New features** + +- Feature/op 4397 gl tf extractor for maya [\#4192](https://github.com/pypeclub/OpenPype/pull/4192) +- Maya: Extractor for Unreal SkeletalMesh [\#4174](https://github.com/pypeclub/OpenPype/pull/4174) +- 3dsmax: integration [\#4168](https://github.com/pypeclub/OpenPype/pull/4168) +- Blender: Extract Alembic Animations [\#4128](https://github.com/pypeclub/OpenPype/pull/4128) +- Unreal: Load Alembic Animations [\#4127](https://github.com/pypeclub/OpenPype/pull/4127) + +**🚀 Enhancements** + +- Houdini: Use new interface class name for publish host [\#4220](https://github.com/pypeclub/OpenPype/pull/4220) +- General: Default command for headless mode is interactive [\#4203](https://github.com/pypeclub/OpenPype/pull/4203) +- Maya: Enhanced ASS publishing [\#4196](https://github.com/pypeclub/OpenPype/pull/4196) +- Feature/op 3924 implement ass extractor [\#4188](https://github.com/pypeclub/OpenPype/pull/4188) +- File transactions: Source path is destination path [\#4184](https://github.com/pypeclub/OpenPype/pull/4184) +- Deadline: improve environment processing [\#4182](https://github.com/pypeclub/OpenPype/pull/4182) +- General: Comment per instance in Publisher [\#4178](https://github.com/pypeclub/OpenPype/pull/4178) +- Ensure Mongo database directory exists in Windows. [\#4166](https://github.com/pypeclub/OpenPype/pull/4166) +- Note about unrestricted execution on Windows. [\#4161](https://github.com/pypeclub/OpenPype/pull/4161) +- Maya: Enable thumbnail transparency on extraction. [\#4147](https://github.com/pypeclub/OpenPype/pull/4147) +- Maya: Disable viewport Pan/Zoom on playblast extraction. 
[\#4146](https://github.com/pypeclub/OpenPype/pull/4146) +- Maya: Optional viewport refresh on pointcache extraction [\#4144](https://github.com/pypeclub/OpenPype/pull/4144) +- CelAction: refactor integration to current openpype [\#4140](https://github.com/pypeclub/OpenPype/pull/4140) +- Maya: create and publish bounding box geometry [\#4131](https://github.com/pypeclub/OpenPype/pull/4131) +- Changed the UOpenPypePublishInstance to use the UDataAsset class [\#4124](https://github.com/pypeclub/OpenPype/pull/4124) +- General: Collection Audio speed up [\#4110](https://github.com/pypeclub/OpenPype/pull/4110) +- Maya: keep existing AOVs when creating render instance [\#4087](https://github.com/pypeclub/OpenPype/pull/4087) +- General: Oiio conversion multipart fix [\#4060](https://github.com/pypeclub/OpenPype/pull/4060) + +**🐛 Bug fixes** + +- Publisher: Signal type issues in Python 2 DCCs [\#4230](https://github.com/pypeclub/OpenPype/pull/4230) +- Blender: Fix Layout Family Versioning [\#4228](https://github.com/pypeclub/OpenPype/pull/4228) +- Blender: Fix Create Camera "Use selection" [\#4226](https://github.com/pypeclub/OpenPype/pull/4226) +- TrayPublisher - join needs list [\#4224](https://github.com/pypeclub/OpenPype/pull/4224) +- General: Event callbacks pass event to callbacks as expected [\#4210](https://github.com/pypeclub/OpenPype/pull/4210) +- Build: Revert .toml update of Gazu [\#4207](https://github.com/pypeclub/OpenPype/pull/4207) +- Nuke: fixed imageio node overrides subset filter [\#4202](https://github.com/pypeclub/OpenPype/pull/4202) +- Maya: pointcache [\#4201](https://github.com/pypeclub/OpenPype/pull/4201) +- Unreal: Support for Unreal Engine 5.1 [\#4199](https://github.com/pypeclub/OpenPype/pull/4199) +- General: Integrate thumbnail looks for thumbnail to multiple places [\#4181](https://github.com/pypeclub/OpenPype/pull/4181) +- Various minor bugfixes [\#4172](https://github.com/pypeclub/OpenPype/pull/4172) +- Nuke/Hiero: Remove tkinter library paths before launch [\#4171](https://github.com/pypeclub/OpenPype/pull/4171) +- Flame: vertical alignment of layers [\#4169](https://github.com/pypeclub/OpenPype/pull/4169) +- Nuke: correct detection of viewer and display [\#4165](https://github.com/pypeclub/OpenPype/pull/4165) +- Settings UI: Don't create QApplication if already exists [\#4156](https://github.com/pypeclub/OpenPype/pull/4156) +- General: Extract review handle start offset of sequences [\#4152](https://github.com/pypeclub/OpenPype/pull/4152) +- Maya: Maintain time connections on Alembic update.
[\#4143](https://github.com/pypeclub/OpenPype/pull/4143) + +**🔀 Refactored code** + +- General: Use qtpy in modules and hosts UIs which are running in OpenPype process [\#4225](https://github.com/pypeclub/OpenPype/pull/4225) +- Tools: Use qtpy instead of Qt in standalone tools [\#4223](https://github.com/pypeclub/OpenPype/pull/4223) +- General: Use qtpy in settings UI [\#4215](https://github.com/pypeclub/OpenPype/pull/4215) + +**Merged pull requests:** + +- layout publish more than one container issue [\#4098](https://github.com/pypeclub/OpenPype/pull/4098) + +## [3.14.8](https://github.com/pypeclub/OpenPype/tree/3.14.8) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.7...3.14.8) + +**🚀 Enhancements** + +- General: Refactored extract hierarchy plugin [\#4139](https://github.com/pypeclub/OpenPype/pull/4139) +- General: Find executable enhancement [\#4137](https://github.com/pypeclub/OpenPype/pull/4137) +- Ftrack: Reset session before instance processing [\#4129](https://github.com/pypeclub/OpenPype/pull/4129) +- Ftrack: Editorial asset sync issue [\#4126](https://github.com/pypeclub/OpenPype/pull/4126) +- Deadline: Build version resolving [\#4115](https://github.com/pypeclub/OpenPype/pull/4115) +- Houdini: New Publisher [\#3046](https://github.com/pypeclub/OpenPype/pull/3046) +- Fix: Standalone Publish Directories [\#4148](https://github.com/pypeclub/OpenPype/pull/4148) + +**🐛 Bug fixes** + +- Ftrack: Fix occasional double parents issue [\#4153](https://github.com/pypeclub/OpenPype/pull/4153) +- General: Maketx executable issue [\#4136](https://github.com/pypeclub/OpenPype/pull/4136) +- Maya: Looks - add all connections [\#4135](https://github.com/pypeclub/OpenPype/pull/4135) +- General: Fix variable check in collect anatomy instance data [\#4117](https://github.com/pypeclub/OpenPype/pull/4117) + +## [3.14.7](https://github.com/pypeclub/OpenPype/tree/3.14.7) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.6...3.14.7) + +**🆕 New features** + +- Hiero: loading effect family to timeline [\#4055](https://github.com/pypeclub/OpenPype/pull/4055) + +**🚀 Enhancements** + +- Photoshop: bug with pop-up window on Instance Creator [\#4121](https://github.com/pypeclub/OpenPype/pull/4121) +- Publisher: Open on specific tab [\#4120](https://github.com/pypeclub/OpenPype/pull/4120) +- Publisher: Hide unknown publish values [\#4116](https://github.com/pypeclub/OpenPype/pull/4116) +- Ftrack: Event server status gives more information about version locations [\#4112](https://github.com/pypeclub/OpenPype/pull/4112) +- General: Allow higher numbers in frames and clips [\#4101](https://github.com/pypeclub/OpenPype/pull/4101) +- Publisher: Settings for validate frame range [\#4097](https://github.com/pypeclub/OpenPype/pull/4097) +- Publisher: Ignore escape button [\#4090](https://github.com/pypeclub/OpenPype/pull/4090) +- Flame: Loading clip with native colorspace resolved from mapping [\#4079](https://github.com/pypeclub/OpenPype/pull/4079) +- General: Extract review single frame output [\#4064](https://github.com/pypeclub/OpenPype/pull/4064) +- Publisher: Prepared common function for instance data cache [\#4063](https://github.com/pypeclub/OpenPype/pull/4063) +- Publisher: Easy access to publish page from create page [\#4058](https://github.com/pypeclub/OpenPype/pull/4058) +- General/TVPaint: Attribute defs dialog [\#4052](https://github.com/pypeclub/OpenPype/pull/4052) +- Publisher: Better reset defer [\#4048](https://github.com/pypeclub/OpenPype/pull/4048) +- 
Publisher: Add thumbnail sources [\#4042](https://github.com/pypeclub/OpenPype/pull/4042) + +**🐛 Bug fixes** + +- General: Move default settings for template name [\#4119](https://github.com/pypeclub/OpenPype/pull/4119) +- Slack: notification fail in new tray publisher [\#4118](https://github.com/pypeclub/OpenPype/pull/4118) +- Nuke: loaded nodes set to first tab [\#4114](https://github.com/pypeclub/OpenPype/pull/4114) +- Nuke: load image first frame [\#4113](https://github.com/pypeclub/OpenPype/pull/4113) +- Files Widget: Ignore case sensitivity of extensions [\#4096](https://github.com/pypeclub/OpenPype/pull/4096) +- Webpublisher: extension is lowercased in Setting and in uploaded files [\#4095](https://github.com/pypeclub/OpenPype/pull/4095) +- Publish Report Viewer: Fix small bugs [\#4086](https://github.com/pypeclub/OpenPype/pull/4086) +- Igniter: fix regex to match semver better [\#4085](https://github.com/pypeclub/OpenPype/pull/4085) +- Maya: aov filtering [\#4083](https://github.com/pypeclub/OpenPype/pull/4083) +- Flame/Flare: Loading to multiple batches [\#4080](https://github.com/pypeclub/OpenPype/pull/4080) +- hiero: creator from settings with set maximum [\#4077](https://github.com/pypeclub/OpenPype/pull/4077) +- Nuke: resolve hashes in file name only for frame token [\#4074](https://github.com/pypeclub/OpenPype/pull/4074) +- Publisher: Fix cache of asset docs [\#4070](https://github.com/pypeclub/OpenPype/pull/4070) +- Webpublisher: cleanup wp extract thumbnail [\#4067](https://github.com/pypeclub/OpenPype/pull/4067) +- Settings UI: Locked setting can't bypass lock [\#4066](https://github.com/pypeclub/OpenPype/pull/4066) +- Loader: Fix comparison of repre name [\#4053](https://github.com/pypeclub/OpenPype/pull/4053) +- Deadline: Extract environment subprocess failure [\#4050](https://github.com/pypeclub/OpenPype/pull/4050) + +**🔀 Refactored code** + +- General: Collect entities plugin minor changes [\#4089](https://github.com/pypeclub/OpenPype/pull/4089) +- General: Direct interfaces import [\#4065](https://github.com/pypeclub/OpenPype/pull/4065) + +**Merged pull requests:** + +- Bump loader-utils from 1.4.1 to 1.4.2 in /website [\#4100](https://github.com/pypeclub/OpenPype/pull/4100) +- Online family for Tray Publisher [\#4093](https://github.com/pypeclub/OpenPype/pull/4093) +- Bump loader-utils from 1.4.0 to 1.4.1 in /website [\#4081](https://github.com/pypeclub/OpenPype/pull/4081) +- remove underscore from subset name [\#4059](https://github.com/pypeclub/OpenPype/pull/4059) +- Alembic Loader as Arnold Standin [\#4047](https://github.com/pypeclub/OpenPype/pull/4047) + +## [3.14.6](https://github.com/pypeclub/OpenPype/tree/3.14.6) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.5...3.14.6) + +### 📖 Documentation + +- Documentation: Minor updates to dev\_requirements.md [\#4025](https://github.com/pypeclub/OpenPype/pull/4025) + +**🆕 New features** + +- Nuke: add 13.2 variant [\#4041](https://github.com/pypeclub/OpenPype/pull/4041) + +**🚀 Enhancements** + +- Publish Report Viewer: Store reports locally on machine [\#4040](https://github.com/pypeclub/OpenPype/pull/4040) +- General: More specific error in burnins script [\#4026](https://github.com/pypeclub/OpenPype/pull/4026) +- General: Extract review does not crash with old settings overrides [\#4023](https://github.com/pypeclub/OpenPype/pull/4023) +- Publisher: Convertors for legacy instances [\#4020](https://github.com/pypeclub/OpenPype/pull/4020) +- workflows: adding milestone creator and assigner 
[\#4018](https://github.com/pypeclub/OpenPype/pull/4018) +- Publisher: Catch creator errors [\#4015](https://github.com/pypeclub/OpenPype/pull/4015) + +**🐛 Bug fixes** + +- Hiero - effect collection fixes [\#4038](https://github.com/pypeclub/OpenPype/pull/4038) +- Nuke - loader clip correct hash conversion in path [\#4037](https://github.com/pypeclub/OpenPype/pull/4037) +- Maya: Soft fail when applying capture preset [\#4034](https://github.com/pypeclub/OpenPype/pull/4034) +- Igniter: handle missing directory [\#4032](https://github.com/pypeclub/OpenPype/pull/4032) +- StandalonePublisher: Fix thumbnail publishing [\#4029](https://github.com/pypeclub/OpenPype/pull/4029) +- Experimental Tools: Fix publisher import [\#4027](https://github.com/pypeclub/OpenPype/pull/4027) +- Houdini: fix wrong path in ASS loader [\#4016](https://github.com/pypeclub/OpenPype/pull/4016) + +**🔀 Refactored code** + +- General: Import lib functions from lib [\#4017](https://github.com/pypeclub/OpenPype/pull/4017) + +## [3.14.5](https://github.com/pypeclub/OpenPype/tree/3.14.5) (2022-10-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.4...3.14.5) + +**🚀 Enhancements** + +- Maya: add OBJ extractor to model family [\#4021](https://github.com/pypeclub/OpenPype/pull/4021) +- Publish report viewer tool [\#4010](https://github.com/pypeclub/OpenPype/pull/4010) +- Nuke | Global: adding custom tags representation filtering [\#4009](https://github.com/pypeclub/OpenPype/pull/4009) +- Publisher: Create context has shared data for collection phase [\#3995](https://github.com/pypeclub/OpenPype/pull/3995) +- Resolve: updating to v18 compatibility [\#3986](https://github.com/pypeclub/OpenPype/pull/3986) + +**🐛 Bug fixes** + +- TrayPublisher: Fix missing argument [\#4019](https://github.com/pypeclub/OpenPype/pull/4019) +- General: Fix python 2 compatibility of ffmpeg and oiio tools discovery [\#4011](https://github.com/pypeclub/OpenPype/pull/4011) + +**🔀 Refactored code** + +- Maya: Removed unused imports [\#4008](https://github.com/pypeclub/OpenPype/pull/4008) +- Unreal: Fix import of moved function [\#4007](https://github.com/pypeclub/OpenPype/pull/4007) +- Houdini: Change import of RepairAction [\#4005](https://github.com/pypeclub/OpenPype/pull/4005) +- Nuke/Hiero: Refactor openpype.api imports [\#4000](https://github.com/pypeclub/OpenPype/pull/4000) +- TVPaint: Defined with HostBase [\#3994](https://github.com/pypeclub/OpenPype/pull/3994) + +**Merged pull requests:** + +- Unreal: Remove redundant Creator stub [\#4012](https://github.com/pypeclub/OpenPype/pull/4012) +- Unreal: add `uproject` extension to Unreal project template [\#4004](https://github.com/pypeclub/OpenPype/pull/4004) +- Unreal: fix order of includes [\#4002](https://github.com/pypeclub/OpenPype/pull/4002) +- Fusion: Implement backwards compatibility \(+/- Fusion 17.2\) [\#3958](https://github.com/pypeclub/OpenPype/pull/3958) + +## [3.14.4](https://github.com/pypeclub/OpenPype/tree/3.14.4) (2022-10-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.3...3.14.4) + +**🆕 New features** + +- Webpublisher: use max next published version number for all items in batch [\#3961](https://github.com/pypeclub/OpenPype/pull/3961) +- General: Control Thumbnail integration via explicit configuration profiles [\#3951](https://github.com/pypeclub/OpenPype/pull/3951) + +**🚀 Enhancements** + +- Publisher: Multiselection in card view [\#3993](https://github.com/pypeclub/OpenPype/pull/3993) +- TrayPublisher: Original Basename cause crash 
too early [\#3990](https://github.com/pypeclub/OpenPype/pull/3990) +- Tray Publisher: add `originalBasename` data to simple creators [\#3988](https://github.com/pypeclub/OpenPype/pull/3988) +- General: Custom paths to ffmpeg and OpenImageIO tools [\#3982](https://github.com/pypeclub/OpenPype/pull/3982) +- Integrate: Preserve existing subset group if instance does not set it for new version [\#3976](https://github.com/pypeclub/OpenPype/pull/3976) +- Publisher: Prepare publisher controller for remote publishing [\#3972](https://github.com/pypeclub/OpenPype/pull/3972) +- Maya: new style dataclasses in maya deadline submitter plugin [\#3968](https://github.com/pypeclub/OpenPype/pull/3968) +- Maya: Define preferred Qt bindings for Qt.py and qtpy [\#3963](https://github.com/pypeclub/OpenPype/pull/3963) +- Settings: Move imageio from project anatomy to project settings \[pypeclub\] [\#3959](https://github.com/pypeclub/OpenPype/pull/3959) +- TrayPublisher: Extract thumbnail for other families [\#3952](https://github.com/pypeclub/OpenPype/pull/3952) +- Publisher: Pass instance to subset name method on update [\#3949](https://github.com/pypeclub/OpenPype/pull/3949) +- General: Set root environments before DCC launch [\#3947](https://github.com/pypeclub/OpenPype/pull/3947) +- Refactor: changed legacy way to update database for Hero version integrate [\#3941](https://github.com/pypeclub/OpenPype/pull/3941) +- Maya: Moved plugin from global to maya [\#3939](https://github.com/pypeclub/OpenPype/pull/3939) +- Publisher: Create dialog is part of main window [\#3936](https://github.com/pypeclub/OpenPype/pull/3936) +- Fusion: Implement Alembic and FBX mesh loader [\#3927](https://github.com/pypeclub/OpenPype/pull/3927) + +**🐛 Bug fixes** + +- TrayPublisher: Disable sequences in batch mov creator [\#3996](https://github.com/pypeclub/OpenPype/pull/3996) +- Fix - tags might be missing on representation [\#3985](https://github.com/pypeclub/OpenPype/pull/3985) +- Resolve: Fix usage of functions from lib [\#3983](https://github.com/pypeclub/OpenPype/pull/3983) +- Maya: remove invalid prefix token for non-multipart outputs [\#3981](https://github.com/pypeclub/OpenPype/pull/3981) +- Ftrack: Fix schema cache for Python 2 [\#3980](https://github.com/pypeclub/OpenPype/pull/3980) +- Maya: add object to attr.s declaration [\#3973](https://github.com/pypeclub/OpenPype/pull/3973) +- Maya: Deadline OutputFilePath hack regression for Renderman [\#3950](https://github.com/pypeclub/OpenPype/pull/3950) +- Houdini: Fix validate workfile paths for non-parm file references [\#3948](https://github.com/pypeclub/OpenPype/pull/3948) +- Photoshop: missed sync published version of workfile with workfile [\#3946](https://github.com/pypeclub/OpenPype/pull/3946) +- Maya: Set default value for RenderSetupIncludeLights option [\#3944](https://github.com/pypeclub/OpenPype/pull/3944) +- Maya: fix regression of Renderman Deadline hack [\#3943](https://github.com/pypeclub/OpenPype/pull/3943) +- Kitsu: 2 fixes, nb\_frames and Shot type error [\#3940](https://github.com/pypeclub/OpenPype/pull/3940) +- Tray: Change order of attribute changes [\#3938](https://github.com/pypeclub/OpenPype/pull/3938) +- AttributeDefs: Fix crashing multivalue of files widget [\#3937](https://github.com/pypeclub/OpenPype/pull/3937) +- General: Fix links query on hero version [\#3900](https://github.com/pypeclub/OpenPype/pull/3900) +- Publisher: Files Drag n Drop cleanup [\#3888](https://github.com/pypeclub/OpenPype/pull/3888) + +**🔀 Refactored code** + +- Flame: Import
lib functions from lib [\#3992](https://github.com/pypeclub/OpenPype/pull/3992) +- General: Fix deprecated warning in legacy creator [\#3978](https://github.com/pypeclub/OpenPype/pull/3978) +- Blender: Remove openpype api imports [\#3977](https://github.com/pypeclub/OpenPype/pull/3977) +- General: Use direct import of resources [\#3964](https://github.com/pypeclub/OpenPype/pull/3964) +- General: Direct settings imports [\#3934](https://github.com/pypeclub/OpenPype/pull/3934) +- General: import 'Logger' from 'openpype.lib' [\#3926](https://github.com/pypeclub/OpenPype/pull/3926) +- General: Remove deprecated functions from lib [\#3907](https://github.com/pypeclub/OpenPype/pull/3907) + +**Merged pull requests:** + +- Maya + Yeti: Load Yeti Cache fix frame number recognition [\#3942](https://github.com/pypeclub/OpenPype/pull/3942) +- Fusion: Implement callbacks to Fusion's event system thread [\#3928](https://github.com/pypeclub/OpenPype/pull/3928) +- Photoshop: create single frame image in Ftrack as review [\#3908](https://github.com/pypeclub/OpenPype/pull/3908) + +## [3.14.3](https://github.com/pypeclub/OpenPype/tree/3.14.3) (2022-10-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.2...3.14.3) + +**🚀 Enhancements** + +- Publisher: Enhancement proposals [\#3897](https://github.com/pypeclub/OpenPype/pull/3897) + +**🐛 Bug fixes** + +- Maya: Fix Render single camera validator [\#3929](https://github.com/pypeclub/OpenPype/pull/3929) +- Flame: loading multilayer exr to batch/reel is working [\#3901](https://github.com/pypeclub/OpenPype/pull/3901) +- Hiero: Fix inventory check on launch [\#3895](https://github.com/pypeclub/OpenPype/pull/3895) +- WebPublisher: Fix import after refactor [\#3891](https://github.com/pypeclub/OpenPype/pull/3891) + +**🔀 Refactored code** + +- Maya: Remove unused 'openpype.api' imports in plugins [\#3925](https://github.com/pypeclub/OpenPype/pull/3925) +- Resolve: Use new Extractor location [\#3918](https://github.com/pypeclub/OpenPype/pull/3918) +- Unreal: Use new Extractor location [\#3917](https://github.com/pypeclub/OpenPype/pull/3917) +- Flame: Use new Extractor location [\#3916](https://github.com/pypeclub/OpenPype/pull/3916) +- Houdini: Use new Extractor location [\#3894](https://github.com/pypeclub/OpenPype/pull/3894) +- Harmony: Use new Extractor location [\#3893](https://github.com/pypeclub/OpenPype/pull/3893) + +**Merged pull requests:** + +- Maya: Fix Scene Inventory possibly starting off-screen due to maya preferences [\#3923](https://github.com/pypeclub/OpenPype/pull/3923) + +## [3.14.2](https://github.com/pypeclub/OpenPype/tree/3.14.2) (2022-09-12) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.1...3.14.2) + +### 📖 Documentation + +- Documentation: Anatomy templates [\#3618](https://github.com/pypeclub/OpenPype/pull/3618) + +**🆕 New features** + +- Nuke: Build workfile by template [\#3763](https://github.com/pypeclub/OpenPype/pull/3763) +- Houdini: Publishing workfiles [\#3697](https://github.com/pypeclub/OpenPype/pull/3697) +- Global: making collect audio plugin global [\#3679](https://github.com/pypeclub/OpenPype/pull/3679) + +**🚀 Enhancements** + +- Flame: Adding Creator's retimed shot and handles switch [\#3826](https://github.com/pypeclub/OpenPype/pull/3826) +- Flame: OpenPype submenu to batch and media manager [\#3825](https://github.com/pypeclub/OpenPype/pull/3825) +- General: Better pixmap scaling [\#3809](https://github.com/pypeclub/OpenPype/pull/3809) +- Photoshop: attempt to speed up ExtractImage 
[\#3793](https://github.com/pypeclub/OpenPype/pull/3793) +- SyncServer: Added cli commands for sync server [\#3765](https://github.com/pypeclub/OpenPype/pull/3765) +- Kitsu: Drop 'entities root' setting. [\#3739](https://github.com/pypeclub/OpenPype/pull/3739) +- git: update gitignore [\#3722](https://github.com/pypeclub/OpenPype/pull/3722) +- Blender: Publisher collect workfile representation [\#3670](https://github.com/pypeclub/OpenPype/pull/3670) +- Maya: move set render settings menu entry [\#3669](https://github.com/pypeclub/OpenPype/pull/3669) +- Scene Inventory: Maya add actions to select from or to scene [\#3659](https://github.com/pypeclub/OpenPype/pull/3659) +- Scene Inventory: Add subsetGroup column [\#3658](https://github.com/pypeclub/OpenPype/pull/3658) + +**🐛 Bug fixes** + +- General: Fix Pattern access in client code [\#3828](https://github.com/pypeclub/OpenPype/pull/3828) +- Launcher: Skip opening last work file works for groups [\#3822](https://github.com/pypeclub/OpenPype/pull/3822) +- Maya: Publishing data key change [\#3811](https://github.com/pypeclub/OpenPype/pull/3811) +- Igniter: Fix status handling when version is already installed [\#3804](https://github.com/pypeclub/OpenPype/pull/3804) +- Resolve: Addon import is Python 2 compatible [\#3798](https://github.com/pypeclub/OpenPype/pull/3798) +- Hiero: retimed clip publishing is working [\#3792](https://github.com/pypeclub/OpenPype/pull/3792) +- nuke: validate write node is not failing due to wrong type [\#3780](https://github.com/pypeclub/OpenPype/pull/3780) +- Fix - changed format of version string in pyproject.toml [\#3777](https://github.com/pypeclub/OpenPype/pull/3777) +- Ftrack status fix typo prgoress -\> progress [\#3761](https://github.com/pypeclub/OpenPype/pull/3761) +- Fix version resolution [\#3757](https://github.com/pypeclub/OpenPype/pull/3757) +- Maya: `containerise` don't skip empty values [\#3674](https://github.com/pypeclub/OpenPype/pull/3674) + +**🔀 Refactored code** + +- Photoshop: Use new Extractor location [\#3789](https://github.com/pypeclub/OpenPype/pull/3789) +- Blender: Use new Extractor location [\#3787](https://github.com/pypeclub/OpenPype/pull/3787) +- AfterEffects: Use new Extractor location [\#3784](https://github.com/pypeclub/OpenPype/pull/3784) +- General: Remove unused teshost [\#3773](https://github.com/pypeclub/OpenPype/pull/3773) +- General: Copied 'Extractor' plugin to publish pipeline [\#3771](https://github.com/pypeclub/OpenPype/pull/3771) +- General: Move queries of asset and representation links [\#3770](https://github.com/pypeclub/OpenPype/pull/3770) +- General: Move create project folders to pipeline [\#3768](https://github.com/pypeclub/OpenPype/pull/3768) +- General: Create project function moved to client code [\#3766](https://github.com/pypeclub/OpenPype/pull/3766) +- Maya: Refactor submit deadline to use AbstractSubmitDeadline [\#3759](https://github.com/pypeclub/OpenPype/pull/3759) +- General: Change publish template settings location [\#3755](https://github.com/pypeclub/OpenPype/pull/3755) +- General: Move hostdirname functionality into host [\#3749](https://github.com/pypeclub/OpenPype/pull/3749) +- General: Move publish utils to pipeline [\#3745](https://github.com/pypeclub/OpenPype/pull/3745) +- Houdini: Define houdini as addon [\#3735](https://github.com/pypeclub/OpenPype/pull/3735) +- Fusion: Defined fusion as addon [\#3733](https://github.com/pypeclub/OpenPype/pull/3733) +- Flame: Defined flame as addon [\#3732](https://github.com/pypeclub/OpenPype/pull/3732) +- 
Resolve: Define resolve as addon [\#3727](https://github.com/pypeclub/OpenPype/pull/3727) + +**Merged pull requests:** + +- Standalone Publisher: Ignore empty labels, then still use name like other asset models [\#3779](https://github.com/pypeclub/OpenPype/pull/3779) +- Kitsu - sync\_all\_project - add list ignore\_projects [\#3776](https://github.com/pypeclub/OpenPype/pull/3776) + +## [3.14.1](https://github.com/pypeclub/OpenPype/tree/3.14.1) (2022-08-30) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.14.0...3.14.1) + +### 📖 Documentation + +- Documentation: Few updates [\#3698](https://github.com/pypeclub/OpenPype/pull/3698) +- Documentation: Settings development [\#3660](https://github.com/pypeclub/OpenPype/pull/3660) + +**🆕 New features** + +- Webpublisher: change create flatten image into tri state [\#3678](https://github.com/pypeclub/OpenPype/pull/3678) +- Blender: validators code correction with settings and defaults [\#3662](https://github.com/pypeclub/OpenPype/pull/3662) + +**🚀 Enhancements** + +- General: Thumbnail can use project roots [\#3750](https://github.com/pypeclub/OpenPype/pull/3750) +- Settings: Remove settings lock on tray exit [\#3720](https://github.com/pypeclub/OpenPype/pull/3720) +- General: Added helper getters to modules manager [\#3712](https://github.com/pypeclub/OpenPype/pull/3712) +- Unreal: Define unreal as module and use host class [\#3701](https://github.com/pypeclub/OpenPype/pull/3701) +- Settings: Lock settings UI session [\#3700](https://github.com/pypeclub/OpenPype/pull/3700) +- General: Benevolent context label collector [\#3686](https://github.com/pypeclub/OpenPype/pull/3686) +- Ftrack: Store ftrack entities on hierarchy integration to instances [\#3677](https://github.com/pypeclub/OpenPype/pull/3677) +- Ftrack: More logs related to auto sync value change [\#3671](https://github.com/pypeclub/OpenPype/pull/3671) +- Blender: ops refresh manager after process events [\#3663](https://github.com/pypeclub/OpenPype/pull/3663) + +**🐛 Bug fixes** + +- Maya: Fix typo in getPanel argument `with_focus` -\> `withFocus` [\#3753](https://github.com/pypeclub/OpenPype/pull/3753) +- General: Smaller fixes of imports [\#3748](https://github.com/pypeclub/OpenPype/pull/3748) +- General: Logger tweaks [\#3741](https://github.com/pypeclub/OpenPype/pull/3741) +- Nuke: missing job dependency if multiple bake streams [\#3737](https://github.com/pypeclub/OpenPype/pull/3737) +- Nuke: color-space settings from anatomy is working [\#3721](https://github.com/pypeclub/OpenPype/pull/3721) +- Settings: Fix studio default anatomy save [\#3716](https://github.com/pypeclub/OpenPype/pull/3716) +- Maya: Use project name instead of project code [\#3709](https://github.com/pypeclub/OpenPype/pull/3709) +- Settings: Fix project overrides save [\#3708](https://github.com/pypeclub/OpenPype/pull/3708) +- Workfiles tool: Fix published workfile filtering [\#3704](https://github.com/pypeclub/OpenPype/pull/3704) +- PS, AE: Provide default variant value for workfile subset [\#3703](https://github.com/pypeclub/OpenPype/pull/3703) +- RoyalRender: handle host name that is not set [\#3695](https://github.com/pypeclub/OpenPype/pull/3695) +- Flame: retime is working on clip publishing [\#3684](https://github.com/pypeclub/OpenPype/pull/3684) +- Webpublisher: added check for empty context [\#3682](https://github.com/pypeclub/OpenPype/pull/3682) + +**🔀 Refactored code** + +- General: Move delivery logic to pipeline [\#3751](https://github.com/pypeclub/OpenPype/pull/3751) +- General: Host
addons cleanup [\#3744](https://github.com/pypeclub/OpenPype/pull/3744) +- Webpublisher: Webpublisher is used as addon [\#3740](https://github.com/pypeclub/OpenPype/pull/3740) +- Photoshop: Defined photoshop as addon [\#3736](https://github.com/pypeclub/OpenPype/pull/3736) +- Harmony: Defined harmony as addon [\#3734](https://github.com/pypeclub/OpenPype/pull/3734) +- General: Module interfaces cleanup [\#3731](https://github.com/pypeclub/OpenPype/pull/3731) +- AfterEffects: Move AE functions from general lib [\#3730](https://github.com/pypeclub/OpenPype/pull/3730) +- Blender: Define blender as module [\#3729](https://github.com/pypeclub/OpenPype/pull/3729) +- AfterEffects: Define AfterEffects as module [\#3728](https://github.com/pypeclub/OpenPype/pull/3728) +- General: Replace PypeLogger with Logger [\#3725](https://github.com/pypeclub/OpenPype/pull/3725) +- Nuke: Define nuke as module [\#3724](https://github.com/pypeclub/OpenPype/pull/3724) +- General: Move subset name functionality [\#3723](https://github.com/pypeclub/OpenPype/pull/3723) +- General: Move creators plugin getter [\#3714](https://github.com/pypeclub/OpenPype/pull/3714) +- General: Move constants from lib to client [\#3713](https://github.com/pypeclub/OpenPype/pull/3713) +- Loader: Subset groups using client operations [\#3710](https://github.com/pypeclub/OpenPype/pull/3710) +- TVPaint: Defined as module [\#3707](https://github.com/pypeclub/OpenPype/pull/3707) +- StandalonePublisher: Define StandalonePublisher as module [\#3706](https://github.com/pypeclub/OpenPype/pull/3706) +- TrayPublisher: Define TrayPublisher as module [\#3705](https://github.com/pypeclub/OpenPype/pull/3705) +- General: Move context specific functions to context tools [\#3702](https://github.com/pypeclub/OpenPype/pull/3702) + +**Merged pull requests:** + +- Hiero: Define hiero as module [\#3717](https://github.com/pypeclub/OpenPype/pull/3717) +- Deadline: better logging for DL webservice failures [\#3694](https://github.com/pypeclub/OpenPype/pull/3694) +- Photoshop: resize saved images in ExtractReview for ffmpeg [\#3676](https://github.com/pypeclub/OpenPype/pull/3676) +- Nuke: Validation refactor to new publisher [\#3567](https://github.com/pypeclub/OpenPype/pull/3567) + +## [3.14.0](https://github.com/pypeclub/OpenPype/tree/3.14.0) (2022-08-18) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.13.0...3.14.0) + +**🆕 New features** + +- Maya: Build workfile by template [\#3578](https://github.com/pypeclub/OpenPype/pull/3578) +- Maya: Implementation of JSON layout for Unreal workflow [\#3353](https://github.com/pypeclub/OpenPype/pull/3353) +- Maya: Build workfile by template [\#3315](https://github.com/pypeclub/OpenPype/pull/3315) + +**🚀 Enhancements** + +- Ftrack: Additional component metadata [\#3685](https://github.com/pypeclub/OpenPype/pull/3685) +- Ftrack: Set task status on farm publishing [\#3680](https://github.com/pypeclub/OpenPype/pull/3680) +- Ftrack: Set task status on task creation in integrate hierarchy [\#3675](https://github.com/pypeclub/OpenPype/pull/3675) +- Maya: Disable rendering of all lights for render instances submitted through Deadline.
[\#3661](https://github.com/pypeclub/OpenPype/pull/3661) +- General: Optimized OCIO configs [\#3650](https://github.com/pypeclub/OpenPype/pull/3650) + +**🐛 Bug fixes** + +- General: Switch from hero version to versioned works [\#3691](https://github.com/pypeclub/OpenPype/pull/3691) +- General: Fix finding of last version [\#3656](https://github.com/pypeclub/OpenPype/pull/3656) +- General: Extract Review can scale with pixel aspect ratio [\#3644](https://github.com/pypeclub/OpenPype/pull/3644) +- Maya: Refactor moved usage of CreateRender settings [\#3643](https://github.com/pypeclub/OpenPype/pull/3643) +- General: Hero version representations have full context [\#3638](https://github.com/pypeclub/OpenPype/pull/3638) +- Nuke: color settings for render write node is working now [\#3632](https://github.com/pypeclub/OpenPype/pull/3632) +- Maya: FBX support for update in reference loader [\#3631](https://github.com/pypeclub/OpenPype/pull/3631) + +**🔀 Refactored code** + +- General: Use client projects getter [\#3673](https://github.com/pypeclub/OpenPype/pull/3673) +- Resolve: Match folder structure to other hosts [\#3653](https://github.com/pypeclub/OpenPype/pull/3653) +- Maya: Hosts as modules [\#3647](https://github.com/pypeclub/OpenPype/pull/3647) +- TimersManager: Plugins are in timers manager module [\#3639](https://github.com/pypeclub/OpenPype/pull/3639) +- General: Move workfiles functions into pipeline [\#3637](https://github.com/pypeclub/OpenPype/pull/3637) +- General: Workfiles builder using query functions [\#3598](https://github.com/pypeclub/OpenPype/pull/3598) + +**Merged pull requests:** + +- Deadline: Global job pre load is not Pype 2 compatible [\#3666](https://github.com/pypeclub/OpenPype/pull/3666) +- Maya: Remove unused get current renderer logic [\#3645](https://github.com/pypeclub/OpenPype/pull/3645) +- Kitsu|Fix: Movie project type fails & first loop children names [\#3636](https://github.com/pypeclub/OpenPype/pull/3636) +- fix the bug of failing to extract look when UDIMs format used in AiImage [\#3628](https://github.com/pypeclub/OpenPype/pull/3628) + +## [3.13.0](https://github.com/pypeclub/OpenPype/tree/3.13.0) (2022-08-09) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.2...3.13.0) + +**🆕 New features** + +- Support for multiple installed versions - 3.13 [\#3605](https://github.com/pypeclub/OpenPype/pull/3605) +- Traypublisher: simple editorial publishing [\#3492](https://github.com/pypeclub/OpenPype/pull/3492) + +**🚀 Enhancements** + +- Editorial: Mix audio use side file for ffmpeg filters [\#3630](https://github.com/pypeclub/OpenPype/pull/3630) +- Ftrack: Comment template can contain optional keys [\#3615](https://github.com/pypeclub/OpenPype/pull/3615) +- Ftrack: Add more metadata to ftrack components [\#3612](https://github.com/pypeclub/OpenPype/pull/3612) +- General: Add context to pyblish context [\#3594](https://github.com/pypeclub/OpenPype/pull/3594) +- Kitsu: Shot&Sequence name with prefix over appends [\#3593](https://github.com/pypeclub/OpenPype/pull/3593) +- Photoshop: implemented {layer} placeholder in subset template [\#3591](https://github.com/pypeclub/OpenPype/pull/3591) +- General: Python module appdirs from git [\#3589](https://github.com/pypeclub/OpenPype/pull/3589) +- Ftrack: Update ftrack api to 2.3.3 [\#3588](https://github.com/pypeclub/OpenPype/pull/3588) +- General: New Integrator small fixes [\#3583](https://github.com/pypeclub/OpenPype/pull/3583) +- Maya: Render Creator has configurable options.
[\#3097](https://github.com/pypeclub/OpenPype/pull/3097) + +**🐛 Bug fixes** + +- Maya: fix aov separator in Redshift [\#3625](https://github.com/pypeclub/OpenPype/pull/3625) +- Fix for multi-version build on Mac [\#3622](https://github.com/pypeclub/OpenPype/pull/3622) +- Ftrack: Sync hierarchical attributes can handle new created entities [\#3621](https://github.com/pypeclub/OpenPype/pull/3621) +- General: Extract review aspect ratio scale is calculated by ffmpeg [\#3620](https://github.com/pypeclub/OpenPype/pull/3620) +- Maya: Fix types of default settings [\#3617](https://github.com/pypeclub/OpenPype/pull/3617) +- Integrator: Don't force to have dot before frame [\#3611](https://github.com/pypeclub/OpenPype/pull/3611) +- AfterEffects: refactored integrate doesn't work for multi frame publishes [\#3610](https://github.com/pypeclub/OpenPype/pull/3610) +- Maya look data contents fails with custom attribute on group [\#3607](https://github.com/pypeclub/OpenPype/pull/3607) +- TrayPublisher: Fix wrong conflict merge [\#3600](https://github.com/pypeclub/OpenPype/pull/3600) +- Bugfix: Add OCIO as submodule to prepare for handling `maketx` color space conversion. [\#3590](https://github.com/pypeclub/OpenPype/pull/3590) +- Fix general settings environment variables resolution [\#3587](https://github.com/pypeclub/OpenPype/pull/3587) +- Editorial publishing workflow improvements [\#3580](https://github.com/pypeclub/OpenPype/pull/3580) +- General: Update imports in start script [\#3579](https://github.com/pypeclub/OpenPype/pull/3579) +- Nuke: render family integration consistency [\#3576](https://github.com/pypeclub/OpenPype/pull/3576) +- Ftrack: Handle missing published path in integrator [\#3570](https://github.com/pypeclub/OpenPype/pull/3570) +- Nuke: publish existing frames with slate with correct range [\#3555](https://github.com/pypeclub/OpenPype/pull/3555) + +**🔀 Refactored code** + +- General: Plugin settings handled by plugins [\#3623](https://github.com/pypeclub/OpenPype/pull/3623) +- General: Naive implementation of document create, update, delete [\#3601](https://github.com/pypeclub/OpenPype/pull/3601) +- General: Use query functions in general code [\#3596](https://github.com/pypeclub/OpenPype/pull/3596) +- General: Separate extraction of template data into more functions [\#3574](https://github.com/pypeclub/OpenPype/pull/3574) +- General: Lib cleanup [\#3571](https://github.com/pypeclub/OpenPype/pull/3571) + +**Merged pull requests:** + +- Webpublisher: timeout for PS studio processing [\#3619](https://github.com/pypeclub/OpenPype/pull/3619) +- Core: translated validate\_containers.py into New publisher style [\#3614](https://github.com/pypeclub/OpenPype/pull/3614) +- Enable write color sets on animation publish automatically [\#3582](https://github.com/pypeclub/OpenPype/pull/3582) + +## [3.12.2](https://github.com/pypeclub/OpenPype/tree/3.12.2) (2022-07-27) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.1...3.12.2) + +### 📖 Documentation + +- Update website with more studios [\#3554](https://github.com/pypeclub/OpenPype/pull/3554) +- Documentation: Update publishing dev docs [\#3549](https://github.com/pypeclub/OpenPype/pull/3549) + +**🚀 Enhancements** + +- General: Global thumbnail extractor is ready for more cases [\#3561](https://github.com/pypeclub/OpenPype/pull/3561) +- Maya: add additional validators to Settings [\#3540](https://github.com/pypeclub/OpenPype/pull/3540) +- General: Interactive console in cli
[\#3526](https://github.com/pypeclub/OpenPype/pull/3526) +- Ftrack: Automatic daily review session creation can define trigger hour [\#3516](https://github.com/pypeclub/OpenPype/pull/3516) +- Ftrack: add source into Note [\#3509](https://github.com/pypeclub/OpenPype/pull/3509) +- Ftrack: Trigger custom ftrack topic of project structure creation [\#3506](https://github.com/pypeclub/OpenPype/pull/3506) +- Settings UI: Add extract to file action on project view [\#3505](https://github.com/pypeclub/OpenPype/pull/3505) +- Add pack and unpack convenience scripts [\#3502](https://github.com/pypeclub/OpenPype/pull/3502) +- General: Event system [\#3499](https://github.com/pypeclub/OpenPype/pull/3499) +- NewPublisher: Keep plugins with mismatch target in report [\#3498](https://github.com/pypeclub/OpenPype/pull/3498) +- Nuke: load clip with options from settings [\#3497](https://github.com/pypeclub/OpenPype/pull/3497) +- TrayPublisher: implemented render\_mov\_batch [\#3486](https://github.com/pypeclub/OpenPype/pull/3486) +- Migrate basic families to the new Tray Publisher [\#3469](https://github.com/pypeclub/OpenPype/pull/3469) +- Enhance powershell build scripts [\#1827](https://github.com/pypeclub/OpenPype/pull/1827) + +**🐛 Bug fixes** + +- Maya: fix Review image plane attribute [\#3569](https://github.com/pypeclub/OpenPype/pull/3569) +- Maya: Fix animated attributes \(i.e. overscan\) on loaded cameras breaking review publishing. [\#3562](https://github.com/pypeclub/OpenPype/pull/3562) +- NewPublisher: Python 2 compatible html escape [\#3559](https://github.com/pypeclub/OpenPype/pull/3559) +- Remove invalid submodules from `/vendor` [\#3557](https://github.com/pypeclub/OpenPype/pull/3557) +- General: Remove hosts filter on integrator plugins [\#3556](https://github.com/pypeclub/OpenPype/pull/3556) +- Settings: Clean default values of environments [\#3550](https://github.com/pypeclub/OpenPype/pull/3550) +- Module interfaces: Fix import error [\#3547](https://github.com/pypeclub/OpenPype/pull/3547) +- Workfiles tool: Show of tool and its flags [\#3539](https://github.com/pypeclub/OpenPype/pull/3539) +- General: Create workfile documents works again [\#3538](https://github.com/pypeclub/OpenPype/pull/3538) +- Additional fixes for powershell scripts [\#3525](https://github.com/pypeclub/OpenPype/pull/3525) +- Maya: Added wrapper around cmds.setAttr [\#3523](https://github.com/pypeclub/OpenPype/pull/3523) +- Nuke: double slate [\#3521](https://github.com/pypeclub/OpenPype/pull/3521) +- General: Fix hash of centos oiio archive [\#3519](https://github.com/pypeclub/OpenPype/pull/3519) +- Maya: Renderman display output fix [\#3514](https://github.com/pypeclub/OpenPype/pull/3514) +- TrayPublisher: Simple creation enhancements and fixes [\#3513](https://github.com/pypeclub/OpenPype/pull/3513) +- NewPublisher: Publish attributes are properly collected [\#3510](https://github.com/pypeclub/OpenPype/pull/3510) +- TrayPublisher: Make sure host name is filled [\#3504](https://github.com/pypeclub/OpenPype/pull/3504) +- NewPublisher: Groups work and enum multivalue [\#3501](https://github.com/pypeclub/OpenPype/pull/3501) + +**🔀 Refactored code** + +- General: Use query functions in integrator [\#3563](https://github.com/pypeclub/OpenPype/pull/3563) +- General: Mongo core connection moved to client [\#3531](https://github.com/pypeclub/OpenPype/pull/3531) +- Refactor Integrate Asset [\#3530](https://github.com/pypeclub/OpenPype/pull/3530) +- General: Client docstrings cleanup
[\#3529](https://github.com/pypeclub/OpenPype/pull/3529) +- General: Move load related functions into pipeline [\#3527](https://github.com/pypeclub/OpenPype/pull/3527) +- General: Get current context document functions [\#3522](https://github.com/pypeclub/OpenPype/pull/3522) +- Kitsu: Use query function from client [\#3496](https://github.com/pypeclub/OpenPype/pull/3496) +- TimersManager: Use query functions [\#3495](https://github.com/pypeclub/OpenPype/pull/3495) +- Deadline: Use query functions [\#3466](https://github.com/pypeclub/OpenPype/pull/3466) +- Refactor Integrate Asset [\#2898](https://github.com/pypeclub/OpenPype/pull/2898) + +**Merged pull requests:** + +- Maya: fix active pane loss [\#3566](https://github.com/pypeclub/OpenPype/pull/3566) + +## [3.12.1](https://github.com/pypeclub/OpenPype/tree/3.12.1) (2022-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.12.0...3.12.1) + +### 📖 Documentation + +- Docs: Added minimal permissions for MongoDB [\#3441](https://github.com/pypeclub/OpenPype/pull/3441) + +**🆕 New features** + +- Maya: Add VDB to Arnold loader [\#3433](https://github.com/pypeclub/OpenPype/pull/3433) + +**🚀 Enhancements** + +- TrayPublisher: Added more options for grouping of instances [\#3494](https://github.com/pypeclub/OpenPype/pull/3494) +- NewPublisher: Align creator attributes from top to bottom [\#3487](https://github.com/pypeclub/OpenPype/pull/3487) +- NewPublisher: Added ability to use label of instance [\#3484](https://github.com/pypeclub/OpenPype/pull/3484) +- General: Creator Plugins have access to project [\#3476](https://github.com/pypeclub/OpenPype/pull/3476) +- General: Better arguments order in creator init [\#3475](https://github.com/pypeclub/OpenPype/pull/3475) +- Ftrack: Trigger custom ftrack events on project creation and preparation [\#3465](https://github.com/pypeclub/OpenPype/pull/3465) +- Windows installer: Clean old files and add version subfolder [\#3445](https://github.com/pypeclub/OpenPype/pull/3445) +- Blender: Bugfix - Set fps properly on open [\#3426](https://github.com/pypeclub/OpenPype/pull/3426) +- Hiero: Add custom scripts menu [\#3425](https://github.com/pypeclub/OpenPype/pull/3425) +- Blender: pre pyside install for all platforms [\#3400](https://github.com/pypeclub/OpenPype/pull/3400) +- Maya: Add additional playblast options to review Extractor. [\#3384](https://github.com/pypeclub/OpenPype/pull/3384) +- Maya: Ability to set resolution for playblasts from asset, and override through review instance. 
[\#3360](https://github.com/pypeclub/OpenPype/pull/3360) +- Maya: Redshift Volume Loader Implement update, remove, switch + fix vdb sequence support [\#3197](https://github.com/pypeclub/OpenPype/pull/3197) +- Maya: Implement `iter_visible_nodes_in_range` for extracting Alembics [\#3100](https://github.com/pypeclub/OpenPype/pull/3100) + +**🐛 Bug fixes** + +- TrayPublisher: Keep use instance label in list view [\#3493](https://github.com/pypeclub/OpenPype/pull/3493) +- General: Extract review use first frame of input sequence [\#3491](https://github.com/pypeclub/OpenPype/pull/3491) +- General: Fix Plist loading for application launch [\#3485](https://github.com/pypeclub/OpenPype/pull/3485) +- Nuke: Workfile tools open on start [\#3479](https://github.com/pypeclub/OpenPype/pull/3479) +- New Publisher: Disabled context change allows creation [\#3478](https://github.com/pypeclub/OpenPype/pull/3478) +- General: thumbnail extractor fix [\#3474](https://github.com/pypeclub/OpenPype/pull/3474) +- Kitsu: bugfix with sync-service and publish plugins [\#3473](https://github.com/pypeclub/OpenPype/pull/3473) +- Flame: solved problem with multi-selected loading [\#3470](https://github.com/pypeclub/OpenPype/pull/3470) +- General: Fix query function in update logic [\#3468](https://github.com/pypeclub/OpenPype/pull/3468) +- Resolve: removed few bugs [\#3464](https://github.com/pypeclub/OpenPype/pull/3464) +- General: Delete old versions is safer when ftrack is disabled [\#3462](https://github.com/pypeclub/OpenPype/pull/3462) +- Nuke: fixing metadata slate TC difference [\#3455](https://github.com/pypeclub/OpenPype/pull/3455) +- Nuke: prerender reviewable fails [\#3450](https://github.com/pypeclub/OpenPype/pull/3450) +- Maya: fix hashing in Python 3 for tile rendering [\#3447](https://github.com/pypeclub/OpenPype/pull/3447) +- LogViewer: Escape html characters in log message [\#3443](https://github.com/pypeclub/OpenPype/pull/3443) +- Nuke: Slate frame is integrated [\#3427](https://github.com/pypeclub/OpenPype/pull/3427) +- Maya: Camera extra data - additional fix for \#3304 [\#3386](https://github.com/pypeclub/OpenPype/pull/3386) +- Maya: Handle excluding `model` family from frame range validator.
[\#3370](https://github.com/pypeclub/OpenPype/pull/3370) + +**🔀 Refactored code** + +- Maya: Merge animation + pointcache extractor logic [\#3461](https://github.com/pypeclub/OpenPype/pull/3461) +- Maya: Re-use `maintained_time` from lib [\#3460](https://github.com/pypeclub/OpenPype/pull/3460) +- General: Use query functions in global plugins [\#3459](https://github.com/pypeclub/OpenPype/pull/3459) +- Clockify: Use query functions in clockify actions [\#3458](https://github.com/pypeclub/OpenPype/pull/3458) +- General: Use query functions in rest api calls [\#3457](https://github.com/pypeclub/OpenPype/pull/3457) +- General: Use query functions in openpype lib functions [\#3454](https://github.com/pypeclub/OpenPype/pull/3454) +- General: Use query functions in load utils [\#3446](https://github.com/pypeclub/OpenPype/pull/3446) +- General: Move publish plugin and publish render abstractions [\#3442](https://github.com/pypeclub/OpenPype/pull/3442) +- General: Use Anatomy after move to pipeline [\#3436](https://github.com/pypeclub/OpenPype/pull/3436) +- General: Anatomy moved to pipeline [\#3435](https://github.com/pypeclub/OpenPype/pull/3435) +- Fusion: Use client query functions [\#3380](https://github.com/pypeclub/OpenPype/pull/3380) +- Resolve: Use client query functions [\#3379](https://github.com/pypeclub/OpenPype/pull/3379) +- General: Host implementation defined with class [\#3337](https://github.com/pypeclub/OpenPype/pull/3337) + +## [3.12.0](https://github.com/pypeclub/OpenPype/tree/3.12.0) (2022-06-28) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.1...3.12.0) + +### 📖 Documentation + +- Fix typo in documentation: pyenv on mac [\#3417](https://github.com/pypeclub/OpenPype/pull/3417) +- Linux: update OIIO package [\#3401](https://github.com/pypeclub/OpenPype/pull/3401) + +**🆕 New features** + +- Shotgrid: Add production beta of shotgrid integration [\#2921](https://github.com/pypeclub/OpenPype/pull/2921) + +**🚀 Enhancements** + +- Webserver: Added CORS middleware [\#3422](https://github.com/pypeclub/OpenPype/pull/3422) +- Attribute Defs UI: Files widget show what is allowed to drop in [\#3411](https://github.com/pypeclub/OpenPype/pull/3411) +- General: Add ability to change user value for templates [\#3366](https://github.com/pypeclub/OpenPype/pull/3366) +- Hosts: More options for in-host callbacks [\#3357](https://github.com/pypeclub/OpenPype/pull/3357) +- Multiverse: expose some settings to GUI [\#3350](https://github.com/pypeclub/OpenPype/pull/3350) +- Maya: Allow more data to be published along camera 🎥 [\#3304](https://github.com/pypeclub/OpenPype/pull/3304) +- Add root keys and project keys to create starting folder [\#2755](https://github.com/pypeclub/OpenPype/pull/2755) + +**🐛 Bug fixes** + +- NewPublisher: Fix subset name change on change of creator plugin [\#3420](https://github.com/pypeclub/OpenPype/pull/3420) +- Bug: fix invalid avalon import [\#3418](https://github.com/pypeclub/OpenPype/pull/3418) +- Nuke: Fix keyword argument in query function [\#3414](https://github.com/pypeclub/OpenPype/pull/3414) +- Houdini: fix loading and updating vdb/bgeo sequences [\#3408](https://github.com/pypeclub/OpenPype/pull/3408) +- Nuke: Collect representation files based on Write [\#3407](https://github.com/pypeclub/OpenPype/pull/3407) +- General: Filter representations before integration start [\#3398](https://github.com/pypeclub/OpenPype/pull/3398) +- Maya: look collector typo [\#3392](https://github.com/pypeclub/OpenPype/pull/3392) +- TVPaint: Make sure exit code
is set to not None [\#3382](https://github.com/pypeclub/OpenPype/pull/3382) +- Maya: vray device aspect ratio fix [\#3381](https://github.com/pypeclub/OpenPype/pull/3381) +- Flame: bunch of publishing issues [\#3377](https://github.com/pypeclub/OpenPype/pull/3377) +- Harmony: added unc path to zifile command in Harmony [\#3372](https://github.com/pypeclub/OpenPype/pull/3372) +- Standalone: settings improvements [\#3355](https://github.com/pypeclub/OpenPype/pull/3355) +- Nuke: Load full model hierarchy by default [\#3328](https://github.com/pypeclub/OpenPype/pull/3328) +- Nuke: multiple baking streams with correct slate [\#3245](https://github.com/pypeclub/OpenPype/pull/3245) +- Maya: fix image prefix warning in validator [\#3128](https://github.com/pypeclub/OpenPype/pull/3128) + +**🔀 Refactored code** + +- Unreal: Use client query functions [\#3421](https://github.com/pypeclub/OpenPype/pull/3421) +- General: Move editorial lib to pipeline [\#3419](https://github.com/pypeclub/OpenPype/pull/3419) +- Kitsu: renaming to plural func sync\_all\_projects [\#3397](https://github.com/pypeclub/OpenPype/pull/3397) +- Houdini: Use client query functions [\#3395](https://github.com/pypeclub/OpenPype/pull/3395) +- Hiero: Use client query functions [\#3393](https://github.com/pypeclub/OpenPype/pull/3393) +- Nuke: Use client query functions [\#3391](https://github.com/pypeclub/OpenPype/pull/3391) +- Maya: Use client query functions [\#3385](https://github.com/pypeclub/OpenPype/pull/3385) +- Harmony: Use client query functions [\#3378](https://github.com/pypeclub/OpenPype/pull/3378) +- Celaction: Use client query functions [\#3376](https://github.com/pypeclub/OpenPype/pull/3376) +- Photoshop: Use client query functions [\#3375](https://github.com/pypeclub/OpenPype/pull/3375) +- AfterEffects: Use client query functions [\#3374](https://github.com/pypeclub/OpenPype/pull/3374) +- TVPaint: Use client query functions [\#3340](https://github.com/pypeclub/OpenPype/pull/3340) +- Ftrack: Use client query functions [\#3339](https://github.com/pypeclub/OpenPype/pull/3339) +- Standalone Publisher: Use client query functions [\#3330](https://github.com/pypeclub/OpenPype/pull/3330) + +**Merged pull requests:** + +- Sync Queue: Added far future value for null values for dates [\#3371](https://github.com/pypeclub/OpenPype/pull/3371) +- Maya - added support for single frame playblast review [\#3369](https://github.com/pypeclub/OpenPype/pull/3369) +- Houdini: Implement Redshift Proxy Export [\#3196](https://github.com/pypeclub/OpenPype/pull/3196) + +## [3.11.1](https://github.com/pypeclub/OpenPype/tree/3.11.1) (2022-06-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.11.0...3.11.1) + +**🆕 New features** + +- Flame: custom export temp folder [\#3346](https://github.com/pypeclub/OpenPype/pull/3346) +- Nuke: removing third-party plugins [\#3344](https://github.com/pypeclub/OpenPype/pull/3344) + +**🚀 Enhancements** + +- Pyblish Pype: Hiding/Close issues [\#3367](https://github.com/pypeclub/OpenPype/pull/3367) +- Ftrack: Removed requirement of pypeclub role from default settings [\#3354](https://github.com/pypeclub/OpenPype/pull/3354) +- Kitsu: Prevent crash on missing frames information [\#3352](https://github.com/pypeclub/OpenPype/pull/3352) +- Ftrack: Open browser from tray [\#3320](https://github.com/pypeclub/OpenPype/pull/3320) +- Enhancement: More control over thumbnail processing. 
[\#3259](https://github.com/pypeclub/OpenPype/pull/3259) + +**🐛 Bug fixes** + +- Nuke: bake streams with slate on farm [\#3368](https://github.com/pypeclub/OpenPype/pull/3368) +- Harmony: audio validator has wrong logic [\#3364](https://github.com/pypeclub/OpenPype/pull/3364) +- Nuke: Fix missing variable in extract thumbnail [\#3363](https://github.com/pypeclub/OpenPype/pull/3363) +- Nuke: Fix precollect writes [\#3361](https://github.com/pypeclub/OpenPype/pull/3361) +- AE - fix validate\_scene\_settings and renderLocal [\#3358](https://github.com/pypeclub/OpenPype/pull/3358) +- deadline: fixing misidentification of reviewables [\#3356](https://github.com/pypeclub/OpenPype/pull/3356) +- General: Create only one thumbnail per instance [\#3351](https://github.com/pypeclub/OpenPype/pull/3351) +- nuke: adding extract thumbnail settings 3.10 [\#3347](https://github.com/pypeclub/OpenPype/pull/3347) +- General: Fix last version function [\#3345](https://github.com/pypeclub/OpenPype/pull/3345) +- Deadline: added OPENPYPE\_MONGO to filter [\#3336](https://github.com/pypeclub/OpenPype/pull/3336) +- Nuke: fixing farm publishing if review is disabled [\#3306](https://github.com/pypeclub/OpenPype/pull/3306) +- Maya: Fix Yeti errors on Create, Publish and Load [\#3198](https://github.com/pypeclub/OpenPype/pull/3198) + +**🔀 Refactored code** + +- Webpublisher: Use client query functions [\#3333](https://github.com/pypeclub/OpenPype/pull/3333) + +## [3.11.0](https://github.com/pypeclub/OpenPype/tree/3.11.0) (2022-06-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.10.0...3.11.0) + +### 📖 Documentation + +- Documentation: Add app key to template documentation [\#3299](https://github.com/pypeclub/OpenPype/pull/3299) +- doc: adding royal render and multiverse to the web site [\#3285](https://github.com/pypeclub/OpenPype/pull/3285) +- Module: Kitsu module [\#2650](https://github.com/pypeclub/OpenPype/pull/2650) + +**🆕 New features** + +- Multiverse: fixed composition write, full docs, cosmetics [\#3178](https://github.com/pypeclub/OpenPype/pull/3178) + +**🚀 Enhancements** + +- Settings: Settings can be extracted from UI [\#3323](https://github.com/pypeclub/OpenPype/pull/3323) +- updated poetry installation source [\#3316](https://github.com/pypeclub/OpenPype/pull/3316) +- Ftrack: Action to easily create daily review session [\#3310](https://github.com/pypeclub/OpenPype/pull/3310) +- TVPaint: Extractor use mark in/out range to render [\#3309](https://github.com/pypeclub/OpenPype/pull/3309) +- Ftrack: Delivery action can work on ReviewSessions [\#3307](https://github.com/pypeclub/OpenPype/pull/3307) +- Maya: Look assigner UI improvements [\#3298](https://github.com/pypeclub/OpenPype/pull/3298) +- Ftrack: Action to transfer values of hierarchical attributes [\#3284](https://github.com/pypeclub/OpenPype/pull/3284) +- Maya: better handling of legacy review subsets names [\#3269](https://github.com/pypeclub/OpenPype/pull/3269) +- General: Updated windows oiio tool [\#3268](https://github.com/pypeclub/OpenPype/pull/3268) +- Unreal: add support for skeletalMesh and staticMesh to loaders [\#3267](https://github.com/pypeclub/OpenPype/pull/3267) +- Maya: reference loaders could store placeholder in referenced url [\#3264](https://github.com/pypeclub/OpenPype/pull/3264) +- TVPaint: Init file for TVPaint worker also handle guideline images [\#3250](https://github.com/pypeclub/OpenPype/pull/3250) +- Nuke: Change default icon path in settings [\#3247](https://github.com/pypeclub/OpenPype/pull/3247) +-
Maya: publishing of animation and pointcache on a farm [\#3225](https://github.com/pypeclub/OpenPype/pull/3225) +- Maya: Look assigner UI improvements [\#3208](https://github.com/pypeclub/OpenPype/pull/3208) +- Nuke: add pointcache and animation to loader [\#3186](https://github.com/pypeclub/OpenPype/pull/3186) +- Nuke: Add a gizmo menu [\#3172](https://github.com/pypeclub/OpenPype/pull/3172) +- Support for Unreal 5 [\#3122](https://github.com/pypeclub/OpenPype/pull/3122) + +**🐛 Bug fixes** + +- General: Handle empty source key on instance [\#3342](https://github.com/pypeclub/OpenPype/pull/3342) +- Houdini: Fix Houdini VDB manage update wrong file attribute name [\#3322](https://github.com/pypeclub/OpenPype/pull/3322) +- Nuke: anatomy compatibility issue hacks [\#3321](https://github.com/pypeclub/OpenPype/pull/3321) +- hiero: otio p3 compatibility issue - metadata on effect use update 3.11 [\#3314](https://github.com/pypeclub/OpenPype/pull/3314) +- General: Vendorized modules for Python 2 and update poetry lock [\#3305](https://github.com/pypeclub/OpenPype/pull/3305) +- Fix - added local targets to install host [\#3303](https://github.com/pypeclub/OpenPype/pull/3303) +- Settings: Add missing default settings for nuke gizmo [\#3301](https://github.com/pypeclub/OpenPype/pull/3301) +- Maya: Fix swapped width and height in reviews [\#3300](https://github.com/pypeclub/OpenPype/pull/3300) +- Maya: point cache publish handles Maya instances [\#3297](https://github.com/pypeclub/OpenPype/pull/3297) +- Global: extract review slate issues [\#3286](https://github.com/pypeclub/OpenPype/pull/3286) +- Webpublisher: return only active projects in ProjectsEndpoint [\#3281](https://github.com/pypeclub/OpenPype/pull/3281) +- Hiero: add support for task tags 3.10.x [\#3279](https://github.com/pypeclub/OpenPype/pull/3279) +- General: Fix Oiio tool path resolving [\#3278](https://github.com/pypeclub/OpenPype/pull/3278) +- Maya: Fix udim support for e.g.
uppercase \<UDIM\> tag [\#3266](https://github.com/pypeclub/OpenPype/pull/3266) +- Nuke: bake reformat was failing on string type [\#3261](https://github.com/pypeclub/OpenPype/pull/3261) +- Maya: hotfix Pxr multitexture in looks [\#3260](https://github.com/pypeclub/OpenPype/pull/3260) +- Unreal: Fix Camera Loading if Layout is missing [\#3255](https://github.com/pypeclub/OpenPype/pull/3255) +- Unreal: Fixed Animation loading in UE5 [\#3240](https://github.com/pypeclub/OpenPype/pull/3240) +- Unreal: Fixed Render creation in UE5 [\#3239](https://github.com/pypeclub/OpenPype/pull/3239) +- Unreal: Fixed Camera loading in UE5 [\#3238](https://github.com/pypeclub/OpenPype/pull/3238) +- Flame: debugging [\#3224](https://github.com/pypeclub/OpenPype/pull/3224) +- add silent audio to slate [\#3162](https://github.com/pypeclub/OpenPype/pull/3162) +- Add timecode to slate [\#2929](https://github.com/pypeclub/OpenPype/pull/2929) + +**🔀 Refactored code** + +- Blender: Use client query functions [\#3331](https://github.com/pypeclub/OpenPype/pull/3331) +- General: Define query functions [\#3288](https://github.com/pypeclub/OpenPype/pull/3288) + +**Merged pull requests:** + +- Maya: add pointcache family to gpu cache loader [\#3318](https://github.com/pypeclub/OpenPype/pull/3318) +- Maya look: skip empty file attributes [\#3274](https://github.com/pypeclub/OpenPype/pull/3274) + +## [3.10.0](https://github.com/pypeclub/OpenPype/tree/3.10.0) (2022-05-26) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.8...3.10.0) + +### 📖 Documentation + +- Docs: add all-contributors config and initial list [\#3094](https://github.com/pypeclub/OpenPype/pull/3094) +- Nuke docs with videos [\#3052](https://github.com/pypeclub/OpenPype/pull/3052) + +**🆕 New features** + +- General: OpenPype modules publish plugins are registered in host [\#3180](https://github.com/pypeclub/OpenPype/pull/3180) +- General: Creator plugins from addons can be registered [\#3179](https://github.com/pypeclub/OpenPype/pull/3179) +- Ftrack: Single image reviewable [\#3157](https://github.com/pypeclub/OpenPype/pull/3157) +- Nuke: Expose write attributes to settings [\#3123](https://github.com/pypeclub/OpenPype/pull/3123) +- Hiero: Initial frame publish support [\#3106](https://github.com/pypeclub/OpenPype/pull/3106) +- Unreal: Render Publishing [\#2917](https://github.com/pypeclub/OpenPype/pull/2917) +- AfterEffects: Implemented New Publisher [\#2838](https://github.com/pypeclub/OpenPype/pull/2838) +- Unreal: Rendering implementation [\#2410](https://github.com/pypeclub/OpenPype/pull/2410) + +**🚀 Enhancements** + +- Maya: FBX camera export [\#3253](https://github.com/pypeclub/OpenPype/pull/3253) +- General: updating common vendor `scriptmenu` to 1.5.2 [\#3246](https://github.com/pypeclub/OpenPype/pull/3246) +- Project Manager: Allow to paste Tasks into multiple assets at the same time [\#3226](https://github.com/pypeclub/OpenPype/pull/3226) +- Project manager: Sped up project load [\#3216](https://github.com/pypeclub/OpenPype/pull/3216) +- Loader UI: Speed issues of loader with sync server [\#3199](https://github.com/pypeclub/OpenPype/pull/3199) +- Looks: add basic support for Renderman [\#3190](https://github.com/pypeclub/OpenPype/pull/3190) +- Maya: added clean\_import option to Import loader [\#3181](https://github.com/pypeclub/OpenPype/pull/3181) +- Add the scripts menu definition to nuke [\#3168](https://github.com/pypeclub/OpenPype/pull/3168) +- Maya: add maya 2023 to default applications
[\#3167](https://github.com/pypeclub/OpenPype/pull/3167) +- Compressed bgeo publishing in SAP and Houdini loader [\#3153](https://github.com/pypeclub/OpenPype/pull/3153) +- General: Add 'dataclasses' to required python modules [\#3149](https://github.com/pypeclub/OpenPype/pull/3149) +- Hooks: Tweak logging grammar [\#3147](https://github.com/pypeclub/OpenPype/pull/3147) +- Nuke: settings for reformat node in CreateWriteRender node [\#3143](https://github.com/pypeclub/OpenPype/pull/3143) +- Houdini: Add loader for alembic through Alembic Archive node [\#3140](https://github.com/pypeclub/OpenPype/pull/3140) +- Publisher: UI Modifications and fixes [\#3139](https://github.com/pypeclub/OpenPype/pull/3139) +- General: Simplified OP modules/addons import [\#3137](https://github.com/pypeclub/OpenPype/pull/3137) +- Terminal: Tweak coloring of TrayModuleManager logging enabled states [\#3133](https://github.com/pypeclub/OpenPype/pull/3133) +- General: Cleanup some Loader docstrings [\#3131](https://github.com/pypeclub/OpenPype/pull/3131) +- Nuke: render instance with subset name filtered overrides [\#3117](https://github.com/pypeclub/OpenPype/pull/3117) +- Unreal: Layout and Camera update and remove functions reimplemented and improvements [\#3116](https://github.com/pypeclub/OpenPype/pull/3116) +- Settings: Remove environment groups from settings [\#3115](https://github.com/pypeclub/OpenPype/pull/3115) +- TVPaint: Match renderlayer key with other hosts [\#3110](https://github.com/pypeclub/OpenPype/pull/3110) +- Ftrack: AssetVersion status on publish [\#3108](https://github.com/pypeclub/OpenPype/pull/3108) +- Tray publisher: Simple families from settings [\#3105](https://github.com/pypeclub/OpenPype/pull/3105) +- Local Settings UI: Overlay messages on save and reset [\#3104](https://github.com/pypeclub/OpenPype/pull/3104) +- General: Remove repos related logic [\#3087](https://github.com/pypeclub/OpenPype/pull/3087) +- Standalone publisher: add support for bgeo and vdb [\#3080](https://github.com/pypeclub/OpenPype/pull/3080) +- Houdini: Fix FPS + outdated content pop-ups [\#3079](https://github.com/pypeclub/OpenPype/pull/3079) +- General: Add global log verbose arguments [\#3070](https://github.com/pypeclub/OpenPype/pull/3070) +- Flame: extract presets distribution [\#3063](https://github.com/pypeclub/OpenPype/pull/3063) +- Update collect\_render.py [\#3055](https://github.com/pypeclub/OpenPype/pull/3055) +- SiteSync: Added compute\_resource\_sync\_sites to sync\_server\_module [\#2983](https://github.com/pypeclub/OpenPype/pull/2983) +- Maya: Implement Hardware Renderer 2.0 support for Render Products [\#2611](https://github.com/pypeclub/OpenPype/pull/2611) + +**🐛 Bug fixes** + +- nuke: use framerange issue [\#3254](https://github.com/pypeclub/OpenPype/pull/3254) +- Ftrack: Chunk sizes for queries has minimal condition [\#3244](https://github.com/pypeclub/OpenPype/pull/3244) +- Maya: renderman displays needs to be filtered [\#3242](https://github.com/pypeclub/OpenPype/pull/3242) +- Ftrack: Validate that the user exists on ftrack [\#3237](https://github.com/pypeclub/OpenPype/pull/3237) +- Maya: Fix support for multiple resolutions [\#3236](https://github.com/pypeclub/OpenPype/pull/3236) +- TVPaint: Look for more groups than 12 [\#3228](https://github.com/pypeclub/OpenPype/pull/3228) +- Hiero: debugging frame range and other 3.10 [\#3222](https://github.com/pypeclub/OpenPype/pull/3222) +- Project Manager: Fix persistent editors on project change 
[\#3218](https://github.com/pypeclub/OpenPype/pull/3218) +- Deadline: instance data overwrite fix [\#3214](https://github.com/pypeclub/OpenPype/pull/3214) +- Ftrack: Push hierarchical attributes action works [\#3210](https://github.com/pypeclub/OpenPype/pull/3210) +- Standalone Publisher: Always create new representation for thumbnail [\#3203](https://github.com/pypeclub/OpenPype/pull/3203) +- Photoshop: skip collector when automatic testing [\#3202](https://github.com/pypeclub/OpenPype/pull/3202) +- Nuke: render/workfile version sync doesn't work on farm [\#3185](https://github.com/pypeclub/OpenPype/pull/3185) +- Ftrack: Review image only if there are no mp4 reviews [\#3183](https://github.com/pypeclub/OpenPype/pull/3183) +- Ftrack: Locations deepcopy issue [\#3177](https://github.com/pypeclub/OpenPype/pull/3177) +- General: Avoid creating multiple thumbnails [\#3176](https://github.com/pypeclub/OpenPype/pull/3176) +- General/Hiero: better clip duration calculation [\#3169](https://github.com/pypeclub/OpenPype/pull/3169) +- General: Oiio conversion for ffmpeg checks for invalid characters [\#3166](https://github.com/pypeclub/OpenPype/pull/3166) +- Fix for attaching render to subset [\#3164](https://github.com/pypeclub/OpenPype/pull/3164) +- Harmony: fixed missing task name in render instance [\#3163](https://github.com/pypeclub/OpenPype/pull/3163) +- Ftrack: Action delete old versions formatting works [\#3152](https://github.com/pypeclub/OpenPype/pull/3152) +- Deadline: fix the output directory [\#3144](https://github.com/pypeclub/OpenPype/pull/3144) +- General: New Session schema [\#3141](https://github.com/pypeclub/OpenPype/pull/3141) +- General: Missing version on headless mode crash properly [\#3136](https://github.com/pypeclub/OpenPype/pull/3136) +- TVPaint: Composite layers in reversed order [\#3135](https://github.com/pypeclub/OpenPype/pull/3135) +- Nuke: fixing default settings for workfile builder loaders [\#3120](https://github.com/pypeclub/OpenPype/pull/3120) +- Nuke: fix anatomy imageio regex default [\#3119](https://github.com/pypeclub/OpenPype/pull/3119) +- General: Python 3 compatibility in queries [\#3112](https://github.com/pypeclub/OpenPype/pull/3112) +- General: TemplateResult can be copied [\#3099](https://github.com/pypeclub/OpenPype/pull/3099) +- General: Collect loaded versions skips not existing representations [\#3095](https://github.com/pypeclub/OpenPype/pull/3095) +- RoyalRender Control Submission - AVALON\_APP\_NAME default [\#3091](https://github.com/pypeclub/OpenPype/pull/3091) +- Ftrack: Update Create Folders action [\#3089](https://github.com/pypeclub/OpenPype/pull/3089) +- Maya: Collect Render fix any render cameras check [\#3088](https://github.com/pypeclub/OpenPype/pull/3088) +- Project Manager: Avoid unnecessary updates of asset documents [\#3083](https://github.com/pypeclub/OpenPype/pull/3083) +- Standalone publisher: Fix plugins install [\#3077](https://github.com/pypeclub/OpenPype/pull/3077) +- General: Extract review sequence is not converted with same names [\#3076](https://github.com/pypeclub/OpenPype/pull/3076) +- Webpublisher: Use variant value [\#3068](https://github.com/pypeclub/OpenPype/pull/3068) +- Nuke: Add aov matching even for remainder and prerender [\#3060](https://github.com/pypeclub/OpenPype/pull/3060) +- Fix support for Renderman in Maya [\#3006](https://github.com/pypeclub/OpenPype/pull/3006) + +**🔀 Refactored code** + +- Avalon repo removed from Jobs workflow [\#3193](https://github.com/pypeclub/OpenPype/pull/3193) +- General: 
Remove remaining imports from avalon [\#3130](https://github.com/pypeclub/OpenPype/pull/3130) +- General: Move mongo db logic and remove avalon repository [\#3066](https://github.com/pypeclub/OpenPype/pull/3066) +- General: Move host install [\#3009](https://github.com/pypeclub/OpenPype/pull/3009) + +**Merged pull requests:** + +- Harmony: message length in 21.1 [\#3257](https://github.com/pypeclub/OpenPype/pull/3257) +- Harmony: 21.1 fix [\#3249](https://github.com/pypeclub/OpenPype/pull/3249) +- Maya: added jpg to filter for Image Plane Loader [\#3223](https://github.com/pypeclub/OpenPype/pull/3223) +- Webpublisher: replace space by underscore in subset names [\#3160](https://github.com/pypeclub/OpenPype/pull/3160) +- StandalonePublisher: removed Extract Background plugins [\#3093](https://github.com/pypeclub/OpenPype/pull/3093) +- Nuke: added suspend\_publish knob [\#3078](https://github.com/pypeclub/OpenPype/pull/3078) +- Bump async from 2.6.3 to 2.6.4 in /website [\#3065](https://github.com/pypeclub/OpenPype/pull/3065) +- SiteSync: Download all workfile inputs [\#2966](https://github.com/pypeclub/OpenPype/pull/2966) +- Photoshop: New Publisher [\#2933](https://github.com/pypeclub/OpenPype/pull/2933) +- Bump pillow from 9.0.0 to 9.0.1 [\#2880](https://github.com/pypeclub/OpenPype/pull/2880) +- AfterEffects: Allow configuration of default variant via Settings [\#2856](https://github.com/pypeclub/OpenPype/pull/2856) + +## [3.9.8](https://github.com/pypeclub/OpenPype/tree/3.9.8) (2022-05-19) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.7...3.9.8) + +## [3.9.7](https://github.com/pypeclub/OpenPype/tree/3.9.7) (2022-05-11) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.6...3.9.7) + +## [3.9.6](https://github.com/pypeclub/OpenPype/tree/3.9.6) (2022-05-03) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.5...3.9.6) + +## [3.9.5](https://github.com/pypeclub/OpenPype/tree/3.9.5) (2022-04-25) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.4...3.9.5) + +## [3.9.4](https://github.com/pypeclub/OpenPype/tree/3.9.4) (2022-04-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.3...3.9.4) + +### 📖 Documentation + +- Documentation: more info about Tasks [\#3062](https://github.com/pypeclub/OpenPype/pull/3062) +- Documentation: Python requirements to 3.7.9 [\#3035](https://github.com/pypeclub/OpenPype/pull/3035) +- Website Docs: Remove unused pages [\#2974](https://github.com/pypeclub/OpenPype/pull/2974) + +**🆕 New features** + +- General: Local overrides for environment variables [\#3045](https://github.com/pypeclub/OpenPype/pull/3045) +- Flame: Flare integration preparation [\#2928](https://github.com/pypeclub/OpenPype/pull/2928) + +**🚀 Enhancements** + +- TVPaint: Added init file for worker to trigger missing sound file dialog [\#3053](https://github.com/pypeclub/OpenPype/pull/3053) +- Ftrack: Custom attributes can be filled in slate values [\#3036](https://github.com/pypeclub/OpenPype/pull/3036) +- Resolve environment variable in google drive credential path [\#3008](https://github.com/pypeclub/OpenPype/pull/3008) + +**🐛 Bug fixes** + +- GitHub: Updated push-protected action in github workflow [\#3064](https://github.com/pypeclub/OpenPype/pull/3064) +- Nuke: Typos in imports from Nuke implementation [\#3061](https://github.com/pypeclub/OpenPype/pull/3061) +- Hotfix: fixing deadline job publishing [\#3059](https://github.com/pypeclub/OpenPype/pull/3059) +- General: Extract Review handle
invalid characters for ffmpeg [\#3050](https://github.com/pypeclub/OpenPype/pull/3050) +- Slate Review: Support to keep format on slate concatenation [\#3049](https://github.com/pypeclub/OpenPype/pull/3049) +- Webpublisher: fix processing of workfile [\#3048](https://github.com/pypeclub/OpenPype/pull/3048) +- Ftrack: Integrate ftrack api fix [\#3044](https://github.com/pypeclub/OpenPype/pull/3044) +- Webpublisher - removed wrong hardcoded family [\#3043](https://github.com/pypeclub/OpenPype/pull/3043) +- LibraryLoader: Use current project for asset query in families filter [\#3042](https://github.com/pypeclub/OpenPype/pull/3042) +- SiteSync: Providers ignore that site is disabled [\#3041](https://github.com/pypeclub/OpenPype/pull/3041) +- Unreal: Creator import fixes [\#3040](https://github.com/pypeclub/OpenPype/pull/3040) +- SiteSync: fix transitive alternate sites, fix dropdown in Local Settings [\#3018](https://github.com/pypeclub/OpenPype/pull/3018) +- Maya: invalid review flag on rendered AOVs [\#2915](https://github.com/pypeclub/OpenPype/pull/2915) + +**Merged pull requests:** + +- Deadline: reworked pools assignment [\#3051](https://github.com/pypeclub/OpenPype/pull/3051) +- Houdini: Avoid ImportError on `hdefereval` when Houdini runs without UI [\#2987](https://github.com/pypeclub/OpenPype/pull/2987) + +## [3.9.3](https://github.com/pypeclub/OpenPype/tree/3.9.3) (2022-04-07) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.2...3.9.3) + +### 📖 Documentation + +- Documentation: Added mention of adding My Drive as a root [\#2999](https://github.com/pypeclub/OpenPype/pull/2999) +- Website Docs: Manager Ftrack fix broken links [\#2979](https://github.com/pypeclub/OpenPype/pull/2979) +- Docs: Added MongoDB requirements [\#2951](https://github.com/pypeclub/OpenPype/pull/2951) +- Documentation: New publisher develop docs [\#2896](https://github.com/pypeclub/OpenPype/pull/2896) + +**🆕 New features** + +- Ftrack: Add description integrator [\#3027](https://github.com/pypeclub/OpenPype/pull/3027) +- nuke: bypass baking [\#2992](https://github.com/pypeclub/OpenPype/pull/2992) +- Publishing textures for Unreal [\#2988](https://github.com/pypeclub/OpenPype/pull/2988) +- Maya to Unreal: Static and Skeletal Meshes [\#2978](https://github.com/pypeclub/OpenPype/pull/2978) +- Multiverse: Initial Support [\#2908](https://github.com/pypeclub/OpenPype/pull/2908) + +**🚀 Enhancements** + +- General: default workfile subset name for workfile [\#3011](https://github.com/pypeclub/OpenPype/pull/3011) +- Ftrack: Add more options for note text of integrate ftrack note [\#3025](https://github.com/pypeclub/OpenPype/pull/3025) +- Console Interpreter: Changed how console splitter size are reused on show [\#3016](https://github.com/pypeclub/OpenPype/pull/3016) +- Deadline: Use more suitable name for sequence review logic [\#3015](https://github.com/pypeclub/OpenPype/pull/3015) +- Nuke: add concurrency attr to deadline job [\#3005](https://github.com/pypeclub/OpenPype/pull/3005) +- Photoshop: create image without instance [\#3001](https://github.com/pypeclub/OpenPype/pull/3001) +- TVPaint: Render scene family [\#3000](https://github.com/pypeclub/OpenPype/pull/3000) +- Deadline: priority configurable in Maya jobs [\#2995](https://github.com/pypeclub/OpenPype/pull/2995) +- Nuke: ReviewDataMov Read RAW attribute [\#2985](https://github.com/pypeclub/OpenPype/pull/2985) +- General: `METADATA_KEYS` constant as `frozenset` for optimal immutable lookup 
[\#2980](https://github.com/pypeclub/OpenPype/pull/2980) +- General: Tools with host filters [\#2975](https://github.com/pypeclub/OpenPype/pull/2975) +- Hero versions: Use custom templates [\#2967](https://github.com/pypeclub/OpenPype/pull/2967) +- Slack: Added configurable maximum file size of review upload to Slack [\#2945](https://github.com/pypeclub/OpenPype/pull/2945) +- NewPublisher: Prepared implementation of optional pyblish plugin [\#2943](https://github.com/pypeclub/OpenPype/pull/2943) +- TVPaint: Extractor to convert PNG into EXR [\#2942](https://github.com/pypeclub/OpenPype/pull/2942) +- Workfiles tool: Save as published workfiles [\#2937](https://github.com/pypeclub/OpenPype/pull/2937) +- Workfiles: Open published workfiles [\#2925](https://github.com/pypeclub/OpenPype/pull/2925) +- General: Default modules loaded dynamically [\#2923](https://github.com/pypeclub/OpenPype/pull/2923) +- CI: change the version bump logic [\#2919](https://github.com/pypeclub/OpenPype/pull/2919) +- Deadline: Add headless argument [\#2916](https://github.com/pypeclub/OpenPype/pull/2916) +- Nuke: Add no-audio Tag [\#2911](https://github.com/pypeclub/OpenPype/pull/2911) +- Ftrack: Fill workfile in custom attribute [\#2906](https://github.com/pypeclub/OpenPype/pull/2906) +- Nuke: improving readability [\#2903](https://github.com/pypeclub/OpenPype/pull/2903) +- Settings UI: Add simple tooltips for settings entities [\#2901](https://github.com/pypeclub/OpenPype/pull/2901) + +**🐛 Bug fixes** + +- General: Fix validate asset docs plug-in filename and class name [\#3029](https://github.com/pypeclub/OpenPype/pull/3029) +- Deadline: Fixed default value of use sequence for review [\#3033](https://github.com/pypeclub/OpenPype/pull/3033) +- Settings UI: Version column can be extended so versions are visible [\#3032](https://github.com/pypeclub/OpenPype/pull/3032) +- General: Fix import after movements [\#3028](https://github.com/pypeclub/OpenPype/pull/3028) +- Harmony: Added creating subset name for workfile from template [\#3024](https://github.com/pypeclub/OpenPype/pull/3024) +- AfterEffects: Added creating subset name for workfile from template [\#3023](https://github.com/pypeclub/OpenPype/pull/3023) +- General: Add example addons to ignored [\#3022](https://github.com/pypeclub/OpenPype/pull/3022) +- Maya: Remove missing import [\#3017](https://github.com/pypeclub/OpenPype/pull/3017) +- Ftrack: multiple reviewable components [\#3012](https://github.com/pypeclub/OpenPype/pull/3012) +- Tray publisher: Fixes after code movement [\#3010](https://github.com/pypeclub/OpenPype/pull/3010) +- Hosts: Remove path existence checks in 'add\_implementation\_envs' [\#3004](https://github.com/pypeclub/OpenPype/pull/3004) +- Nuke: fixing unicode type detection in effect loaders [\#3002](https://github.com/pypeclub/OpenPype/pull/3002) +- Fix - remove doubled dot in workfile created from template [\#2998](https://github.com/pypeclub/OpenPype/pull/2998) +- Nuke: removing redundant Ftrack asset when farm publishing [\#2996](https://github.com/pypeclub/OpenPype/pull/2996) +- PS: fix renaming subset incorrectly in PS [\#2991](https://github.com/pypeclub/OpenPype/pull/2991) +- Fix: Disable setuptools auto discovery [\#2990](https://github.com/pypeclub/OpenPype/pull/2990) +- AEL: fix opening existing workfile if no scene opened [\#2989](https://github.com/pypeclub/OpenPype/pull/2989) +- Maya: Don't do hardlinks on windows for look publishing [\#2986](https://github.com/pypeclub/OpenPype/pull/2986) +- Settings UI: Fix version completer
on linux [\#2981](https://github.com/pypeclub/OpenPype/pull/2981) +- Photoshop: Fix creation of subset names in PS review and workfile [\#2969](https://github.com/pypeclub/OpenPype/pull/2969) +- Slack: Added default for review\_upload\_limit for Slack [\#2965](https://github.com/pypeclub/OpenPype/pull/2965) +- General: OIIO conversion for ffmpeg can handle sequences [\#2958](https://github.com/pypeclub/OpenPype/pull/2958) +- Settings: Conditional dictionary avoid invalid logs [\#2956](https://github.com/pypeclub/OpenPype/pull/2956) +- General: Smaller fixes and typos [\#2950](https://github.com/pypeclub/OpenPype/pull/2950) +- LogViewer: Don't refresh on initialization [\#2949](https://github.com/pypeclub/OpenPype/pull/2949) +- nuke: python3 compatibility issue with `iteritems` [\#2948](https://github.com/pypeclub/OpenPype/pull/2948) +- General: anatomy data with correct task short key [\#2947](https://github.com/pypeclub/OpenPype/pull/2947) +- SceneInventory: Fix imports in UI [\#2944](https://github.com/pypeclub/OpenPype/pull/2944) +- Slack: add generic exception [\#2941](https://github.com/pypeclub/OpenPype/pull/2941) +- General: Python specific vendor paths on env injection [\#2939](https://github.com/pypeclub/OpenPype/pull/2939) +- General: More fail safe delete old versions [\#2936](https://github.com/pypeclub/OpenPype/pull/2936) +- Settings UI: Collapsed of collapsible wrapper works as expected [\#2934](https://github.com/pypeclub/OpenPype/pull/2934) +- Maya: Do not pass `set` to maya commands \(fixes support for older maya versions\) [\#2932](https://github.com/pypeclub/OpenPype/pull/2932) +- General: Don't print log record on OSError [\#2926](https://github.com/pypeclub/OpenPype/pull/2926) +- Hiero: Fix import of 'register\_event\_callback' [\#2924](https://github.com/pypeclub/OpenPype/pull/2924) +- Flame: centos related debugging [\#2922](https://github.com/pypeclub/OpenPype/pull/2922) +- Ftrack: Missing Ftrack id after editorial publish [\#2905](https://github.com/pypeclub/OpenPype/pull/2905) +- AfterEffects: Fix rendering for single frame in DL [\#2875](https://github.com/pypeclub/OpenPype/pull/2875) + +**🔀 Refactored code** + +- General: Move plugins register and discover [\#2935](https://github.com/pypeclub/OpenPype/pull/2935) +- General: Move Attribute Definitions from pipeline [\#2931](https://github.com/pypeclub/OpenPype/pull/2931) +- General: Removed silo references and terminal splash [\#2927](https://github.com/pypeclub/OpenPype/pull/2927) +- General: Move pipeline constants to OpenPype [\#2918](https://github.com/pypeclub/OpenPype/pull/2918) +- General: Move formatting and workfile functions [\#2914](https://github.com/pypeclub/OpenPype/pull/2914) +- General: Move remaining plugins from avalon [\#2912](https://github.com/pypeclub/OpenPype/pull/2912) + +**Merged pull requests:** + +- Maya: Allow to select invalid camera contents if no cameras found [\#3030](https://github.com/pypeclub/OpenPype/pull/3030) +- Bump paramiko from 2.9.2 to 2.10.1 [\#2973](https://github.com/pypeclub/OpenPype/pull/2973) +- Bump minimist from 1.2.5 to 1.2.6 in /website [\#2954](https://github.com/pypeclub/OpenPype/pull/2954) +- Bump node-forge from 1.2.1 to 1.3.0 in /website [\#2953](https://github.com/pypeclub/OpenPype/pull/2953) +- Maya - added transparency into review creator [\#2952](https://github.com/pypeclub/OpenPype/pull/2952) + +## [3.9.2](https://github.com/pypeclub/OpenPype/tree/3.9.2) (2022-04-04) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.1...3.9.2) + +##
[3.9.1](https://github.com/pypeclub/OpenPype/tree/3.9.1) (2022-03-18)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.9.0...3.9.1)
+
+**🚀 Enhancements**
+
+- General: Change how OPENPYPE\_DEBUG value is handled [\#2907](https://github.com/pypeclub/OpenPype/pull/2907)
+- nuke: imageio adding ocio config version 1.2 [\#2897](https://github.com/pypeclub/OpenPype/pull/2897)
+- Flame: support for comment with xml attribute overrides [\#2892](https://github.com/pypeclub/OpenPype/pull/2892)
+- Nuke: ExtractReviewSlate can handle more codecs and profiles [\#2879](https://github.com/pypeclub/OpenPype/pull/2879)
+- Flame: sequence used for reference video [\#2869](https://github.com/pypeclub/OpenPype/pull/2869)
+
+**🐛 Bug fixes**
+
+- General: Fix use of Anatomy roots [\#2904](https://github.com/pypeclub/OpenPype/pull/2904)
+- Fixing gap detection in extract review [\#2902](https://github.com/pypeclub/OpenPype/pull/2902)
+- Pyblish Pype - ensure current state is correct when entering new group order [\#2899](https://github.com/pypeclub/OpenPype/pull/2899)
+- SceneInventory: Fix import of load function [\#2894](https://github.com/pypeclub/OpenPype/pull/2894)
+- Harmony - fixed creator issue [\#2891](https://github.com/pypeclub/OpenPype/pull/2891)
+- General: Remove forgotten use of avalon Creator [\#2885](https://github.com/pypeclub/OpenPype/pull/2885)
+- General: Avoid circular import [\#2884](https://github.com/pypeclub/OpenPype/pull/2884)
+- Fixes for attaching loaded containers \(\#2837\) [\#2874](https://github.com/pypeclub/OpenPype/pull/2874)
+- Maya: Deformer node ids validation plugin [\#2826](https://github.com/pypeclub/OpenPype/pull/2826)
+- Flame Babypublisher optimization [\#2806](https://github.com/pypeclub/OpenPype/pull/2806)
+- hotfix: OIIO tool path - add extension on windows [\#2618](https://github.com/pypeclub/OpenPype/pull/2618)
+
+**🔀 Refactored code**
+
+- General: Reduce style usage to OpenPype repository [\#2889](https://github.com/pypeclub/OpenPype/pull/2889)
+- General: Move loader logic from avalon to openpype [\#2886](https://github.com/pypeclub/OpenPype/pull/2886)
+
+## [3.9.0](https://github.com/pypeclub/OpenPype/tree/3.9.0) (2022-03-14)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.2...3.9.0)
+
+**Deprecated:**
+
+- Houdini: Remove unused code [\#2779](https://github.com/pypeclub/OpenPype/pull/2779)
+- Loader: Remove default family states for hosts from code [\#2706](https://github.com/pypeclub/OpenPype/pull/2706)
+- AssetCreator: Remove the tool [\#2845](https://github.com/pypeclub/OpenPype/pull/2845)
+
+### 📖 Documentation
+
+- Documentation: fixed broken links [\#2799](https://github.com/pypeclub/OpenPype/pull/2799)
+- Documentation: broken link fix [\#2785](https://github.com/pypeclub/OpenPype/pull/2785)
+- Documentation: link fixes [\#2772](https://github.com/pypeclub/OpenPype/pull/2772)
+- Update docusaurus to latest version [\#2760](https://github.com/pypeclub/OpenPype/pull/2760)
+- Various testing updates [\#2726](https://github.com/pypeclub/OpenPype/pull/2726)
+- documentation: add example to `repack-version` command [\#2669](https://github.com/pypeclub/OpenPype/pull/2669)
+- Update docusaurus [\#2639](https://github.com/pypeclub/OpenPype/pull/2639)
+- Documentation: Fixed relative links [\#2621](https://github.com/pypeclub/OpenPype/pull/2621)
+- Documentation: Change Photoshop & AfterEffects plugin path [\#2878](https://github.com/pypeclub/OpenPype/pull/2878)
+
+**🆕 New features**
+
+- Flame: loading clips to reels 
[\#2622](https://github.com/pypeclub/OpenPype/pull/2622)
+- General: Store settings by OpenPype version [\#2570](https://github.com/pypeclub/OpenPype/pull/2570)
+
+**🚀 Enhancements**
+
+- New: Validation exceptions [\#2841](https://github.com/pypeclub/OpenPype/pull/2841)
+- General: Set context environments for non host applications [\#2803](https://github.com/pypeclub/OpenPype/pull/2803)
+- Houdini: Remove duplicate ValidateOutputNode plug-in [\#2780](https://github.com/pypeclub/OpenPype/pull/2780)
+- Tray publisher: New Tray Publisher host \(beta\) [\#2778](https://github.com/pypeclub/OpenPype/pull/2778)
+- Slack: Added regex for filtering on subset names [\#2775](https://github.com/pypeclub/OpenPype/pull/2775)
+- Houdini: Implement Reset Frame Range [\#2770](https://github.com/pypeclub/OpenPype/pull/2770)
+- Pyblish Pype: Remove redundant new line in installed fonts printing [\#2758](https://github.com/pypeclub/OpenPype/pull/2758)
+- Flame: use Shot Name on segment for asset name [\#2751](https://github.com/pypeclub/OpenPype/pull/2751)
+- Flame: adding validator source clip [\#2746](https://github.com/pypeclub/OpenPype/pull/2746)
+- Work Files: Preserve subversion comment of current filename by default [\#2734](https://github.com/pypeclub/OpenPype/pull/2734)
+- Maya: set Deadline job/batch name to original source workfile name instead of published workfile [\#2733](https://github.com/pypeclub/OpenPype/pull/2733)
+- Ftrack: Disable ftrack module by default [\#2732](https://github.com/pypeclub/OpenPype/pull/2732)
+- Project Manager: Disable add task, add asset and save button when not in a project [\#2727](https://github.com/pypeclub/OpenPype/pull/2727)
+- Dropbox: handle big files [\#2718](https://github.com/pypeclub/OpenPype/pull/2718)
+- Fusion Move PR: Minor tweaks to Fusion integration [\#2716](https://github.com/pypeclub/OpenPype/pull/2716)
+- RoyalRender: Minor enhancements [\#2700](https://github.com/pypeclub/OpenPype/pull/2700)
+- Nuke: prerender with review knob [\#2691](https://github.com/pypeclub/OpenPype/pull/2691)
+- Maya configurable unit validator [\#2680](https://github.com/pypeclub/OpenPype/pull/2680)
+- General: Add settings for CleanUpFarm and disable the plugin by default [\#2679](https://github.com/pypeclub/OpenPype/pull/2679)
+- Project Manager: Only allow scroll wheel edits when spinbox is active [\#2678](https://github.com/pypeclub/OpenPype/pull/2678)
+- Ftrack: Sync description to assets [\#2670](https://github.com/pypeclub/OpenPype/pull/2670)
+- Houdini: Moved to OpenPype [\#2658](https://github.com/pypeclub/OpenPype/pull/2658)
+- Maya: Move implementation to OpenPype [\#2649](https://github.com/pypeclub/OpenPype/pull/2649)
+- General: FFmpeg conversion also checks attribute string length [\#2635](https://github.com/pypeclub/OpenPype/pull/2635)
+- Houdini: Load Arnold .ass procedurals into Houdini [\#2606](https://github.com/pypeclub/OpenPype/pull/2606)
+- Deadline: Simplify GlobalJobPreLoad logic [\#2605](https://github.com/pypeclub/OpenPype/pull/2605)
+- Houdini: Implement Arnold .ass standin extraction from Houdini \(also support .ass.gz\) [\#2603](https://github.com/pypeclub/OpenPype/pull/2603)
+- New Publisher: New features and preparations for new standalone publisher [\#2556](https://github.com/pypeclub/OpenPype/pull/2556)
+- Fix Maya 2022 Python 3 compatibility [\#2445](https://github.com/pypeclub/OpenPype/pull/2445)
+- TVPaint: Use new publisher exceptions in validators [\#2435](https://github.com/pypeclub/OpenPype/pull/2435)
+- Harmony: Added new style 
validations for New Publisher [\#2434](https://github.com/pypeclub/OpenPype/pull/2434)
+- Aftereffects: New style validations for New publisher [\#2430](https://github.com/pypeclub/OpenPype/pull/2430)
+- Farm publishing: New cleanup plugin for Maya renders on farm [\#2390](https://github.com/pypeclub/OpenPype/pull/2390)
+- General: Subset name filtering in ExtractReview outputs [\#2872](https://github.com/pypeclub/OpenPype/pull/2872)
+- NewPublisher: Descriptions and Icons in creator dialog [\#2867](https://github.com/pypeclub/OpenPype/pull/2867)
+- NewPublisher: Changing task on publishing instance [\#2863](https://github.com/pypeclub/OpenPype/pull/2863)
+- TrayPublisher: Choose project widget is more clear [\#2859](https://github.com/pypeclub/OpenPype/pull/2859)
+- Maya: add loaded containers to published instance [\#2837](https://github.com/pypeclub/OpenPype/pull/2837)
+- Ftrack: Can sync fps as string [\#2836](https://github.com/pypeclub/OpenPype/pull/2836)
+- General: Custom function for find executable [\#2822](https://github.com/pypeclub/OpenPype/pull/2822)
+- General: Color dialog UI fixes [\#2817](https://github.com/pypeclub/OpenPype/pull/2817)
+- global: letter box calculated on output as last process [\#2812](https://github.com/pypeclub/OpenPype/pull/2812)
+- Nuke: adding Reformat to baking mov plugin [\#2811](https://github.com/pypeclub/OpenPype/pull/2811)
+- Manager: Update all to latest button [\#2805](https://github.com/pypeclub/OpenPype/pull/2805)
+- Houdini: Move Houdini Save Current File to beginning of ExtractorOrder [\#2747](https://github.com/pypeclub/OpenPype/pull/2747)
+- Global: adding studio name/code to anatomy template formatting data [\#2630](https://github.com/pypeclub/OpenPype/pull/2630)
+
+**🐛 Bug fixes**
+
+- Settings UI: Search case sensitivity [\#2810](https://github.com/pypeclub/OpenPype/pull/2810)
+- resolve: fixing fusion module loading [\#2802](https://github.com/pypeclub/OpenPype/pull/2802)
+- Ftrack: Unset task ids from asset versions before tasks are removed [\#2800](https://github.com/pypeclub/OpenPype/pull/2800)
+- Slack: fail gracefully if slack exception [\#2798](https://github.com/pypeclub/OpenPype/pull/2798)
+- Flame: Fix version string in default settings [\#2783](https://github.com/pypeclub/OpenPype/pull/2783)
+- After Effects: Fix typo in name `afftereffects` -\> `aftereffects` [\#2768](https://github.com/pypeclub/OpenPype/pull/2768)
+- Houdini: Fix open last workfile [\#2767](https://github.com/pypeclub/OpenPype/pull/2767)
+- Avoid renaming udim indexes [\#2765](https://github.com/pypeclub/OpenPype/pull/2765)
+- Maya: Fix `unique_namespace` when in a namespace that is empty [\#2759](https://github.com/pypeclub/OpenPype/pull/2759)
+- Loader UI: Fix right click in representation widget [\#2757](https://github.com/pypeclub/OpenPype/pull/2757)
+- Harmony: Rendering in Deadline didn't work on other machines than the submitter [\#2754](https://github.com/pypeclub/OpenPype/pull/2754)
+- Aftereffects 2022 and Deadline [\#2748](https://github.com/pypeclub/OpenPype/pull/2748)
+- Flame: bunch of bugs [\#2745](https://github.com/pypeclub/OpenPype/pull/2745)
+- Maya: Save current scene on workfile publish [\#2744](https://github.com/pypeclub/OpenPype/pull/2744)
+- Version Up: Preserve parts of filename after version number \(like subversion\) on version\_up [\#2741](https://github.com/pypeclub/OpenPype/pull/2741)
+- Loader UI: Multiple asset selection and underline colors fixed [\#2731](https://github.com/pypeclub/OpenPype/pull/2731)
+- General: Fix 
loading of unused chars in xml format [\#2729](https://github.com/pypeclub/OpenPype/pull/2729)
+- TVPaint: Set objectName with members [\#2725](https://github.com/pypeclub/OpenPype/pull/2725)
+- General: Don't use 'objectName' from loaded references [\#2715](https://github.com/pypeclub/OpenPype/pull/2715)
+- Settings: Studio Project anatomy is queried using right keys [\#2711](https://github.com/pypeclub/OpenPype/pull/2711)
+- Local Settings: Additional applications don't break UI [\#2710](https://github.com/pypeclub/OpenPype/pull/2710)
+- Maya: Remove some unused code [\#2709](https://github.com/pypeclub/OpenPype/pull/2709)
+- Houdini: Fix refactor of Houdini host move for CreateArnoldAss [\#2704](https://github.com/pypeclub/OpenPype/pull/2704)
+- LookAssigner: Fix imports after moving code to OpenPype repository [\#2701](https://github.com/pypeclub/OpenPype/pull/2701)
+- Multiple hosts: unify menu style across hosts [\#2693](https://github.com/pypeclub/OpenPype/pull/2693)
+- Maya Redshift fixes [\#2692](https://github.com/pypeclub/OpenPype/pull/2692)
+- Maya: fix fps validation popup [\#2685](https://github.com/pypeclub/OpenPype/pull/2685)
+- Houdini: Explicitly collect correct frame name even in case of single frame render when `frameStart` is provided [\#2676](https://github.com/pypeclub/OpenPype/pull/2676)
+- hiero: fix effect collector name and order [\#2673](https://github.com/pypeclub/OpenPype/pull/2673)
+- Maya: Fix menu callbacks [\#2671](https://github.com/pypeclub/OpenPype/pull/2671)
+- hiero: removing obsolete unsupported plugin [\#2667](https://github.com/pypeclub/OpenPype/pull/2667)
+- Launcher: Fix access to 'data' attribute on actions [\#2659](https://github.com/pypeclub/OpenPype/pull/2659)
+- Maya `vrscene` loader fixes [\#2633](https://github.com/pypeclub/OpenPype/pull/2633)
+- Houdini: fix usd family in loader and integrators [\#2631](https://github.com/pypeclub/OpenPype/pull/2631)
+- Maya: Add only reference node to look family container like with other families [\#2508](https://github.com/pypeclub/OpenPype/pull/2508)
+- General: Missing time function [\#2877](https://github.com/pypeclub/OpenPype/pull/2877)
+- Deadline: Fix plugin name for tile assemble [\#2868](https://github.com/pypeclub/OpenPype/pull/2868)
+- Nuke: gizmo precollect fix [\#2866](https://github.com/pypeclub/OpenPype/pull/2866)
+- General: Fix hardlink for windows [\#2864](https://github.com/pypeclub/OpenPype/pull/2864)
+- General: ffmpeg was crashing on slate merge [\#2860](https://github.com/pypeclub/OpenPype/pull/2860)
+- WebPublisher: Video file was published with one frame too many [\#2858](https://github.com/pypeclub/OpenPype/pull/2858)
+- New Publisher: Error dialog got right styles [\#2857](https://github.com/pypeclub/OpenPype/pull/2857)
+- General: Fix getattr callback on dynamic modules [\#2855](https://github.com/pypeclub/OpenPype/pull/2855)
+- Nuke: slate resolution to input video resolution [\#2853](https://github.com/pypeclub/OpenPype/pull/2853)
+- WebPublisher: Fix username stored in DB [\#2852](https://github.com/pypeclub/OpenPype/pull/2852)
+- WebPublisher: Fix wrong number of frames for video file [\#2851](https://github.com/pypeclub/OpenPype/pull/2851)
+- Nuke: Fix family test in validate\_write\_legacy to work with stillImage [\#2847](https://github.com/pypeclub/OpenPype/pull/2847)
+- Nuke: fix multiple baking profile farm publishing [\#2842](https://github.com/pypeclub/OpenPype/pull/2842)
+- Blender: Fixed parameters for FBX export of the camera 
[\#2840](https://github.com/pypeclub/OpenPype/pull/2840)
+- Maya: Stop creation of reviews for Cryptomattes [\#2832](https://github.com/pypeclub/OpenPype/pull/2832)
+- Deadline: Remove recreated event [\#2828](https://github.com/pypeclub/OpenPype/pull/2828)
+- Deadline: Added missing events folder [\#2827](https://github.com/pypeclub/OpenPype/pull/2827)
+- Settings: Missing document with OP versions may break start of OpenPype [\#2825](https://github.com/pypeclub/OpenPype/pull/2825)
+- Deadline: more detailed temp file name for environment json [\#2824](https://github.com/pypeclub/OpenPype/pull/2824)
+- General: Host name was formed from obsolete code [\#2821](https://github.com/pypeclub/OpenPype/pull/2821)
+- Settings UI: Fix "Apply from" action [\#2820](https://github.com/pypeclub/OpenPype/pull/2820)
+- Ftrack: Job killer with missing user [\#2819](https://github.com/pypeclub/OpenPype/pull/2819)
+- Nuke: Use AVALON\_APP to get value for "app" key [\#2818](https://github.com/pypeclub/OpenPype/pull/2818)
+- StandalonePublisher: use dynamic groups in subset names [\#2816](https://github.com/pypeclub/OpenPype/pull/2816)
+
+**🔀 Refactored code**
+
+- Ftrack: Moved module one hierarchy level higher [\#2792](https://github.com/pypeclub/OpenPype/pull/2792)
+- SyncServer: Moved module one hierarchy level higher [\#2791](https://github.com/pypeclub/OpenPype/pull/2791)
+- Royal render: Move module one hierarchy level higher [\#2790](https://github.com/pypeclub/OpenPype/pull/2790)
+- Deadline: Move module one hierarchy level higher [\#2789](https://github.com/pypeclub/OpenPype/pull/2789)
+- Refactor: move webserver tool to openpype [\#2876](https://github.com/pypeclub/OpenPype/pull/2876)
+- General: Move create logic from avalon to OpenPype [\#2854](https://github.com/pypeclub/OpenPype/pull/2854)
+- General: Add vendors from avalon [\#2848](https://github.com/pypeclub/OpenPype/pull/2848)
+- General: Basic event system [\#2846](https://github.com/pypeclub/OpenPype/pull/2846)
+- General: Move change context functions [\#2839](https://github.com/pypeclub/OpenPype/pull/2839)
+- Tools: Don't use avalon tools code [\#2829](https://github.com/pypeclub/OpenPype/pull/2829)
+- Move Unreal Implementation to OpenPype [\#2823](https://github.com/pypeclub/OpenPype/pull/2823)
+- General: Extract template formatting from anatomy [\#2766](https://github.com/pypeclub/OpenPype/pull/2766)
+
+**Merged pull requests:**
+
+- Fusion: Moved implementation into OpenPype [\#2713](https://github.com/pypeclub/OpenPype/pull/2713)
+- TVPaint: Plugin build without dependencies [\#2705](https://github.com/pypeclub/OpenPype/pull/2705)
+- Webpublisher: Photoshop creates a beauty PNG [\#2689](https://github.com/pypeclub/OpenPype/pull/2689)
+- Ftrack: Hierarchical attributes are queried properly [\#2682](https://github.com/pypeclub/OpenPype/pull/2682)
+- Maya: Add Validate Frame Range settings [\#2661](https://github.com/pypeclub/OpenPype/pull/2661)
+- Harmony: move to OpenPype [\#2657](https://github.com/pypeclub/OpenPype/pull/2657)
+- Maya: cleanup duplicate rendersetup code [\#2642](https://github.com/pypeclub/OpenPype/pull/2642)
+- Deadline: Be able to pass Mongo url to job [\#2616](https://github.com/pypeclub/OpenPype/pull/2616)
+
+## [3.8.2](https://github.com/pypeclub/OpenPype/tree/3.8.2) (2022-02-07)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.1...3.8.2)
+
+### 📖 Documentation
+
+- Cosmetics: Fix common typos in openpype/website [\#2617](https://github.com/pypeclub/OpenPype/pull/2617)
+
+**🚀 
Enhancements**
+
+- TVPaint: Image loaders also work on review family [\#2638](https://github.com/pypeclub/OpenPype/pull/2638)
+- General: Project backup tools [\#2629](https://github.com/pypeclub/OpenPype/pull/2629)
+- nuke: adding clear button to write nodes [\#2627](https://github.com/pypeclub/OpenPype/pull/2627)
+- Ftrack: Family to Asset type mapping is in settings [\#2602](https://github.com/pypeclub/OpenPype/pull/2602)
+- Nuke: load color space from representation data [\#2576](https://github.com/pypeclub/OpenPype/pull/2576)
+
+**🐛 Bug fixes**
+
+- Fix pulling of cx\_freeze 6.10 [\#2628](https://github.com/pypeclub/OpenPype/pull/2628)
+- Global: fix broken otio review extractor [\#2590](https://github.com/pypeclub/OpenPype/pull/2590)
+
+**Merged pull requests:**
+
+- WebPublisher: fix instance duplicates [\#2641](https://github.com/pypeclub/OpenPype/pull/2641)
+- Fix - safer pulling of task name for webpublishing from PS [\#2613](https://github.com/pypeclub/OpenPype/pull/2613)
+
+## [3.8.1](https://github.com/pypeclub/OpenPype/tree/3.8.1) (2022-02-01)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.8.0...3.8.1)
+
+**🚀 Enhancements**
+
+- Webpublisher: Thumbnail extractor [\#2600](https://github.com/pypeclub/OpenPype/pull/2600)
+- Loader: Allow to toggle default family filters between "include" or "exclude" filtering [\#2541](https://github.com/pypeclub/OpenPype/pull/2541)
+- Launcher: Added context menu to skip opening last workfile [\#2536](https://github.com/pypeclub/OpenPype/pull/2536)
+- Unreal: JSON Layout Loading support [\#2066](https://github.com/pypeclub/OpenPype/pull/2066)
+
+**🐛 Bug fixes**
+
+- Release/3.8.0 [\#2619](https://github.com/pypeclub/OpenPype/pull/2619)
+- Settings: Enum does not store empty string if it has a single item to select [\#2615](https://github.com/pypeclub/OpenPype/pull/2615)
+- switch distutils to sysconfig for `get_platform()` [\#2594](https://github.com/pypeclub/OpenPype/pull/2594)
+- Fix poetry index and speedcopy update [\#2589](https://github.com/pypeclub/OpenPype/pull/2589)
+- Webpublisher: Fix - subset names from processed .psd used wrong value for task [\#2586](https://github.com/pypeclub/OpenPype/pull/2586)
+- `vrscene` creator Deadline webservice URL handling [\#2580](https://github.com/pypeclub/OpenPype/pull/2580)
+- global: track name was failing if duplicated root word in name [\#2568](https://github.com/pypeclub/OpenPype/pull/2568)
+- Validate Maya Rig produces no cycle errors [\#2484](https://github.com/pypeclub/OpenPype/pull/2484)
+
+**Merged pull requests:**
+
+- Bump pillow from 8.4.0 to 9.0.0 [\#2595](https://github.com/pypeclub/OpenPype/pull/2595)
+- Webpublisher: Skip version collect [\#2591](https://github.com/pypeclub/OpenPype/pull/2591)
+- build\(deps\): bump pillow from 8.4.0 to 9.0.0 [\#2523](https://github.com/pypeclub/OpenPype/pull/2523)
+
+## [3.8.0](https://github.com/pypeclub/OpenPype/tree/3.8.0) (2022-01-24)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.7.0...3.8.0)
+
+### 📖 Documentation
+
+- Variable in docs renamed to proper name [\#2546](https://github.com/pypeclub/OpenPype/pull/2546)
+
+**🆕 New features**
+
+- Flame: extracting segments with transcoding [\#2547](https://github.com/pypeclub/OpenPype/pull/2547)
+- Maya : V-Ray Proxy - load all ABC files via proxy [\#2544](https://github.com/pypeclub/OpenPype/pull/2544)
+- Maya to Unreal: Extended static mesh workflow [\#2537](https://github.com/pypeclub/OpenPype/pull/2537)
+- Flame: collecting publishable instances 
[\#2519](https://github.com/pypeclub/OpenPype/pull/2519)
+- Flame: create publishable clips [\#2495](https://github.com/pypeclub/OpenPype/pull/2495)
+- Flame: OpenTimelineIO Export Module [\#2398](https://github.com/pypeclub/OpenPype/pull/2398)
+
+**🚀 Enhancements**
+
+- Webpublisher: Moved error to the beginning of the log [\#2559](https://github.com/pypeclub/OpenPype/pull/2559)
+- Ftrack: Use ApplicationManager to get DJV path [\#2558](https://github.com/pypeclub/OpenPype/pull/2558)
+- Webpublisher: Added endpoint to reprocess batch through UI [\#2555](https://github.com/pypeclub/OpenPype/pull/2555)
+- Settings: PathInput strips passed string [\#2550](https://github.com/pypeclub/OpenPype/pull/2550)
+- Global: Extract Review anatomy fill data with output name [\#2548](https://github.com/pypeclub/OpenPype/pull/2548)
+- Cosmetics: Clean up some cosmetics / typos [\#2542](https://github.com/pypeclub/OpenPype/pull/2542)
+- General: Validate if current process OpenPype version is requested version [\#2529](https://github.com/pypeclub/OpenPype/pull/2529)
+- General: Be able to use anatomy data in ffmpeg output arguments [\#2525](https://github.com/pypeclub/OpenPype/pull/2525)
+- Expose toggle publish plug-in settings for Maya Look Shading Engine Naming [\#2521](https://github.com/pypeclub/OpenPype/pull/2521)
+- Photoshop: Move implementation to OpenPype [\#2510](https://github.com/pypeclub/OpenPype/pull/2510)
+- TimersManager: Move module one hierarchy higher [\#2501](https://github.com/pypeclub/OpenPype/pull/2501)
+- Slack: notifications are sent with OpenPype logo and bot name [\#2499](https://github.com/pypeclub/OpenPype/pull/2499)
+- Slack: Add review to notification message [\#2498](https://github.com/pypeclub/OpenPype/pull/2498)
+- Ftrack: Event handlers settings [\#2496](https://github.com/pypeclub/OpenPype/pull/2496)
+- Tools: Fix style and modality of errors in loader and creator [\#2489](https://github.com/pypeclub/OpenPype/pull/2489)
+- Maya: Collect 'fps' animation data only for "review" instances [\#2486](https://github.com/pypeclub/OpenPype/pull/2486)
+- Project Manager: Remove project button cleanup [\#2482](https://github.com/pypeclub/OpenPype/pull/2482)
+- Tools: Be able to change models of tasks and assets widgets [\#2475](https://github.com/pypeclub/OpenPype/pull/2475)
+- Publish pype: Reduce publish process deferring [\#2464](https://github.com/pypeclub/OpenPype/pull/2464)
+- Maya: Improve speed of Collect History logic [\#2460](https://github.com/pypeclub/OpenPype/pull/2460)
+- Maya: Validate Rig Controllers - fix Error: in script editor [\#2459](https://github.com/pypeclub/OpenPype/pull/2459)
+- Maya: Validate NGONs simplify and speed-up [\#2458](https://github.com/pypeclub/OpenPype/pull/2458)
+- Maya: Optimize Validate Locked Normals speed for dense polymeshes [\#2457](https://github.com/pypeclub/OpenPype/pull/2457)
+- Maya: Refactor missing \_get\_reference\_node method [\#2455](https://github.com/pypeclub/OpenPype/pull/2455)
+- Houdini: Remove broken unique name counter [\#2450](https://github.com/pypeclub/OpenPype/pull/2450)
+- Maya: Improve lib.polyConstraint performance when Select tool is not the active tool context [\#2447](https://github.com/pypeclub/OpenPype/pull/2447)
+- General: Validate third party before build [\#2425](https://github.com/pypeclub/OpenPype/pull/2425)
+- Maya : add option to not group reference in ReferenceLoader [\#2383](https://github.com/pypeclub/OpenPype/pull/2383)
+
+**🐛 Bug fixes**
+
+- AfterEffects: Fix - removed obsolete import 
[\#2577](https://github.com/pypeclub/OpenPype/pull/2577)
+- General: OpenPype version updates [\#2575](https://github.com/pypeclub/OpenPype/pull/2575)
+- Ftrack: Delete action revision [\#2563](https://github.com/pypeclub/OpenPype/pull/2563)
+- Webpublisher: ftrack shows incorrect user names [\#2560](https://github.com/pypeclub/OpenPype/pull/2560)
+- General: Do not validate version if build does not support it [\#2557](https://github.com/pypeclub/OpenPype/pull/2557)
+- Webpublisher: Fixed progress reporting [\#2553](https://github.com/pypeclub/OpenPype/pull/2553)
+- Fix Maya AssProxyLoader version switch [\#2551](https://github.com/pypeclub/OpenPype/pull/2551)
+- General: Fix install thread in igniter [\#2549](https://github.com/pypeclub/OpenPype/pull/2549)
+- Houdini: vdbcache family preserves frame numbers on publish integration + enable validate version for Houdini [\#2535](https://github.com/pypeclub/OpenPype/pull/2535)
+- Maya: Fix Load VDB to V-Ray [\#2533](https://github.com/pypeclub/OpenPype/pull/2533)
+- Maya: ReferenceLoader fix not unique group name error for attach to root [\#2532](https://github.com/pypeclub/OpenPype/pull/2532)
+- Maya: namespaced context goes back to original namespace when started from inside a namespace [\#2531](https://github.com/pypeclub/OpenPype/pull/2531)
+- Fix create zip tool - path argument [\#2522](https://github.com/pypeclub/OpenPype/pull/2522)
+- Maya: Fix Extract Look with space in names [\#2518](https://github.com/pypeclub/OpenPype/pull/2518)
+- Fix published frame content for sequence starting with 0 [\#2513](https://github.com/pypeclub/OpenPype/pull/2513)
+- Maya: reset empty string attributes correctly to "" instead of "None" [\#2506](https://github.com/pypeclub/OpenPype/pull/2506)
+- Improve FusionPreLaunch hook errors [\#2505](https://github.com/pypeclub/OpenPype/pull/2505)
+- General: Settings work if OpenPypeVersion is available [\#2494](https://github.com/pypeclub/OpenPype/pull/2494)
+- General: PYTHONPATH may break OpenPype dependencies [\#2493](https://github.com/pypeclub/OpenPype/pull/2493)
+- General: Modules import function output fix [\#2492](https://github.com/pypeclub/OpenPype/pull/2492)
+- AE: fix hiding of alert window below Publish [\#2491](https://github.com/pypeclub/OpenPype/pull/2491)
+- Workfiles tool: Files widget shows files on first show [\#2488](https://github.com/pypeclub/OpenPype/pull/2488)
+- General: Custom template paths filter fix [\#2483](https://github.com/pypeclub/OpenPype/pull/2483)
+- Loader: Remove always on top flag in tray [\#2480](https://github.com/pypeclub/OpenPype/pull/2480)
+- General: Anatomy does not return root envs as unicode [\#2465](https://github.com/pypeclub/OpenPype/pull/2465)
+- Maya: Validate Shape Zero does not keep fixed geometry vertices selected/active after repair [\#2456](https://github.com/pypeclub/OpenPype/pull/2456)
+
+**Merged pull requests:**
+
+- AfterEffects: Move implementation to OpenPype [\#2543](https://github.com/pypeclub/OpenPype/pull/2543)
+- Maya: Remove Maya Look Assigner check on startup [\#2540](https://github.com/pypeclub/OpenPype/pull/2540)
+- build\(deps\): bump shelljs from 0.8.4 to 0.8.5 in /website [\#2538](https://github.com/pypeclub/OpenPype/pull/2538)
+- build\(deps\): bump follow-redirects from 1.14.4 to 1.14.7 in /website [\#2534](https://github.com/pypeclub/OpenPype/pull/2534)
+- Nuke: Merge avalon's implementation into OpenPype [\#2514](https://github.com/pypeclub/OpenPype/pull/2514)
+- Maya: Vray fix proxies look assignment 
[\#2392](https://github.com/pypeclub/OpenPype/pull/2392)
+- Bump algoliasearch-helper from 3.4.4 to 3.6.2 in /website [\#2297](https://github.com/pypeclub/OpenPype/pull/2297)
+
+## [3.7.0](https://github.com/pypeclub/OpenPype/tree/3.7.0) (2022-01-04)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.4...3.7.0)
+
+**Deprecated:**
+
+- General: Default modules hierarchy n2 [\#2368](https://github.com/pypeclub/OpenPype/pull/2368)
+
+### 📖 Documentation
+
+- docs\[website\]: Add Ellipse Studio \(logo\) as an OpenPype contributor [\#2324](https://github.com/pypeclub/OpenPype/pull/2324)
+
+**🆕 New features**
+
+- Settings UI use OpenPype styles [\#2296](https://github.com/pypeclub/OpenPype/pull/2296)
+- Store typed version dependencies for workfiles [\#2192](https://github.com/pypeclub/OpenPype/pull/2192)
+- OpenPypeV3: add key task type, task shortname and user to path templating construction [\#2157](https://github.com/pypeclub/OpenPype/pull/2157)
+- Nuke: Alembic model workflow [\#2140](https://github.com/pypeclub/OpenPype/pull/2140)
+- TVPaint: Load workfile from published. [\#1980](https://github.com/pypeclub/OpenPype/pull/1980)
+
+**🚀 Enhancements**
+
+- General: Workdir extra folders [\#2462](https://github.com/pypeclub/OpenPype/pull/2462)
+- Photoshop: New style validations for New publisher [\#2429](https://github.com/pypeclub/OpenPype/pull/2429)
+- General: Environment variables groups [\#2424](https://github.com/pypeclub/OpenPype/pull/2424)
+- Unreal: Dynamic menu created in Python [\#2422](https://github.com/pypeclub/OpenPype/pull/2422)
+- Settings UI: Hyperlinks to settings [\#2420](https://github.com/pypeclub/OpenPype/pull/2420)
+- Modules: JobQueue module moved one hierarchy level higher [\#2419](https://github.com/pypeclub/OpenPype/pull/2419)
+- TimersManager: Start timer post launch hook [\#2418](https://github.com/pypeclub/OpenPype/pull/2418)
+- General: Run applications as separate processes under linux [\#2408](https://github.com/pypeclub/OpenPype/pull/2408)
+- Ftrack: Check existence of object type on recreation [\#2404](https://github.com/pypeclub/OpenPype/pull/2404)
+- Enhancement: Global cleanup plugin that explicitly removes paths from context [\#2402](https://github.com/pypeclub/OpenPype/pull/2402)
+- General: MongoDB ability to specify replica set groups [\#2401](https://github.com/pypeclub/OpenPype/pull/2401)
+- Flame: moving `utility_scripts` to api folder also with `scripts` [\#2385](https://github.com/pypeclub/OpenPype/pull/2385)
+- CentOS 7 dependency compatibility [\#2384](https://github.com/pypeclub/OpenPype/pull/2384)
+- Enhancement: Settings: Use project settings values from another project [\#2382](https://github.com/pypeclub/OpenPype/pull/2382)
+- Blender 3: Support auto install for new blender version [\#2377](https://github.com/pypeclub/OpenPype/pull/2377)
+- Maya: add render image path to settings [\#2375](https://github.com/pypeclub/OpenPype/pull/2375)
+- Settings: Webpublisher in hosts enum [\#2367](https://github.com/pypeclub/OpenPype/pull/2367)
+- Hiero: python3 compatibility [\#2365](https://github.com/pypeclub/OpenPype/pull/2365)
+- Burnins: Be able to recognize mxf OPAtom format [\#2361](https://github.com/pypeclub/OpenPype/pull/2361)
+- Maya: Add is\_static\_image\_plane and is\_in\_all\_views option in imagePlaneLoader [\#2356](https://github.com/pypeclub/OpenPype/pull/2356)
+- Local settings: Copyable studio paths [\#2349](https://github.com/pypeclub/OpenPype/pull/2349)
+- Assets Widget: Clear model on project change 
[\#2345](https://github.com/pypeclub/OpenPype/pull/2345)
+- General: OpenPype default modules hierarchy [\#2338](https://github.com/pypeclub/OpenPype/pull/2338)
+- TVPaint: Move implementation to OpenPype [\#2336](https://github.com/pypeclub/OpenPype/pull/2336)
+- General: FFprobe error exception contains original error message [\#2328](https://github.com/pypeclub/OpenPype/pull/2328)
+- Resolve: Add experimental button to menu [\#2325](https://github.com/pypeclub/OpenPype/pull/2325)
+- Hiero: Add experimental tools action [\#2323](https://github.com/pypeclub/OpenPype/pull/2323)
+- Input links: Cleanup and unification of differences [\#2322](https://github.com/pypeclub/OpenPype/pull/2322)
+- General: Don't validate vendor bin by executing them [\#2317](https://github.com/pypeclub/OpenPype/pull/2317)
+- General: Multilayer EXRs support [\#2315](https://github.com/pypeclub/OpenPype/pull/2315)
+- General: Run process log stderr as info log level [\#2309](https://github.com/pypeclub/OpenPype/pull/2309)
+- General: Reduce vendor imports [\#2305](https://github.com/pypeclub/OpenPype/pull/2305)
+- Tools: Cleanup of unused classes [\#2304](https://github.com/pypeclub/OpenPype/pull/2304)
+- Project Manager: Added ability to delete project [\#2298](https://github.com/pypeclub/OpenPype/pull/2298)
+- Ftrack: Synchronize input links [\#2287](https://github.com/pypeclub/OpenPype/pull/2287)
+- StandalonePublisher: Remove unused plugin ExtractHarmonyZip [\#2277](https://github.com/pypeclub/OpenPype/pull/2277)
+- Ftrack: Support multiple reviews [\#2271](https://github.com/pypeclub/OpenPype/pull/2271)
+- Ftrack: Remove unused clean component plugin [\#2269](https://github.com/pypeclub/OpenPype/pull/2269)
+- Royal Render: Support for rr channels in separate dirs [\#2268](https://github.com/pypeclub/OpenPype/pull/2268)
+- Houdini: Add experimental tools action [\#2267](https://github.com/pypeclub/OpenPype/pull/2267)
+- Nuke: extract baked review videos presets [\#2248](https://github.com/pypeclub/OpenPype/pull/2248)
+- TVPaint: Workers rendering [\#2209](https://github.com/pypeclub/OpenPype/pull/2209)
+- OpenPypeV3: Add key parent asset to path templating construction [\#2186](https://github.com/pypeclub/OpenPype/pull/2186)
+
+**🐛 Bug fixes**
+
+- TVPaint: Create render layer dialog is in front [\#2471](https://github.com/pypeclub/OpenPype/pull/2471)
+- Short Pyblish plugin path [\#2428](https://github.com/pypeclub/OpenPype/pull/2428)
+- PS: Introduced settings for invalid characters to use in ValidateNaming plugin [\#2417](https://github.com/pypeclub/OpenPype/pull/2417)
+- Settings UI: Breadcrumbs path does not create new entities [\#2416](https://github.com/pypeclub/OpenPype/pull/2416)
+- AfterEffects: Variant 2022 is in defaults but missing in schemas [\#2412](https://github.com/pypeclub/OpenPype/pull/2412)
+- Nuke: baking representations was not additive [\#2406](https://github.com/pypeclub/OpenPype/pull/2406)
+- General: Fix access to environments from default settings [\#2403](https://github.com/pypeclub/OpenPype/pull/2403)
+- Fix: Placeholder Input color set fix [\#2399](https://github.com/pypeclub/OpenPype/pull/2399)
+- Settings: Fix state change of wrapper label [\#2396](https://github.com/pypeclub/OpenPype/pull/2396)
+- Flame: fix ftrack publisher [\#2381](https://github.com/pypeclub/OpenPype/pull/2381)
+- hiero: solve custom ocio path [\#2379](https://github.com/pypeclub/OpenPype/pull/2379)
+- hiero: fix workio and flatten [\#2378](https://github.com/pypeclub/OpenPype/pull/2378)
+- Nuke: fixing 
menu re-drawing during context change [\#2374](https://github.com/pypeclub/OpenPype/pull/2374)
+- Webpublisher: Fix assignment of families of TVpaint instances [\#2373](https://github.com/pypeclub/OpenPype/pull/2373)
+- Nuke: fixing node name based on switched asset name [\#2369](https://github.com/pypeclub/OpenPype/pull/2369)
+- JobQueue: Fix loading of settings [\#2362](https://github.com/pypeclub/OpenPype/pull/2362)
+- Tools: Placeholder color [\#2359](https://github.com/pypeclub/OpenPype/pull/2359)
+- Launcher: Minimize button on macOS [\#2355](https://github.com/pypeclub/OpenPype/pull/2355)
+- StandalonePublisher: Fix import of constant [\#2354](https://github.com/pypeclub/OpenPype/pull/2354)
+- Houdini: Fix HDA creation [\#2350](https://github.com/pypeclub/OpenPype/pull/2350)
+- Adobe products show issue [\#2347](https://github.com/pypeclub/OpenPype/pull/2347)
+- Maya Look Assigner: Fix Python 3 compatibility [\#2343](https://github.com/pypeclub/OpenPype/pull/2343)
+- Remove wrongly used host for hook [\#2342](https://github.com/pypeclub/OpenPype/pull/2342)
+- Tools: Use Qt context on tools show [\#2340](https://github.com/pypeclub/OpenPype/pull/2340)
+- Flame: Fix default argument value in custom dictionary [\#2339](https://github.com/pypeclub/OpenPype/pull/2339)
+- Timers Manager: Disable auto stop timer on linux platform [\#2334](https://github.com/pypeclub/OpenPype/pull/2334)
+- nuke: bake preset single input exception [\#2331](https://github.com/pypeclub/OpenPype/pull/2331)
+- Hiero: fixing multiple templates at a hierarchy parent [\#2330](https://github.com/pypeclub/OpenPype/pull/2330)
+- Fix - provider icons are pulled from a folder [\#2326](https://github.com/pypeclub/OpenPype/pull/2326)
+- InputLinks: Typo in "inputLinks" key [\#2314](https://github.com/pypeclub/OpenPype/pull/2314)
+- Deadline timeout and logging [\#2312](https://github.com/pypeclub/OpenPype/pull/2312)
+- nuke: do not multiply representation on class method [\#2311](https://github.com/pypeclub/OpenPype/pull/2311)
+- Workfiles tool: Fix task formatting [\#2306](https://github.com/pypeclub/OpenPype/pull/2306)
+- Delivery: Fix delivery paths created on windows [\#2302](https://github.com/pypeclub/OpenPype/pull/2302)
+- Maya: Deadline - fix limit groups [\#2295](https://github.com/pypeclub/OpenPype/pull/2295)
+- Royal Render: Fix plugin order and OpenPype auto-detection [\#2291](https://github.com/pypeclub/OpenPype/pull/2291)
+- New Publisher: Fix mapping of indexes [\#2285](https://github.com/pypeclub/OpenPype/pull/2285)
+- Alternate site for site sync doesn't work for sequences [\#2284](https://github.com/pypeclub/OpenPype/pull/2284)
+- FFmpeg: Execute ffprobe using list of arguments instead of string command [\#2281](https://github.com/pypeclub/OpenPype/pull/2281)
+- Nuke: Anatomy fill data use task as dictionary [\#2278](https://github.com/pypeclub/OpenPype/pull/2278)
+- Bug: fix variable name \_asset\_id in workfiles application [\#2274](https://github.com/pypeclub/OpenPype/pull/2274)
+- Version handling fixes [\#2272](https://github.com/pypeclub/OpenPype/pull/2272)
+
+**Merged pull requests:**
+
+- Maya: Replaced PATH usage with vendored oiio path for maketx utility [\#2405](https://github.com/pypeclub/OpenPype/pull/2405)
+- \[Fix\]\[MAYA\] Handle message type attribute within CollectLook [\#2394](https://github.com/pypeclub/OpenPype/pull/2394)
+- Add validator to check correct version of extension for PS and AE [\#2387](https://github.com/pypeclub/OpenPype/pull/2387)
+- Maya: configurable model top level 
validation [\#2321](https://github.com/pypeclub/OpenPype/pull/2321)
+- Create test publish class for After Effects [\#2270](https://github.com/pypeclub/OpenPype/pull/2270)
+
+## [3.6.4](https://github.com/pypeclub/OpenPype/tree/3.6.4) (2021-11-23)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.3...3.6.4)
+
+**🐛 Bug fixes**
+
+- Nuke: inventory update removes all loaded read nodes [\#2294](https://github.com/pypeclub/OpenPype/pull/2294)
+
+## [3.6.3](https://github.com/pypeclub/OpenPype/tree/3.6.3) (2021-11-19)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.2...3.6.3)
+
+**🐛 Bug fixes**
+
+- Deadline: Fix publish targets [\#2280](https://github.com/pypeclub/OpenPype/pull/2280)
+
+## [3.6.2](https://github.com/pypeclub/OpenPype/tree/3.6.2) (2021-11-18)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.1...3.6.2)
+
+**🚀 Enhancements**
+
+- Tools: Assets widget [\#2265](https://github.com/pypeclub/OpenPype/pull/2265)
+- SceneInventory: Choose loader in asset switcher [\#2262](https://github.com/pypeclub/OpenPype/pull/2262)
+- Style: New fonts in OpenPype style [\#2256](https://github.com/pypeclub/OpenPype/pull/2256)
+- Tools: SceneInventory in OpenPype [\#2255](https://github.com/pypeclub/OpenPype/pull/2255)
+- Tools: Tasks widget [\#2251](https://github.com/pypeclub/OpenPype/pull/2251)
+- Tools: Creator in OpenPype [\#2244](https://github.com/pypeclub/OpenPype/pull/2244)
+- Added endpoint for configured extensions [\#2221](https://github.com/pypeclub/OpenPype/pull/2221)
+
+**🐛 Bug fixes**
+
+- Tools: Parenting of tools in Nuke and Hiero [\#2266](https://github.com/pypeclub/OpenPype/pull/2266)
+- limiting validator to specific editorial hosts [\#2264](https://github.com/pypeclub/OpenPype/pull/2264)
+- Tools: Select Context dialog attribute fix [\#2261](https://github.com/pypeclub/OpenPype/pull/2261)
+- Maya: Render publishing fails on linux [\#2260](https://github.com/pypeclub/OpenPype/pull/2260)
+- LookAssigner: Fix tool reopen [\#2259](https://github.com/pypeclub/OpenPype/pull/2259)
+- Standalone: editorial not publishing thumbnails on all subsets [\#2258](https://github.com/pypeclub/OpenPype/pull/2258)
+- Burnins: Support mxf metadata [\#2247](https://github.com/pypeclub/OpenPype/pull/2247)
+- Maya: Support for configurable AOV separator characters [\#2197](https://github.com/pypeclub/OpenPype/pull/2197)
+- Maya: texture colorspace modes in looks [\#2195](https://github.com/pypeclub/OpenPype/pull/2195)
+
+## [3.6.1](https://github.com/pypeclub/OpenPype/tree/3.6.1) (2021-11-16)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.6.0...3.6.1)
+
+**🐛 Bug fixes**
+
+- Loader doesn't allow changing of version before loading [\#2254](https://github.com/pypeclub/OpenPype/pull/2254)
+
+## [3.6.0](https://github.com/pypeclub/OpenPype/tree/3.6.0) (2021-11-15)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.5.0...3.6.0)
+
+### 📖 Documentation
+
+- Add alternative sites for Site Sync [\#2206](https://github.com/pypeclub/OpenPype/pull/2206)
+- Add command line way of running site sync server [\#2188](https://github.com/pypeclub/OpenPype/pull/2188)
+
+**🆕 New features**
+
+- Add validate active site button to sync queue on a project [\#2176](https://github.com/pypeclub/OpenPype/pull/2176)
+- Maya : Colorspace configuration [\#2170](https://github.com/pypeclub/OpenPype/pull/2170)
+- Blender: Added support for audio [\#2168](https://github.com/pypeclub/OpenPype/pull/2168)
+- Flame: basic host integration 
[\#2165](https://github.com/pypeclub/OpenPype/pull/2165) +- Houdini: simple HDA workflow [\#2072](https://github.com/pypeclub/OpenPype/pull/2072) +- Basic Royal Render Integration ✨ [\#2061](https://github.com/pypeclub/OpenPype/pull/2061) +- Camera handling between Blender and Unreal [\#1988](https://github.com/pypeclub/OpenPype/pull/1988) +- switch PyQt5 for PySide2 [\#1744](https://github.com/pypeclub/OpenPype/pull/1744) + +**🚀 Enhancements** + +- Tools: Subset manager in OpenPype [\#2243](https://github.com/pypeclub/OpenPype/pull/2243) +- General: Skip module directories without init file [\#2239](https://github.com/pypeclub/OpenPype/pull/2239) +- General: Static interfaces [\#2238](https://github.com/pypeclub/OpenPype/pull/2238) +- Style: Fix transparent image in style [\#2235](https://github.com/pypeclub/OpenPype/pull/2235) +- Add a "following workfile versioning" option on publish [\#2225](https://github.com/pypeclub/OpenPype/pull/2225) +- Modules: Module can add cli commands [\#2224](https://github.com/pypeclub/OpenPype/pull/2224) +- Webpublisher: Separate webpublisher logic [\#2222](https://github.com/pypeclub/OpenPype/pull/2222) +- Add both side availability on Site Sync sites to Loader [\#2220](https://github.com/pypeclub/OpenPype/pull/2220) +- Tools: Center loader and library loader on show [\#2219](https://github.com/pypeclub/OpenPype/pull/2219) +- Maya : Validate shape zero [\#2212](https://github.com/pypeclub/OpenPype/pull/2212) +- Maya : validate unique names [\#2211](https://github.com/pypeclub/OpenPype/pull/2211) +- Tools: OpenPype stylesheet in workfiles tool [\#2208](https://github.com/pypeclub/OpenPype/pull/2208) +- Ftrack: Replace Queue with deque in event handlers logic [\#2204](https://github.com/pypeclub/OpenPype/pull/2204) +- Tools: New select context dialog [\#2200](https://github.com/pypeclub/OpenPype/pull/2200) +- Maya : Validate mesh ngons [\#2199](https://github.com/pypeclub/OpenPype/pull/2199) +- Dirmap in Nuke [\#2198](https://github.com/pypeclub/OpenPype/pull/2198) +- Delivery: Check 'frame' key in template for sequence delivery [\#2196](https://github.com/pypeclub/OpenPype/pull/2196) +- Settings: Site sync project settings improvement [\#2193](https://github.com/pypeclub/OpenPype/pull/2193) +- Usage of tools code [\#2185](https://github.com/pypeclub/OpenPype/pull/2185) +- Settings: Dictionary based on project roots [\#2184](https://github.com/pypeclub/OpenPype/pull/2184) +- Subset name: Be able to pass asset document to get subset name [\#2179](https://github.com/pypeclub/OpenPype/pull/2179) +- Tools: Experimental tools [\#2167](https://github.com/pypeclub/OpenPype/pull/2167) +- Loader: Refactor and use OpenPype stylesheets [\#2166](https://github.com/pypeclub/OpenPype/pull/2166) +- Add loader for linked smart objects in photoshop [\#2149](https://github.com/pypeclub/OpenPype/pull/2149) +- Burnins: DNxHD profiles handling [\#2142](https://github.com/pypeclub/OpenPype/pull/2142) +- Tools: Single access point for host tools [\#2139](https://github.com/pypeclub/OpenPype/pull/2139) + +**🐛 Bug fixes** + +- Ftrack: Sync project ftrack id cache issue [\#2250](https://github.com/pypeclub/OpenPype/pull/2250) +- Ftrack: Session creation and Prepare project [\#2245](https://github.com/pypeclub/OpenPype/pull/2245) +- Added queue for studio processing in PS [\#2237](https://github.com/pypeclub/OpenPype/pull/2237) +- Python 2: Unicode to string conversion [\#2236](https://github.com/pypeclub/OpenPype/pull/2236) +- Fix - enum for color coding in PS 
[\#2234](https://github.com/pypeclub/OpenPype/pull/2234)
+- Pyblish Tool: Fix targets handling [\#2232](https://github.com/pypeclub/OpenPype/pull/2232)
+- Ftrack: Base event fix of 'get\_project\_from\_entity' method [\#2214](https://github.com/pypeclub/OpenPype/pull/2214)
+- Maya : multiple subsets review broken [\#2210](https://github.com/pypeclub/OpenPype/pull/2210)
+- Fix - different command used for Linux and Mac OS [\#2207](https://github.com/pypeclub/OpenPype/pull/2207)
+- Tools: Workfiles tool doesn't use avalon widgets [\#2205](https://github.com/pypeclub/OpenPype/pull/2205)
+- Ftrack: Fill missing ftrack id on mongo project [\#2203](https://github.com/pypeclub/OpenPype/pull/2203)
+- Project Manager: Fix copying of tasks [\#2191](https://github.com/pypeclub/OpenPype/pull/2191)
+- StandalonePublisher: Source validator doesn't expect representations [\#2190](https://github.com/pypeclub/OpenPype/pull/2190)
+- Blender: Fix trying to pack an image when the shader node has no texture [\#2183](https://github.com/pypeclub/OpenPype/pull/2183)
+- Maya: review viewport settings [\#2177](https://github.com/pypeclub/OpenPype/pull/2177)
+- MacOS: Launching of applications may cause Permissions error [\#2175](https://github.com/pypeclub/OpenPype/pull/2175)
+- Maya: Aspect ratio [\#2174](https://github.com/pypeclub/OpenPype/pull/2174)
+- Blender: Fix 'Deselect All' with object not in 'Object Mode' [\#2163](https://github.com/pypeclub/OpenPype/pull/2163)
+- Tools: Stylesheets are applied after tool show [\#2161](https://github.com/pypeclub/OpenPype/pull/2161)
+- Maya: Collect render - fix UNC path support 🐛 [\#2158](https://github.com/pypeclub/OpenPype/pull/2158)
+- Maya: Fix hotbox broken by scriptsmenu [\#2151](https://github.com/pypeclub/OpenPype/pull/2151)
+- Ftrack: Ignore save warnings exception in Prepare project action [\#2150](https://github.com/pypeclub/OpenPype/pull/2150)
+- Loader thumbnails with smooth edges [\#2147](https://github.com/pypeclub/OpenPype/pull/2147)
+- Added validator for source files for Standalone Publisher [\#2138](https://github.com/pypeclub/OpenPype/pull/2138)
+
+**Merged pull requests:**
+
+- Bump pillow from 8.2.0 to 8.3.2 [\#2162](https://github.com/pypeclub/OpenPype/pull/2162)
+- Bump axios from 0.21.1 to 0.21.4 in /website [\#2059](https://github.com/pypeclub/OpenPype/pull/2059)
+
+## [3.5.0](https://github.com/pypeclub/OpenPype/tree/3.5.0) (2021-10-17)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.1...3.5.0)
+
+**Deprecated:**
+
+- Maya: Change mayaAscii family to mayaScene [\#2106](https://github.com/pypeclub/OpenPype/pull/2106)
+
+**🆕 New features**
+
+- Added project and task into context change message in Maya [\#2131](https://github.com/pypeclub/OpenPype/pull/2131)
+- Add ExtractBurnin to photoshop review [\#2124](https://github.com/pypeclub/OpenPype/pull/2124)
+- PYPE-1218 - changed namespace to contain subset name in Maya [\#2114](https://github.com/pypeclub/OpenPype/pull/2114)
+- Added running configurable disk mapping command before start of OP [\#2091](https://github.com/pypeclub/OpenPype/pull/2091)
+- SFTP provider [\#2073](https://github.com/pypeclub/OpenPype/pull/2073)
+- Maya: Validate setdress top group [\#2068](https://github.com/pypeclub/OpenPype/pull/2068)
+- Maya: Enable publishing render attrib sets \(e.g. 
V-Ray Displacement\) with model [\#1955](https://github.com/pypeclub/OpenPype/pull/1955)
+
+**🚀 Enhancements**
+
+- Maya: make rig validators configurable in settings [\#2137](https://github.com/pypeclub/OpenPype/pull/2137)
+- Settings: Updated readme for entity types in settings [\#2132](https://github.com/pypeclub/OpenPype/pull/2132)
+- Nuke: unified clip loader [\#2128](https://github.com/pypeclub/OpenPype/pull/2128)
+- Settings UI: Project model refreshing and sorting [\#2104](https://github.com/pypeclub/OpenPype/pull/2104)
+- Create Read From Rendered - Disable Relative paths by default [\#2093](https://github.com/pypeclub/OpenPype/pull/2093)
+- Added choosing a different dirmap mapping if workfile is synched locally [\#2088](https://github.com/pypeclub/OpenPype/pull/2088)
+- General: Remove IdleManager module [\#2084](https://github.com/pypeclub/OpenPype/pull/2084)
+- Tray UI: Message box about missing settings defaults [\#2080](https://github.com/pypeclub/OpenPype/pull/2080)
+- Tray UI: Show menu where first click happened [\#2079](https://github.com/pypeclub/OpenPype/pull/2079)
+- Global: add global validators to settings [\#2078](https://github.com/pypeclub/OpenPype/pull/2078)
+- Use CRF for burnin when available [\#2070](https://github.com/pypeclub/OpenPype/pull/2070)
+- Project manager: Filter first item after selection of project [\#2069](https://github.com/pypeclub/OpenPype/pull/2069)
+- Nuke: Adding `still` image family workflow [\#2064](https://github.com/pypeclub/OpenPype/pull/2064)
+- Maya: validate authorized loaded plugins [\#2062](https://github.com/pypeclub/OpenPype/pull/2062)
+- Tools: add support for pyenv on windows [\#2051](https://github.com/pypeclub/OpenPype/pull/2051)
+- SyncServer: Dropbox Provider [\#1979](https://github.com/pypeclub/OpenPype/pull/1979)
+- Burnin: Get data from context with defined keys. [\#1897](https://github.com/pypeclub/OpenPype/pull/1897)
+- Timers manager: Get task time [\#1896](https://github.com/pypeclub/OpenPype/pull/1896)
+- TVPaint: Option to stop timer on application exit. [\#1887](https://github.com/pypeclub/OpenPype/pull/1887)
+
+**🐛 Bug fixes**
+
+- Maya: fix model publishing [\#2130](https://github.com/pypeclub/OpenPype/pull/2130)
+- Fix - oiiotool wasn't recognized even if present [\#2129](https://github.com/pypeclub/OpenPype/pull/2129)
+- General: Disk mapping group [\#2120](https://github.com/pypeclub/OpenPype/pull/2120)
+- Hiero: publishing an effect for the first time makes a wrong resources path [\#2115](https://github.com/pypeclub/OpenPype/pull/2115)
+- Add startup script for Houdini Core. 
[\#2110](https://github.com/pypeclub/OpenPype/pull/2110)
+- TVPaint: Behavior name of loop also accepts repeat [\#2109](https://github.com/pypeclub/OpenPype/pull/2109)
+- Ftrack: Project settings save custom attributes skip unknown attributes [\#2103](https://github.com/pypeclub/OpenPype/pull/2103)
+- Blender: Fix NoneType error when animation\_data is missing for a rig [\#2101](https://github.com/pypeclub/OpenPype/pull/2101)
+- Fix broken import in sftp provider [\#2100](https://github.com/pypeclub/OpenPype/pull/2100)
+- Global: Fix docstring on publish plugin extract review [\#2097](https://github.com/pypeclub/OpenPype/pull/2097)
+- Delivery Action Files Sequence fix [\#2096](https://github.com/pypeclub/OpenPype/pull/2096)
+- General: Cloud mongo ca certificate issue [\#2095](https://github.com/pypeclub/OpenPype/pull/2095)
+- TVPaint: Creator use context from workfile [\#2087](https://github.com/pypeclub/OpenPype/pull/2087)
+- Blender: fix texture missing when publishing blend files [\#2085](https://github.com/pypeclub/OpenPype/pull/2085)
+- General: Startup validations oiio tool path fix on linux [\#2083](https://github.com/pypeclub/OpenPype/pull/2083)
+- Deadline: Collect deadline server does not check existence of deadline key [\#2082](https://github.com/pypeclub/OpenPype/pull/2082)
+- Blender: fixed Curves with modifiers in Rigs [\#2081](https://github.com/pypeclub/OpenPype/pull/2081)
+- Nuke UI scaling [\#2077](https://github.com/pypeclub/OpenPype/pull/2077)
+- Maya: Fix multi-camera renders [\#2065](https://github.com/pypeclub/OpenPype/pull/2065)
+- Fix Sync Queue when project disabled [\#2063](https://github.com/pypeclub/OpenPype/pull/2063)
+
+**Merged pull requests:**
+
+- Bump pywin32 from 300 to 301 [\#2086](https://github.com/pypeclub/OpenPype/pull/2086)
+
+## [3.4.1](https://github.com/pypeclub/OpenPype/tree/3.4.1) (2021-09-23)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.4.0...3.4.1)
+
+**🆕 New features**
+
+- Settings: Flag project as deactivated and hide from tools' view [\#2008](https://github.com/pypeclub/OpenPype/pull/2008)
+
+**🚀 Enhancements**
+
+- General: Startup validations [\#2054](https://github.com/pypeclub/OpenPype/pull/2054)
+- Nuke: proxy mode validator [\#2052](https://github.com/pypeclub/OpenPype/pull/2052)
+- Ftrack: Removed ftrack interface [\#2049](https://github.com/pypeclub/OpenPype/pull/2049)
+- Settings UI: Deferred set value on entity [\#2044](https://github.com/pypeclub/OpenPype/pull/2044)
+- Loader: Families filtering [\#2043](https://github.com/pypeclub/OpenPype/pull/2043)
+- Settings UI: Project view enhancements [\#2042](https://github.com/pypeclub/OpenPype/pull/2042)
+- Settings for Nuke IncrementScriptVersion [\#2039](https://github.com/pypeclub/OpenPype/pull/2039)
+- Loader & Library loader: Use tools from OpenPype [\#2038](https://github.com/pypeclub/OpenPype/pull/2038)
+- Adding predefined project folders creation in PM [\#2030](https://github.com/pypeclub/OpenPype/pull/2030)
+- WebserverModule: Removed interface of webserver module [\#2028](https://github.com/pypeclub/OpenPype/pull/2028)
+- TimersManager: Removed interface of timers manager [\#2024](https://github.com/pypeclub/OpenPype/pull/2024)
+- Feature Maya import asset from scene inventory [\#2018](https://github.com/pypeclub/OpenPype/pull/2018)
+
+**🐛 Bug fixes**
+
+- Timers manager: Typo fix [\#2058](https://github.com/pypeclub/OpenPype/pull/2058)
+- Hiero: Editorial fixes [\#2057](https://github.com/pypeclub/OpenPype/pull/2057)
+- Differentiate jpg sequences 
from thumbnail [\#2056](https://github.com/pypeclub/OpenPype/pull/2056)
+- FFmpeg: Split command to list does not work [\#2046](https://github.com/pypeclub/OpenPype/pull/2046)
+- Removed shell flag in subprocess call [\#2045](https://github.com/pypeclub/OpenPype/pull/2045)
+
+**Merged pull requests:**
+
+- Bump prismjs from 1.24.0 to 1.25.0 in /website [\#2050](https://github.com/pypeclub/OpenPype/pull/2050)
+
+## [3.4.0](https://github.com/pypeclub/OpenPype/tree/3.4.0) (2021-09-17)
+
+[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.1...3.4.0)
+
+### 📖 Documentation
+
+- Documentation: Ftrack launch arguments update [\#2014](https://github.com/pypeclub/OpenPype/pull/2014)
+- Nuke Quick Start / Tutorial [\#1952](https://github.com/pypeclub/OpenPype/pull/1952)
+- Houdini: add Camera, Point Cache, Composite, Redshift ROP and VDB Cache support [\#1821](https://github.com/pypeclub/OpenPype/pull/1821)
+
+**🆕 New features**
+
+- Nuke: Compatibility with Nuke 13 [\#2003](https://github.com/pypeclub/OpenPype/pull/2003)
+- Maya: Add Xgen family support [\#1947](https://github.com/pypeclub/OpenPype/pull/1947)
+- Feature/webpublisher backend [\#1876](https://github.com/pypeclub/OpenPype/pull/1876)
+- Blender: Improved assets handling [\#1615](https://github.com/pypeclub/OpenPype/pull/1615)
+
+**🚀 Enhancements**
+
+- Added possibility to configure synchronization of workfile version… [\#2041](https://github.com/pypeclub/OpenPype/pull/2041)
+- General: Task types in profiles [\#2036](https://github.com/pypeclub/OpenPype/pull/2036)
+- Console interpreter: Handle invalid sizes on initialization [\#2022](https://github.com/pypeclub/OpenPype/pull/2022)
+- Ftrack: Show OpenPype versions in event server status [\#2019](https://github.com/pypeclub/OpenPype/pull/2019)
+- General: Staging icon [\#2017](https://github.com/pypeclub/OpenPype/pull/2017)
+- Ftrack: Sync to avalon actions have jobs [\#2015](https://github.com/pypeclub/OpenPype/pull/2015)
+- Modules: Connect method is not required [\#2009](https://github.com/pypeclub/OpenPype/pull/2009)
+- Settings UI: Number with configurable steps [\#2001](https://github.com/pypeclub/OpenPype/pull/2001)
+- Moving project folder structure creation out of ftrack module \#1989 [\#1996](https://github.com/pypeclub/OpenPype/pull/1996)
+- Configurable items for providers without Settings [\#1987](https://github.com/pypeclub/OpenPype/pull/1987)
+- Global: Example addons [\#1986](https://github.com/pypeclub/OpenPype/pull/1986)
+- Standalone Publisher: Extract harmony zip handle workfile template [\#1982](https://github.com/pypeclub/OpenPype/pull/1982)
+- Settings UI: Number sliders [\#1978](https://github.com/pypeclub/OpenPype/pull/1978)
+- Workfiles: Support more workfile templates [\#1966](https://github.com/pypeclub/OpenPype/pull/1966)
+- Launcher: Fix crashes on action click [\#1964](https://github.com/pypeclub/OpenPype/pull/1964)
+- Settings: Minor fixes in UI and missing default values [\#1963](https://github.com/pypeclub/OpenPype/pull/1963)
+- Blender: Toggle system console works on windows [\#1962](https://github.com/pypeclub/OpenPype/pull/1962)
+- Global: Settings defined by Addons/Modules [\#1959](https://github.com/pypeclub/OpenPype/pull/1959)
+- CI: change release numbering triggers [\#1954](https://github.com/pypeclub/OpenPype/pull/1954)
+- Global: Avalon Host name collector [\#1949](https://github.com/pypeclub/OpenPype/pull/1949)
+- Global: Define hosts in CollectSceneVersion [\#1948](https://github.com/pypeclub/OpenPype/pull/1948)
+- Add 
face sets to exported alembics [\#1942](https://github.com/pypeclub/OpenPype/pull/1942) +- OpenPype: Add version validation and `--headless` mode and update progress 🔄 [\#1939](https://github.com/pypeclub/OpenPype/pull/1939) +- \#1894 - adds host to template\_name\_profiles for filtering [\#1915](https://github.com/pypeclub/OpenPype/pull/1915) +- Environments: Tool environments in alphabetical order [\#1910](https://github.com/pypeclub/OpenPype/pull/1910) +- Disregard publishing time. [\#1888](https://github.com/pypeclub/OpenPype/pull/1888) +- Dynamic modules [\#1872](https://github.com/pypeclub/OpenPype/pull/1872) + +**🐛 Bug fixes** + +- Workfiles tool: Task selection [\#2040](https://github.com/pypeclub/OpenPype/pull/2040) +- Ftrack: Delete old versions missing settings key [\#2037](https://github.com/pypeclub/OpenPype/pull/2037) +- Nuke: typo on a button [\#2034](https://github.com/pypeclub/OpenPype/pull/2034) +- Hiero: Fix "none" named tags [\#2033](https://github.com/pypeclub/OpenPype/pull/2033) +- FFmpeg: Subprocess arguments as list [\#2032](https://github.com/pypeclub/OpenPype/pull/2032) +- General: Fix Python 2 breaking line [\#2016](https://github.com/pypeclub/OpenPype/pull/2016) +- Bugfix/webpublisher task type [\#2006](https://github.com/pypeclub/OpenPype/pull/2006) +- Nuke thumbnails generated from middle of the sequence [\#1992](https://github.com/pypeclub/OpenPype/pull/1992) +- Nuke: last version from path gets correct version [\#1990](https://github.com/pypeclub/OpenPype/pull/1990) +- nuke, resolve, hiero: precollector order lest then 0.5 [\#1984](https://github.com/pypeclub/OpenPype/pull/1984) +- Last workfile with multiple work templates [\#1981](https://github.com/pypeclub/OpenPype/pull/1981) +- Collectors order [\#1977](https://github.com/pypeclub/OpenPype/pull/1977) +- Stop timer was within validator order range. 
[\#1975](https://github.com/pypeclub/OpenPype/pull/1975) +- Ftrack: arrow submodule has https url source [\#1974](https://github.com/pypeclub/OpenPype/pull/1974) +- Ftrack: Fix hosts attribute in collect ftrack username [\#1972](https://github.com/pypeclub/OpenPype/pull/1972) +- Deadline: Houdini plugins in different hierarchy [\#1970](https://github.com/pypeclub/OpenPype/pull/1970) +- Removed deprecated submodules [\#1967](https://github.com/pypeclub/OpenPype/pull/1967) +- Global: ExtractJpeg can handle filepaths with spaces [\#1961](https://github.com/pypeclub/OpenPype/pull/1961) +- Resolve path when adding to zip [\#1960](https://github.com/pypeclub/OpenPype/pull/1960) + +**Merged pull requests:** + +- Bump url-parse from 1.5.1 to 1.5.3 in /website [\#1958](https://github.com/pypeclub/OpenPype/pull/1958) +- Bump path-parse from 1.0.6 to 1.0.7 in /website [\#1933](https://github.com/pypeclub/OpenPype/pull/1933) + +## [3.3.1](https://github.com/pypeclub/OpenPype/tree/3.3.1) (2021-08-20) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.3.0...3.3.1) + +**🐛 Bug fixes** + +- TVPaint: Fixed rendered frame indexes [\#1946](https://github.com/pypeclub/OpenPype/pull/1946) +- Maya: Menu actions fix [\#1945](https://github.com/pypeclub/OpenPype/pull/1945) +- standalone: editorial shared object problem [\#1941](https://github.com/pypeclub/OpenPype/pull/1941) +- Bugfix nuke deadline app name [\#1928](https://github.com/pypeclub/OpenPype/pull/1928) + +## [3.3.0](https://github.com/pypeclub/OpenPype/tree/3.3.0) (2021-08-17) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.2.0...3.3.0) + +### 📖 Documentation + +- Standalone Publish of textures family [\#1834](https://github.com/pypeclub/OpenPype/pull/1834) + +**🆕 New features** + +- Settings UI: Breadcrumbs in settings [\#1932](https://github.com/pypeclub/OpenPype/pull/1932) +- Maya: Scene patching 🩹on submission to Deadline [\#1923](https://github.com/pypeclub/OpenPype/pull/1923) +- Feature AE local render [\#1901](https://github.com/pypeclub/OpenPype/pull/1901) + +**🚀 Enhancements** + +- Python console interpreter [\#1940](https://github.com/pypeclub/OpenPype/pull/1940) +- Global: Updated logos and Default settings [\#1927](https://github.com/pypeclub/OpenPype/pull/1927) +- Check for missing ✨ Python when using `pyenv` [\#1925](https://github.com/pypeclub/OpenPype/pull/1925) +- Settings: Default values for enum [\#1920](https://github.com/pypeclub/OpenPype/pull/1920) +- Settings UI: Modifiable dict view enhance [\#1919](https://github.com/pypeclub/OpenPype/pull/1919) +- submodules: avalon-core update [\#1911](https://github.com/pypeclub/OpenPype/pull/1911) +- Ftrack: Where I run action enhancement [\#1900](https://github.com/pypeclub/OpenPype/pull/1900) +- Ftrack: Private project server actions [\#1899](https://github.com/pypeclub/OpenPype/pull/1899) +- Support nested studio plugins paths. [\#1898](https://github.com/pypeclub/OpenPype/pull/1898) +- Settings: global validators with options [\#1892](https://github.com/pypeclub/OpenPype/pull/1892) +- Settings: Conditional dict enum positioning [\#1891](https://github.com/pypeclub/OpenPype/pull/1891) +- Expose stop timer through rest api. [\#1886](https://github.com/pypeclub/OpenPype/pull/1886) +- TVPaint: Increment workfile [\#1885](https://github.com/pypeclub/OpenPype/pull/1885) +- Allow Multiple Notes to run on tasks. 
[\#1882](https://github.com/pypeclub/OpenPype/pull/1882) +- Prepare for pyside2 [\#1869](https://github.com/pypeclub/OpenPype/pull/1869) +- Filter hosts in settings host-enum [\#1868](https://github.com/pypeclub/OpenPype/pull/1868) +- Local actions with process identifier [\#1867](https://github.com/pypeclub/OpenPype/pull/1867) +- Workfile tool start at host launch support [\#1865](https://github.com/pypeclub/OpenPype/pull/1865) +- Anatomy schema validation [\#1864](https://github.com/pypeclub/OpenPype/pull/1864) +- Ftrack prepare project structure [\#1861](https://github.com/pypeclub/OpenPype/pull/1861) +- Maya: support for configurable `dirmap` 🗺️ [\#1859](https://github.com/pypeclub/OpenPype/pull/1859) +- Independent general environments [\#1853](https://github.com/pypeclub/OpenPype/pull/1853) +- TVPaint Start Frame [\#1844](https://github.com/pypeclub/OpenPype/pull/1844) +- Ftrack push attributes action adds traceback to job [\#1843](https://github.com/pypeclub/OpenPype/pull/1843) +- Prepare project action enhance [\#1838](https://github.com/pypeclub/OpenPype/pull/1838) +- nuke: settings create missing default subsets [\#1829](https://github.com/pypeclub/OpenPype/pull/1829) +- Update poetry lock [\#1823](https://github.com/pypeclub/OpenPype/pull/1823) +- Settings: settings for plugins [\#1819](https://github.com/pypeclub/OpenPype/pull/1819) +- Settings list can use template or schema as object type [\#1815](https://github.com/pypeclub/OpenPype/pull/1815) +- Maya: Deadline custom settings [\#1797](https://github.com/pypeclub/OpenPype/pull/1797) +- Maya: Shader name validation [\#1762](https://github.com/pypeclub/OpenPype/pull/1762) + +**🐛 Bug fixes** + +- Fix - ftrack family was added incorrectly in some cases [\#1935](https://github.com/pypeclub/OpenPype/pull/1935) +- Fix - Deadline publish on Linux started Tray instead of headless publishing [\#1930](https://github.com/pypeclub/OpenPype/pull/1930) +- Maya: Validate Model Name - repair accident deletion in settings defaults [\#1929](https://github.com/pypeclub/OpenPype/pull/1929) +- Nuke: submit to farm failed due `ftrack` family remove [\#1926](https://github.com/pypeclub/OpenPype/pull/1926) +- Fix - validate takes repre\["files"\] as list all the time [\#1922](https://github.com/pypeclub/OpenPype/pull/1922) +- standalone: validator asset parents [\#1917](https://github.com/pypeclub/OpenPype/pull/1917) +- Nuke: update video file crassing [\#1916](https://github.com/pypeclub/OpenPype/pull/1916) +- Fix - texture validators for workfiles triggers only for textures workfiles [\#1914](https://github.com/pypeclub/OpenPype/pull/1914) +- Settings UI: List order works as expected [\#1906](https://github.com/pypeclub/OpenPype/pull/1906) +- Hiero: loaded clip was not set colorspace from version data [\#1904](https://github.com/pypeclub/OpenPype/pull/1904) +- Pyblish UI: Fix collecting stage processing [\#1903](https://github.com/pypeclub/OpenPype/pull/1903) +- Burnins: Use input's bitrate in h624 [\#1902](https://github.com/pypeclub/OpenPype/pull/1902) +- Bug: fixed python detection [\#1893](https://github.com/pypeclub/OpenPype/pull/1893) +- global: integrate name missing default template [\#1890](https://github.com/pypeclub/OpenPype/pull/1890) +- publisher: editorial plugins fixes [\#1889](https://github.com/pypeclub/OpenPype/pull/1889) +- Normalize path returned from Workfiles. 
[\#1880](https://github.com/pypeclub/OpenPype/pull/1880) +- Workfiles tool event arguments fix [\#1862](https://github.com/pypeclub/OpenPype/pull/1862) +- imageio: fix grouping [\#1856](https://github.com/pypeclub/OpenPype/pull/1856) +- Maya: don't add reference members as connections to the container set 📦 [\#1855](https://github.com/pypeclub/OpenPype/pull/1855) +- publisher: missing version in subset prop [\#1849](https://github.com/pypeclub/OpenPype/pull/1849) +- Ftrack type error fix in sync to avalon event handler [\#1845](https://github.com/pypeclub/OpenPype/pull/1845) +- Nuke: updating effects subset fail [\#1841](https://github.com/pypeclub/OpenPype/pull/1841) +- nuke: write render node skipped with crop [\#1836](https://github.com/pypeclub/OpenPype/pull/1836) +- Project folder structure overrides [\#1813](https://github.com/pypeclub/OpenPype/pull/1813) +- Maya: fix yeti settings path in extractor [\#1809](https://github.com/pypeclub/OpenPype/pull/1809) +- Failsafe for cross project containers. [\#1806](https://github.com/pypeclub/OpenPype/pull/1806) +- Houdini colector formatting keys fix [\#1802](https://github.com/pypeclub/OpenPype/pull/1802) +- Settings error dialog on show [\#1798](https://github.com/pypeclub/OpenPype/pull/1798) +- Application launch stdout/stderr in GUI build [\#1684](https://github.com/pypeclub/OpenPype/pull/1684) +- Nuke: re-use instance nodes output path [\#1577](https://github.com/pypeclub/OpenPype/pull/1577) + +**Merged pull requests:** + +- Fix - make AE workfile publish to Ftrack configurable [\#1937](https://github.com/pypeclub/OpenPype/pull/1937) +- Add support for multiple Deadline ☠️➖ servers [\#1905](https://github.com/pypeclub/OpenPype/pull/1905) +- Maya: add support for `RedshiftNormalMap` node, fix `tx` linear space 🚀 [\#1863](https://github.com/pypeclub/OpenPype/pull/1863) +- Maya: expected files -\> render products ⚙️ overhaul [\#1812](https://github.com/pypeclub/OpenPype/pull/1812) +- PS, AE - send actual context when another webserver is running [\#1811](https://github.com/pypeclub/OpenPype/pull/1811) + +## [3.2.0](https://github.com/pypeclub/OpenPype/tree/3.2.0) (2021-07-13) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.4...3.2.0) + +### 📖 Documentation + +- Fix: staging and `--use-version` option [\#1786](https://github.com/pypeclub/OpenPype/pull/1786) +- Subset template and TVPaint subset template docs [\#1717](https://github.com/pypeclub/OpenPype/pull/1717) +- Overscan color extract review [\#1701](https://github.com/pypeclub/OpenPype/pull/1701) + +**🚀 Enhancements** + +- Nuke: ftrack family plugin settings preset [\#1805](https://github.com/pypeclub/OpenPype/pull/1805) +- Standalone publisher last project [\#1799](https://github.com/pypeclub/OpenPype/pull/1799) +- Ftrack Multiple notes as server action [\#1795](https://github.com/pypeclub/OpenPype/pull/1795) +- Settings conditional dict [\#1777](https://github.com/pypeclub/OpenPype/pull/1777) +- Settings application use python 2 only where needed [\#1776](https://github.com/pypeclub/OpenPype/pull/1776) +- Settings UI copy/paste [\#1769](https://github.com/pypeclub/OpenPype/pull/1769) +- Workfile tool widths [\#1766](https://github.com/pypeclub/OpenPype/pull/1766) +- Push hierarchical attributes care about task parent changes [\#1763](https://github.com/pypeclub/OpenPype/pull/1763) +- Application executables with environment variables [\#1757](https://github.com/pypeclub/OpenPype/pull/1757) +- Deadline: Nuke submission additional attributes 
[\#1756](https://github.com/pypeclub/OpenPype/pull/1756) +- Settings schema without prefill [\#1753](https://github.com/pypeclub/OpenPype/pull/1753) +- Settings Hosts enum [\#1739](https://github.com/pypeclub/OpenPype/pull/1739) +- Validate containers settings [\#1736](https://github.com/pypeclub/OpenPype/pull/1736) +- PS - added loader from sequence [\#1726](https://github.com/pypeclub/OpenPype/pull/1726) +- Autoupdate launcher [\#1725](https://github.com/pypeclub/OpenPype/pull/1725) +- Toggle Ftrack upload in StandalonePublisher [\#1708](https://github.com/pypeclub/OpenPype/pull/1708) +- Nuke: Prerender Frame Range by default [\#1699](https://github.com/pypeclub/OpenPype/pull/1699) +- Smoother edges of color triangle [\#1695](https://github.com/pypeclub/OpenPype/pull/1695) + +**🐛 Bug fixes** + +- nuke: fixing wrong name of family folder when `used existing frames` [\#1803](https://github.com/pypeclub/OpenPype/pull/1803) +- Collect ftrack family bugs [\#1801](https://github.com/pypeclub/OpenPype/pull/1801) +- Invitee email can be None which break the Ftrack commit. [\#1788](https://github.com/pypeclub/OpenPype/pull/1788) +- Otio unrelated error on import [\#1782](https://github.com/pypeclub/OpenPype/pull/1782) +- FFprobe streams order [\#1775](https://github.com/pypeclub/OpenPype/pull/1775) +- Fix - single file files are str only, cast it to list to count properly [\#1772](https://github.com/pypeclub/OpenPype/pull/1772) +- Environments in app executable for MacOS [\#1768](https://github.com/pypeclub/OpenPype/pull/1768) +- Project specific environments [\#1767](https://github.com/pypeclub/OpenPype/pull/1767) +- Settings UI with refresh button [\#1764](https://github.com/pypeclub/OpenPype/pull/1764) +- Standalone publisher thumbnail extractor fix [\#1761](https://github.com/pypeclub/OpenPype/pull/1761) +- Anatomy others templates don't cause crash [\#1758](https://github.com/pypeclub/OpenPype/pull/1758) +- Backend acre module commit update [\#1745](https://github.com/pypeclub/OpenPype/pull/1745) +- hiero: precollect instances failing when audio selected [\#1743](https://github.com/pypeclub/OpenPype/pull/1743) +- Hiero: creator instance error [\#1742](https://github.com/pypeclub/OpenPype/pull/1742) +- Nuke: fixing render creator for no selection format failing [\#1741](https://github.com/pypeclub/OpenPype/pull/1741) +- StandalonePublisher: failing collector for editorial [\#1738](https://github.com/pypeclub/OpenPype/pull/1738) +- Local settings UI crash on missing defaults [\#1737](https://github.com/pypeclub/OpenPype/pull/1737) +- TVPaint white background on thumbnail [\#1735](https://github.com/pypeclub/OpenPype/pull/1735) +- Ftrack missing custom attribute message [\#1734](https://github.com/pypeclub/OpenPype/pull/1734) +- Launcher project changes [\#1733](https://github.com/pypeclub/OpenPype/pull/1733) +- Ftrack sync status [\#1732](https://github.com/pypeclub/OpenPype/pull/1732) +- TVPaint use layer name for default variant [\#1724](https://github.com/pypeclub/OpenPype/pull/1724) +- Default subset template for TVPaint review and workfile families [\#1716](https://github.com/pypeclub/OpenPype/pull/1716) +- Maya: Extract review hotfix [\#1714](https://github.com/pypeclub/OpenPype/pull/1714) +- Settings: Imageio improving granularity [\#1711](https://github.com/pypeclub/OpenPype/pull/1711) +- Application without executables [\#1679](https://github.com/pypeclub/OpenPype/pull/1679) +- Unreal: launching on Linux [\#1672](https://github.com/pypeclub/OpenPype/pull/1672) + +**Merged pull 
requests:** + +- Bump prismjs from 1.23.0 to 1.24.0 in /website [\#1773](https://github.com/pypeclub/OpenPype/pull/1773) +- TVPaint ftrack family [\#1755](https://github.com/pypeclub/OpenPype/pull/1755) + +## [2.18.4](https://github.com/pypeclub/OpenPype/tree/2.18.4) (2021-06-24) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.3...2.18.4) + +## [2.18.3](https://github.com/pypeclub/OpenPype/tree/2.18.3) (2021-06-23) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/2.18.2...2.18.3) + +## [2.18.2](https://github.com/pypeclub/OpenPype/tree/2.18.2) (2021-06-16) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.1.0...2.18.2) + +## [3.1.0](https://github.com/pypeclub/OpenPype/tree/3.1.0) (2021-06-15) + +[Full Changelog](https://github.com/pypeclub/OpenPype/compare/3.0.0...3.1.0) + +### 📖 Documentation + +- Feature Slack integration [\#1657](https://github.com/pypeclub/OpenPype/pull/1657) + +**🚀 Enhancements** + +- Log Viewer with OpenPype style [\#1703](https://github.com/pypeclub/OpenPype/pull/1703) +- Scrolling in OpenPype info widget [\#1702](https://github.com/pypeclub/OpenPype/pull/1702) +- OpenPype style in modules [\#1694](https://github.com/pypeclub/OpenPype/pull/1694) +- Sort applications and tools alphabetically in Settings UI [\#1689](https://github.com/pypeclub/OpenPype/pull/1689) +- \#683 - Validate Frame Range in Standalone Publisher [\#1683](https://github.com/pypeclub/OpenPype/pull/1683) +- Hiero: old container versions identify with red color [\#1682](https://github.com/pypeclub/OpenPype/pull/1682) +- Project Manger: Default name column width [\#1669](https://github.com/pypeclub/OpenPype/pull/1669) +- Remove outline in stylesheet [\#1667](https://github.com/pypeclub/OpenPype/pull/1667) +- TVPaint: Creator take layer name as default value for subset variant [\#1663](https://github.com/pypeclub/OpenPype/pull/1663) +- TVPaint custom subset template [\#1662](https://github.com/pypeclub/OpenPype/pull/1662) +- Editorial: conform assets validator [\#1659](https://github.com/pypeclub/OpenPype/pull/1659) +- Nuke - Publish simplification [\#1653](https://github.com/pypeclub/OpenPype/pull/1653) +- \#1333 - added tooltip hints to Pyblish buttons [\#1649](https://github.com/pypeclub/OpenPype/pull/1649) + +**🐛 Bug fixes** + +- Nuke: broken publishing rendered frames [\#1707](https://github.com/pypeclub/OpenPype/pull/1707) +- Standalone publisher Thumbnail export args [\#1705](https://github.com/pypeclub/OpenPype/pull/1705) +- Bad zip can break OpenPype start [\#1691](https://github.com/pypeclub/OpenPype/pull/1691) +- Hiero: published whole edit mov [\#1687](https://github.com/pypeclub/OpenPype/pull/1687) +- Ftrack subprocess handle of stdout/stderr [\#1675](https://github.com/pypeclub/OpenPype/pull/1675) +- Settings list race condifiton and mutable dict list conversion [\#1671](https://github.com/pypeclub/OpenPype/pull/1671) +- Mac launch arguments fix [\#1660](https://github.com/pypeclub/OpenPype/pull/1660) +- Fix missing dbm python module [\#1652](https://github.com/pypeclub/OpenPype/pull/1652) +- Transparent branches in view on Mac [\#1648](https://github.com/pypeclub/OpenPype/pull/1648) +- Add asset on task item [\#1646](https://github.com/pypeclub/OpenPype/pull/1646) +- Project manager save and queue [\#1645](https://github.com/pypeclub/OpenPype/pull/1645) +- New project anatomy values [\#1644](https://github.com/pypeclub/OpenPype/pull/1644) +- Farm publishing: check if published items do exist 
[\#1573](https://github.com/pypeclub/OpenPype/pull/1573) + +**Merged pull requests:** + +- Bump normalize-url from 4.5.0 to 4.5.1 in /website [\#1686](https://github.com/pypeclub/OpenPype/pull/1686) + ## [3.0.0](https://github.com/pypeclub/openpype/tree/3.0.0) @@ -11,12 +2253,12 @@ - Easy to add Application versions. - Per Project Environment and plugin management. - Robust profile system for creating reviewables and burnins, with filtering based on Application, Task and data family. -- Configurable publish plugins. +- Configurable publish plugins. - Options to make any validator or extractor, optional or disabled. - Color Management is now unified under anatomy settings. - Subset naming and grouping is fully configurable. - All project attributes can now be set directly in OpenPype settings. -- Studio Setting can be locked to prevent unwanted artist changes. +- Studio Setting can be locked to prevent unwanted artist changes. - You can now add per project and per task type templates for workfile initialization in most hosts. - Too many other individual configurable option to list in this changelog :) @@ -774,8 +3016,6 @@ - Standalone Publisher: getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) -# Changelog - ## [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) (2020-11-15) [Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) @@ -1565,10 +3805,4 @@ A large cleanup release. Most of the change are under the hood. - _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* - - -\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* - - \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* diff --git a/README.md b/README.md index 0e450fc48d..514ffb62c0 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,11 @@ + +[![All Contributors](https://img.shields.io/badge/all_contributors-27-orange.svg?style=flat-square)](#contributors-) + OpenPype ==== -[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2021-lightgrey?labelColor=303846) - +[![documentation](https://github.com/pypeclub/pype/actions/workflows/documentation.yml/badge.svg)](https://github.com/pypeclub/pype/actions/workflows/documentation.yml) ![GitHub VFX Platform](https://img.shields.io/badge/vfx%20platform-2022-lightgrey?labelColor=303846) Introduction @@ -28,7 +30,7 @@ The main things you will need to run and build OpenPype are: - **Terminal** in your OS - PowerShell 5.0+ (Windows) - Bash (Linux) -- [**Python 3.7.8**](#python) or higher +- [**Python 3.9.6**](#python) or higher - [**MongoDB**](#database) (needed only for local development) @@ -38,7 +40,7 @@ It can be built and ran on all common platforms. 
We develop and test on the foll - **Linux** - **Ubuntu** 20.04 LTS - **Centos** 7 -- **Mac OSX** +- **Mac OSX** - **10.15** Catalina - **11.1** Big Sur (using Rosetta2) @@ -47,13 +49,14 @@ For more details on requirements visit [requirements documentation](https://open Building OpenPype ------------- -To build OpenPype you currently need [Python 3.7](https://www.python.org/downloads/) as we are following +To build OpenPype you currently need [Python 3.9](https://www.python.org/downloads/) as we are following [vfx platform](https://vfxplatform.com). Because of some Linux distros comes with newer Python version -already, you need to install **3.7** version and make use of it. You can use perhaps [pyenv](https://github.com/pyenv/pyenv) for this on Linux. +already, you need to install **3.9** version and make use of it. You can use perhaps [pyenv](https://github.com/pyenv/pyenv) for this on Linux. +**Note**: We do not support 3.9.0 because of [this bug](https://github.com/python/cpython/pull/22670). Please, use higher versions of 3.9.x. ### Windows -You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). +You will need [Python >= 3.9.1](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). More tools might be needed for installing dependencies (for example for **OpenTimelineIO**) - mostly development tools like [CMake](https://cmake.org/) and [Visual Studio](https://visualstudio.microsoft.com/cs/downloads/) @@ -79,7 +82,7 @@ OpenPype is build using [CX_Freeze](https://cx-freeze.readthedocs.io/en/latest) ### macOS -You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll need also other tools to build +You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll need also other tools to build some OpenPype dependencies like [CMake](https://cmake.org/) and **XCode Command Line Tools** (or some other build system). Easy way of installing everything necessary is to use [Homebrew](https://brew.sh): @@ -103,19 +106,19 @@ exec "$SHELL" PATH=$(pyenv root)/shims:$PATH ``` -4) Pull in required Python version 3.7.x +4) Pull in required Python version 3.9.x ```sh # install Python build dependences brew install openssl readline sqlite3 xz zlib -# replace with up-to-date 3.7.x version -pyenv install 3.7.9 +# replace with up-to-date 3.9.x version +pyenv install 3.9.6 ``` 5) Set local Python version ```sh # switch to OpenPype source directory -pyenv local 3.7.9 +pyenv local 3.9.6 ``` #### To build OpenPype: @@ -142,7 +145,7 @@ sudo ./tools/docker_build.sh centos7 If all is successful, you'll find built OpenPype in `./build/` folder. #### Manual build -You will need [Python 3.7](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that doesn't have one preinstalled. +You will need [Python >= 3.9](https://www.python.org/downloads/) and [git](https://git-scm.com/downloads). You'll also need [curl](https://curl.se) on systems that doesn't have one preinstalled. To build Python related stuff, you need Python header files installed (`python3-dev` on Ubuntu for example). 
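Before running a manual build it can help to confirm the interpreter matches the constraints described above (a 3.9.x interpreter, 3.9.1 or newer, since 3.9.0 is excluded). A minimal sketch of such a check; `check_build_python` is an illustrative helper, not part of the repository:

```python
import sys


def check_build_python():
    """Fail fast when the current interpreter cannot build OpenPype."""
    info = sys.version_info
    # The build needs a 3.9.x interpreter, and 3.9.0 itself is excluded
    # because of the cpython bug referenced in the note above.
    if info[:2] != (3, 9) or info < (3, 9, 1):
        raise SystemExit(
            "Unsupported Python {}.{}.{}; use 3.9.1 or a newer 3.9.x".format(
                *info[:3]
            )
        )


if __name__ == "__main__":
    check_build_python()
```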
@@ -219,14 +222,14 @@ eval "$(pyenv virtualenv-init -)"
# reload shell
exec $SHELL

-# install Python 3.7.9
-pyenv install -v 3.7.9
+# install Python 3.9.x
+pyenv install -v 3.9.6

# change path to OpenPype 3
cd /path/to/openpype-3

# set local python version
-pyenv local 3.7.9
+pyenv local 3.9.6
```

@@ -283,3 +286,63 @@ Running tests

To run tests, execute `.\tools\run_tests(.ps1|.sh)`.

**Note that it needs existing virtual environment.**
+
+
+Developer tools
+-------------
+
+If you wish to add your own tools to the `.\tools` folder without git tracking them, name them with the `dev_` prefix (example: `dev_clear_pyc(.ps1|.sh)`).
+
+
+## Contributors ✨
+
+Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
+
+| Contributor | Contributions |
+| --- | --- |
+| Milan Kolar | 💻 📖 🚇 💼 🖋 🔍 🚧 📆 👀 🧑‍🏫 💬 |
+| Jakub Ježek | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
+| Ondřej Samohel | 💻 📖 🚇 🖋 👀 🚧 🧑‍🏫 📆 💬 |
+| Jakub Trllo | 💻 📖 🚇 👀 🚧 💬 |
+| Petr Kalis | 💻 📖 🚇 👀 🚧 💬 |
+| 64qam | 💻 👀 📖 🚇 📆 🚧 🖋 📓 |
+| Roy Nieterau | 💻 📖 👀 🧑‍🏫 💬 |
+| Toke Jepsen | 💻 📖 👀 🧑‍🏫 💬 |
+| Jiri Sindelar | 💻 👀 📖 🖋 📓 |
+| Simone Barbieri | 💻 📖 |
+| karimmozilla | 💻 |
+| Allan I. A. | 💻 |
+| murphy | 💻 👀 📓 📖 📆 |
+| Wijnand Koreman | 💻 |
+| Bo Zhou | 💻 |
+| Clément Hector | 💻 👀 |
+| David Lai | 💻 👀 |
+| Derek | 💻 📖 |
+| Gábor Marinov | 💻 📖 |
+| icyvapor | 💻 📖 |
+| Jérôme LORRAIN | 💻 |
+| David Morris-Oliveros | 💻 |
+| BenoitConnan | 💻 |
+| Malthaldar | 💻 |
+| Sven Neve | 💻 |
+| zafrs | 💻 |
+| Félix David | 💻 📖 |
+
+
+This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
diff --git a/common/openpype_common/distribution/README.md b/common/openpype_common/distribution/README.md
new file mode 100644
index 0000000000..212eb267b8
--- /dev/null
+++ b/common/openpype_common/distribution/README.md
@@ -0,0 +1,18 @@
+Addon distribution tool
+------------------------
+
+Code in this folder is the backend portion of the addon distribution logic for the v4 server.
+
+Each host and module will be a separate addon in the future. Each v4 server can run a different set of addons.
+
+The client (running on an artist machine) first asks the v4 server for a list of enabled addons.
+(It expects a list of JSON documents matching the `addon_distribution.py:AddonInfo` object.)
+Next it checks whether each enabled addon version is present in the local folder. If a version of
+an addon is missing, the client uses the information in the addon to download the zip file
+(from http/shared local disk/git) and unzip it.
+
+A required part of addon distribution will be the sharing of dependencies (Python libraries, utilities), which is not part of this folder.
+
+The location of this folder might change in the future, as a client will need to add this folder to `sys.path` reliably.
+
+This code needs to be as independent of OpenPype code as possible!
\ No newline at end of file
diff --git a/openpype/hosts/testhost/__init__.py b/common/openpype_common/distribution/__init__.py
similarity index 100%
rename from openpype/hosts/testhost/__init__.py
rename to common/openpype_common/distribution/__init__.py
diff --git a/common/openpype_common/distribution/addon_distribution.py b/common/openpype_common/distribution/addon_distribution.py
new file mode 100644
index 0000000000..5e48639dec
--- /dev/null
+++ b/common/openpype_common/distribution/addon_distribution.py
@@ -0,0 +1,208 @@
+import os
+from enum import Enum
+from abc import abstractmethod
+import attr
+import logging
+import requests
+import platform
+import shutil
+
+from .file_handler import RemoteFileHandler
+from .addon_info import AddonInfo
+
+
+class UpdateState(Enum):
+    EXISTS = "exists"
+    UPDATED = "updated"
+    FAILED = "failed"
+
+
+class AddonDownloader:
+    log = logging.getLogger(__name__)
+
+    def __init__(self):
+        self._downloaders = {}
+
+    def register_format(self, downloader_type, downloader):
+        self._downloaders[downloader_type.value] = downloader
+
+    def get_downloader(self, downloader_type):
+        downloader = self._downloaders.get(downloader_type)
+        if not downloader:
+            raise ValueError(f"{downloader_type} not implemented")
+        return downloader()
+
+    @classmethod
+    @abstractmethod
+    def download(cls, source, destination):
+        """Download addon source and return local path to addon zip file.
+
+        Args:
+            source (dict): source descriptor, eg. {"type": "http", "url": "https://..."}
+            destination (str): local folder to unzip into
+        Returns:
+            (str) local path to addon zip file
+        """
+        pass
+
+    @classmethod
+    def check_hash(cls, addon_path, addon_hash):
+        """Compares expected 'addon_hash' with hash of downloaded 'addon_path' file.
+ + Args: + addon_path (str): local path to addon zip file + addon_hash (str): sha256 hash of zip file + Raises: + ValueError if hashes doesn't match + """ + if not os.path.exists(addon_path): + raise ValueError(f"{addon_path} doesn't exist.") + if not RemoteFileHandler.check_integrity(addon_path, + addon_hash, + hash_type="sha256"): + raise ValueError(f"{addon_path} doesn't match expected hash.") + + @classmethod + def unzip(cls, addon_zip_path, destination): + """Unzips local 'addon_zip_path' to 'destination'. + + Args: + addon_zip_path (str): local path to addon zip file + destination (str): local folder to unzip + """ + RemoteFileHandler.unzip(addon_zip_path, destination) + os.remove(addon_zip_path) + + @classmethod + def remove(cls, addon_url): + pass + + +class OSAddonDownloader(AddonDownloader): + + @classmethod + def download(cls, source, destination): + # OS doesnt need to download, unzip directly + addon_url = source["path"].get(platform.system().lower()) + if not os.path.exists(addon_url): + raise ValueError("{} is not accessible".format(addon_url)) + return addon_url + + +class HTTPAddonDownloader(AddonDownloader): + CHUNK_SIZE = 100000 + + @classmethod + def download(cls, source, destination): + source_url = source["url"] + cls.log.debug(f"Downloading {source_url} to {destination}") + file_name = os.path.basename(destination) + _, ext = os.path.splitext(file_name) + if (ext.replace(".", '') not + in set(RemoteFileHandler.IMPLEMENTED_ZIP_FORMATS)): + file_name += ".zip" + RemoteFileHandler.download_url(source_url, + destination, + filename=file_name) + + return os.path.join(destination, file_name) + + +def get_addons_info(server_endpoint): + """Returns list of addon information from Server""" + # TODO temp + # addon_info = AddonInfo( + # **{"name": "openpype_slack", + # "version": "1.0.0", + # "addon_url": "c:/projects/openpype_slack_1.0.0.zip", + # "type": UrlType.FILESYSTEM, + # "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa + # + # http_addon = AddonInfo( + # **{"name": "openpype_slack", + # "version": "1.0.0", + # "addon_url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing", # noqa + # "type": UrlType.HTTP, + # "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658"}) # noqa + + response = requests.get(server_endpoint) + if not response.ok: + raise Exception(response.text) + + addons_info = [] + for addon in response.json(): + addons_info.append(AddonInfo(**addon)) + return addons_info + + +def update_addon_state(addon_infos, destination_folder, factory, + log=None): + """Loops through all 'addon_infos', compares local version, unzips. + + Loops through server provided list of dictionaries with information about + available addons. Looks if each addon is already present and deployed. + If isn't, addon zip gets downloaded and unzipped into 'destination_folder'. + Args: + addon_infos (list of AddonInfo) + destination_folder (str): local path + factory (AddonDownloader): factory to get appropriate downloader per + addon type + log (logging.Logger) + Returns: + (dict): {"addon_full_name": UpdateState.value + (eg. 
"exists"|"updated"|"failed") + """ + if not log: + log = logging.getLogger(__name__) + + download_states = {} + for addon in addon_infos: + full_name = "{}_{}".format(addon.name, addon.version) + addon_dest = os.path.join(destination_folder, full_name) + + if os.path.isdir(addon_dest): + log.debug(f"Addon version folder {addon_dest} already exists.") + download_states[full_name] = UpdateState.EXISTS.value + continue + + for source in addon.sources: + download_states[full_name] = UpdateState.FAILED.value + try: + downloader = factory.get_downloader(source.type) + zip_file_path = downloader.download(attr.asdict(source), + addon_dest) + downloader.check_hash(zip_file_path, addon.hash) + downloader.unzip(zip_file_path, addon_dest) + download_states[full_name] = UpdateState.UPDATED.value + break + except Exception: + log.warning(f"Error happened during updating {addon.name}", + exc_info=True) + if os.path.isdir(addon_dest): + log.debug(f"Cleaning {addon_dest}") + shutil.rmtree(addon_dest) + + return download_states + + +def check_addons(server_endpoint, addon_folder, downloaders): + """Main entry point to compare existing addons with those on server. + + Args: + server_endpoint (str): url to v4 server endpoint + addon_folder (str): local dir path for addons + downloaders (AddonDownloader): factory of downloaders + + Raises: + (RuntimeError) if any addon failed update + """ + addons_info = get_addons_info(server_endpoint) + result = update_addon_state(addons_info, + addon_folder, + downloaders) + if UpdateState.FAILED.value in result.values(): + raise RuntimeError(f"Unable to update some addons {result}") + + +def cli(*args): + raise NotImplementedError diff --git a/common/openpype_common/distribution/addon_info.py b/common/openpype_common/distribution/addon_info.py new file mode 100644 index 0000000000..00ece11f3b --- /dev/null +++ b/common/openpype_common/distribution/addon_info.py @@ -0,0 +1,80 @@ +import attr +from enum import Enum + + +class UrlType(Enum): + HTTP = "http" + GIT = "git" + FILESYSTEM = "filesystem" + + +@attr.s +class MultiPlatformPath(object): + windows = attr.ib(default=None) + linux = attr.ib(default=None) + darwin = attr.ib(default=None) + + +@attr.s +class AddonSource(object): + type = attr.ib() + + +@attr.s +class LocalAddonSource(AddonSource): + path = attr.ib(default=attr.Factory(MultiPlatformPath)) + + +@attr.s +class WebAddonSource(AddonSource): + url = attr.ib(default=None) + + +@attr.s +class VersionData(object): + version_data = attr.ib(default=None) + + +@attr.s +class AddonInfo(object): + """Object matching json payload from Server""" + name = attr.ib() + version = attr.ib() + title = attr.ib(default=None) + sources = attr.ib(default=attr.Factory(dict)) + hash = attr.ib(default=None) + description = attr.ib(default=None) + license = attr.ib(default=None) + authors = attr.ib(default=None) + + @classmethod + def from_dict(cls, data): + sources = [] + + production_version = data.get("productionVersion") + if not production_version: + return + + # server payload contains info about all versions + # active addon must have 'productionVersion' and matching version info + version_data = data.get("versions", {})[production_version] + + for source in version_data.get("clientSourceInfo", []): + if source.get("type") == UrlType.FILESYSTEM.value: + source_addon = LocalAddonSource(type=source["type"], + path=source["path"]) + if source.get("type") == UrlType.HTTP.value: + source_addon = WebAddonSource(type=source["type"], + url=source["url"]) + + sources.append(source_addon) 
+ + return cls(name=data.get("name"), + version=production_version, + sources=sources, + hash=data.get("hash"), + description=data.get("description"), + title=data.get("title"), + license=data.get("license"), + authors=data.get("authors")) + diff --git a/tests/lib/file_handler.py b/common/openpype_common/distribution/file_handler.py similarity index 82% rename from tests/lib/file_handler.py rename to common/openpype_common/distribution/file_handler.py index ee3abc6ecb..e649f143e9 100644 --- a/tests/lib/file_handler.py +++ b/common/openpype_common/distribution/file_handler.py @@ -1,4 +1,3 @@ -import enlighten import os import re import urllib @@ -21,7 +20,7 @@ class RemoteFileHandler: 'tar.gz', 'tar.xz', 'tar.bz2'] @staticmethod - def calculate_md5(fpath, chunk_size): + def calculate_md5(fpath, chunk_size=10000): md5 = hashlib.md5() with open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(chunk_size), b''): @@ -33,17 +32,45 @@ class RemoteFileHandler: return md5 == RemoteFileHandler.calculate_md5(fpath, **kwargs) @staticmethod - def check_integrity(fpath, md5=None): + def calculate_sha256(fpath): + """Calculate sha256 for content of the file. + + Args: + fpath (str): Path to file. + + Returns: + str: hex encoded sha256 + + """ + h = hashlib.sha256() + b = bytearray(128 * 1024) + mv = memoryview(b) + with open(fpath, 'rb', buffering=0) as f: + for n in iter(lambda: f.readinto(mv), 0): + h.update(mv[:n]) + return h.hexdigest() + + @staticmethod + def check_sha256(fpath, sha256, **kwargs): + return sha256 == RemoteFileHandler.calculate_sha256(fpath, **kwargs) + + @staticmethod + def check_integrity(fpath, hash_value=None, hash_type=None): if not os.path.isfile(fpath): return False - if md5 is None: + if hash_value is None: return True - return RemoteFileHandler.check_md5(fpath, md5) + if not hash_type: + raise ValueError("Provide hash type, md5 or sha256") + if hash_type == 'md5': + return RemoteFileHandler.check_md5(fpath, hash_value) + if hash_type == "sha256": + return RemoteFileHandler.check_sha256(fpath, hash_value) @staticmethod def download_url( url, root, filename=None, - md5=None, max_redirect_hops=3 + sha256=None, max_redirect_hops=3 ): """Download a file from a url and place it in root. Args: @@ -51,7 +78,7 @@ class RemoteFileHandler: root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL - md5 (str, optional): MD5 checksum of the download. + sha256 (str, optional): sha256 checksum of the download. 
If None, do not check max_redirect_hops (int, optional): Maximum number of redirect hops allowed @@ -64,7 +91,8 @@ class RemoteFileHandler: os.makedirs(root, exist_ok=True) # check if file is already present locally - if RemoteFileHandler.check_integrity(fpath, md5): + if RemoteFileHandler.check_integrity(fpath, + sha256, hash_type="sha256"): print('Using downloaded and verified file: ' + fpath) return @@ -76,7 +104,7 @@ class RemoteFileHandler: file_id = RemoteFileHandler._get_google_drive_file_id(url) if file_id is not None: return RemoteFileHandler.download_file_from_google_drive( - file_id, root, filename, md5) + file_id, root, filename, sha256) # download the file try: @@ -92,20 +120,21 @@ class RemoteFileHandler: raise e # check integrity of downloaded file - if not RemoteFileHandler.check_integrity(fpath, md5): + if not RemoteFileHandler.check_integrity(fpath, + sha256, hash_type="sha256"): raise RuntimeError("File not found or corrupted.") @staticmethod def download_file_from_google_drive(file_id, root, filename=None, - md5=None): + sha256=None): """Download a Google Drive file from and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. - md5 (str, optional): MD5 checksum of the download. + sha256 (str, optional): sha256 checksum of the download. If None, do not check """ # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url # noqa @@ -119,8 +148,8 @@ class RemoteFileHandler: os.makedirs(root, exist_ok=True) - if os.path.isfile(fpath) and RemoteFileHandler.check_integrity(fpath, - md5): + if os.path.isfile(fpath) and RemoteFileHandler.check_integrity( + fpath, sha256, hash_type="sha256"): print('Using downloaded and verified file: ' + fpath) else: session = requests.Session() @@ -222,6 +251,11 @@ class RemoteFileHandler: if key.startswith('download_warning'): return value + # handle antivirus warning for big zips + found = re.search("(confirm=)([^&.+])", response.text) + if found: + return found.groups()[1] + return None @staticmethod @@ -229,15 +263,9 @@ class RemoteFileHandler: response_gen, destination, ): with open(destination, "wb") as f: - pbar = enlighten.Counter( - total=None, desc="Save content", units="%", color="green") - progress = 0 for chunk in response_gen: if chunk: # filter out keep-alive new chunks f.write(chunk) - progress += len(chunk) - - pbar.close() @staticmethod def _quota_exceeded(first_chunk): diff --git a/common/openpype_common/distribution/tests/test_addon_distributtion.py b/common/openpype_common/distribution/tests/test_addon_distributtion.py new file mode 100644 index 0000000000..765ea0596a --- /dev/null +++ b/common/openpype_common/distribution/tests/test_addon_distributtion.py @@ -0,0 +1,167 @@ +import pytest +import attr +import tempfile + +from common.openpype_common.distribution.addon_distribution import ( + AddonDownloader, + OSAddonDownloader, + HTTPAddonDownloader, + AddonInfo, + update_addon_state, + UpdateState +) +from common.openpype_common.distribution.addon_info import UrlType + + +@pytest.fixture +def addon_downloader(): + addon_downloader = AddonDownloader() + addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader) + addon_downloader.register_format(UrlType.HTTP, HTTPAddonDownloader) + + yield addon_downloader + + +@pytest.fixture +def http_downloader(addon_downloader): + yield 
addon_downloader.get_downloader(UrlType.HTTP.value) + + +@pytest.fixture +def temp_folder(): + yield tempfile.mkdtemp() + + +@pytest.fixture +def sample_addon_info(): + addon_info = { + "versions": { + "1.0.0": { + "clientPyproject": { + "tool": { + "poetry": { + "dependencies": { + "nxtools": "^1.6", + "orjson": "^3.6.7", + "typer": "^0.4.1", + "email-validator": "^1.1.3", + "python": "^3.10", + "fastapi": "^0.73.0" + } + } + } + }, + "hasSettings": True, + "clientSourceInfo": [ + { + "type": "http", + "url": "https://drive.google.com/file/d/1TcuV8c2OV8CcbPeWi7lxOdqWsEqQNPYy/view?usp=sharing" # noqa + }, + { + "type": "filesystem", + "path": { + "windows": ["P:/sources/some_file.zip", + "W:/sources/some_file.zip"], # noqa + "linux": ["/mnt/srv/sources/some_file.zip"], + "darwin": ["/Volumes/srv/sources/some_file.zip"] + } + } + ], + "frontendScopes": { + "project": { + "sidebar": "hierarchy" + } + } + } + }, + "description": "", + "title": "Slack addon", + "name": "openpype_slack", + "productionVersion": "1.0.0", + "hash": "4be25eb6215e91e5894d3c5475aeb1e379d081d3f5b43b4ee15b0891cf5f5658" # noqa + } + yield addon_info + + +def test_register(printer): + addon_downloader = AddonDownloader() + + assert len(addon_downloader._downloaders) == 0, "Contains registered" + + addon_downloader.register_format(UrlType.FILESYSTEM, OSAddonDownloader) + assert len(addon_downloader._downloaders) == 1, "Should contain one" + + +def test_get_downloader(printer, addon_downloader): + assert addon_downloader.get_downloader(UrlType.FILESYSTEM.value), "Should find" # noqa + + with pytest.raises(ValueError): + addon_downloader.get_downloader("unknown"), "Shouldn't find" + + +def test_addon_info(printer, sample_addon_info): + """Tests parsing of expected payload from v4 server into AadonInfo.""" + valid_minimum = { + "name": "openpype_slack", + "productionVersion": "1.0.0", + "versions": { + "1.0.0": { + "clientSourceInfo": [ + { + "type": "filesystem", + "path": { + "windows": [ + "P:/sources/some_file.zip", + "W:/sources/some_file.zip"], + "linux": [ + "/mnt/srv/sources/some_file.zip"], + "darwin": [ + "/Volumes/srv/sources/some_file.zip"] # noqa + } + } + ] + } + } + } + + assert AddonInfo.from_dict(valid_minimum), "Missing required fields" + + valid_minimum["versions"].pop("1.0.0") + with pytest.raises(KeyError): + assert not AddonInfo.from_dict(valid_minimum), "Must fail without version data" # noqa + + valid_minimum.pop("productionVersion") + assert not AddonInfo.from_dict( + valid_minimum), "none if not productionVersion" # noqa + + addon = AddonInfo.from_dict(sample_addon_info) + assert addon, "Should be created" + assert addon.name == "openpype_slack", "Incorrect name" + assert addon.version == "1.0.0", "Incorrect version" + + with pytest.raises(TypeError): + assert addon["name"], "Dict approach not implemented" + + addon_as_dict = attr.asdict(addon) + assert addon_as_dict["name"], "Dict approach should work" + + +def test_update_addon_state(printer, sample_addon_info, + temp_folder, addon_downloader): + """Tests possible cases of addon update.""" + addon_info = AddonInfo.from_dict(sample_addon_info) + orig_hash = addon_info.hash + + addon_info.hash = "brokenhash" + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == UpdateState.FAILED.value, \ + "Update should failed because of wrong hash" + + addon_info.hash = orig_hash + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == 
UpdateState.UPDATED.value, \ + "Addon should have been updated" + + result = update_addon_state([addon_info], temp_folder, addon_downloader) + assert result["openpype_slack_1.0.0"] == UpdateState.EXISTS.value, \ + "Addon should already exist" diff --git a/igniter/__init__.py b/igniter/__init__.py index 02cba6a483..aa1b1d209e 100644 --- a/igniter/__init__.py +++ b/igniter/__init__.py @@ -24,7 +24,7 @@ def open_dialog(): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .install_dialog import InstallDialog scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) @@ -47,7 +47,7 @@ def open_update_window(openpype_version): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .update_window import UpdateWindow scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) @@ -71,7 +71,7 @@ def show_message_dialog(title, message): if os.getenv("OPENPYPE_HEADLESS_MODE"): print("!!! Can't open dialog in headless mode. Exiting.") sys.exit(1) - from Qt import QtWidgets, QtCore + from qtpy import QtWidgets, QtCore from .message_dialog import MessageDialog scale_attr = getattr(QtCore.Qt, "AA_EnableHighDpiScaling", None) diff --git a/igniter/__main__.py b/igniter/__main__.py index b453d29d5f..9783b20f49 100644 --- a/igniter/__main__.py +++ b/igniter/__main__.py @@ -2,8 +2,7 @@ """Open install dialog.""" import sys -from Qt import QtWidgets # noqa -from Qt.QtCore import Signal # noqa +from qtpy import QtWidgets from .install_dialog import InstallDialog diff --git a/igniter/bootstrap_repos.py b/igniter/bootstrap_repos.py index ad49f868d5..6c7c834062 100644 --- a/igniter/bootstrap_repos.py +++ b/igniter/bootstrap_repos.py @@ -57,13 +57,12 @@ class OpenPypeVersion(semver.VersionInfo): """Class for storing information about OpenPype version. Attributes: - staging (bool): True if it is staging version path (str): path to OpenPype """ - staging = False path = None - _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$") # noqa: E501 + # this should match any string complying with https://semver.org/ + _VERSION_REGEX = re.compile(r"(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P[a-zA-Z\d\-.]*))?(?:\+(?P[a-zA-Z\d\-.]*))?") # noqa: E501 _installed_version = None def __init__(self, *args, **kwargs): @@ -82,12 +81,10 @@ class OpenPypeVersion(semver.VersionInfo): build (str): an optional build string version (str): if set, it will be parsed and will override parameters like `major`, `minor` and so on. - staging (bool): set to True if version is staging. path (Path): path to version location. 
""" self.path = None - self.staging = False if "version" in kwargs.keys(): if not kwargs.get("version"): @@ -112,32 +109,10 @@ class OpenPypeVersion(semver.VersionInfo): if "path" in kwargs.keys(): kwargs.pop("path") - if kwargs.get("staging"): - self.staging = kwargs.get("staging", False) - kwargs.pop("staging") - - if "staging" in kwargs.keys(): - kwargs.pop("staging") - - if self.staging: - if kwargs.get("build"): - if "staging" not in kwargs.get("build"): - kwargs["build"] = "{}-staging".format(kwargs.get("build")) - else: - kwargs["build"] = "staging" - - if kwargs.get("build") and "staging" in kwargs.get("build", ""): - self.staging = True - super().__init__(*args, **kwargs) - def __eq__(self, other): - result = super().__eq__(other) - return bool(result and self.staging == other.staging) - def __repr__(self): - return "<{}: {} - path={}>".format( - self.__class__.__name__, str(self), self.path) + return f"<{self.__class__.__name__}: {str(self)} - path={self.path}>" def __lt__(self, other: OpenPypeVersion): result = super().__lt__(other) @@ -150,43 +125,11 @@ class OpenPypeVersion(semver.VersionInfo): return True if self.finalize_version() == other.finalize_version() and \ - self.prerelease == other.prerelease and \ - self.is_staging() and not other.is_staging(): + self.prerelease == other.prerelease: return True return result - def set_staging(self) -> OpenPypeVersion: - """Set version as staging and return it. - - This will preserve current one. - - Returns: - OpenPypeVersion: Set as staging. - - """ - if self.staging: - return self - return self.replace(parts={"build": f"{self.build}-staging"}) - - def set_production(self) -> OpenPypeVersion: - """Set version as production and return it. - - This will preserve current one. - - Returns: - OpenPypeVersion: Set as production. - - """ - if not self.staging: - return self - return self.replace( - parts={"build": self.build.replace("-staging", "")}) - - def is_staging(self) -> bool: - """Test if current version is staging one.""" - return self.staging - def get_main_version(self) -> str: """Return main version component. @@ -212,30 +155,16 @@ class OpenPypeVersion(semver.VersionInfo): OpenPypeVersion: of detected or None. """ + # strip .zip ext if present + string = re.sub(r"\.zip$", "", string, flags=re.IGNORECASE) m = re.search(OpenPypeVersion._VERSION_REGEX, string) if not m: return None version = OpenPypeVersion.parse(string[m.start():m.end()]) - if "staging" in string[m.start():m.end()]: - version.staging = True return version - @classmethod - def parse(cls, version): - """Extends parse to handle ta handle staging variant.""" - v = super().parse(version) - openpype_version = cls(major=v.major, minor=v.minor, - patch=v.patch, prerelease=v.prerelease, - build=v.build) - if v.build and "staging" in v.build: - openpype_version.staging = True - return openpype_version - def __hash__(self): - if self.path: - return hash(self.path) - else: - return hash(str(self)) + return hash(self.path) if self.path else hash(str(self)) @staticmethod def is_version_in_dir( @@ -383,73 +312,28 @@ class OpenPypeVersion(semver.VersionInfo): return False @classmethod - def get_local_versions( - cls, production: bool = None, staging: bool = None - ) -> List: + def get_local_versions(cls) -> List: """Get all versions available on this machine. - Arguments give ability to specify if filtering is needed. If both - arguments are set to None all found versions are returned. + Returns: + list: of compatible versions available on the machine. 
- Args: - production (bool): Return production versions. - staging (bool): Return staging versions. """ - # Return all local versions if arguments are set to None - if production is None and staging is None: - production = True - staging = True - - elif production is None and not staging: - production = True - - elif staging is None and not production: - staging = True - - # Just return empty output if both are disabled - if not production and not staging: - return [] - + # DEPRECATED: backwards compatible way to look for versions in root dir_to_search = Path(user_data_dir("openpype", "pypeclub")) - versions = OpenPypeVersion.get_versions_from_directory( - dir_to_search - ) - filtered_versions = [] - for version in versions: - if version.is_staging(): - if staging: - filtered_versions.append(version) - elif production: - filtered_versions.append(version) - return list(sorted(set(filtered_versions))) + versions = OpenPypeVersion.get_versions_from_directory(dir_to_search) + + return list(sorted(set(versions))) @classmethod - def get_remote_versions( - cls, production: bool = None, staging: bool = None - ) -> List: + def get_remote_versions(cls) -> List: """Get all versions available in OpenPype Path. - Arguments give ability to specify if filtering is needed. If both - arguments are set to None all found versions are returned. + Returns: + list of OpenPypeVersions: Versions found in OpenPype path. - Args: - production (bool): Return production versions. - staging (bool): Return staging versions. """ # Return all local versions if arguments are set to None - if production is None and staging is None: - production = True - staging = True - - elif production is None and not staging: - production = True - - elif staging is None and not production: - staging = True - - # Just return empty output if both are disabled - if not production and not staging: - return [] dir_to_search = None if cls.openpype_path_is_accessible(): @@ -469,17 +353,12 @@ class OpenPypeVersion(semver.VersionInfo): return [] versions = cls.get_versions_from_directory(dir_to_search) - filtered_versions = [] - for version in versions: - if version.is_staging(): - if staging: - filtered_versions.append(version) - elif production: - filtered_versions.append(version) - return list(sorted(set(filtered_versions))) + + return list(sorted(set(versions))) @staticmethod - def get_versions_from_directory(openpype_dir: Path) -> List: + def get_versions_from_directory( + openpype_dir: Path) -> List: """Get all detected OpenPype versions in directory. Args: @@ -492,15 +371,22 @@ class OpenPypeVersion(semver.VersionInfo): ValueError: if invalid path is specified. """ + openpype_versions = [] if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") + return openpype_versions - _openpype_versions = [] # iterate over directory in first level and find all that might # contain OpenPype. for item in openpype_dir.iterdir(): + # if the item is directory with major.minor version, dive deeper - # if file, strip extension, in case of dir not. + if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): + _versions = OpenPypeVersion.get_versions_from_directory( + item) + if _versions: + openpype_versions += _versions + + # if file exists, strip extension, in case of dir don't. 
name = item.name if item.is_dir() else item.stem result = OpenPypeVersion.version_in_str(name) @@ -519,9 +405,9 @@ class OpenPypeVersion(semver.VersionInfo): continue detected_version.path = item - _openpype_versions.append(detected_version) + openpype_versions.append(detected_version) - return sorted(_openpype_versions) + return sorted(openpype_versions) @staticmethod def get_installed_version_str() -> str: @@ -547,16 +433,14 @@ class OpenPypeVersion(semver.VersionInfo): @staticmethod def get_latest_version( - staging: bool = False, local: bool = None, remote: bool = None - ) -> OpenPypeVersion: - """Get latest available version. + ) -> Union[OpenPypeVersion, None]: + """Get the latest available version. The version does not contain information about path and source. - This is utility version to get latest version from all found. Build - version is not listed if staging is enabled. + This is utility version to get the latest version from all found. Arguments 'local' and 'remote' define if local and remote repository versions are used. All versions are used if both are not set (or set @@ -565,9 +449,12 @@ class OpenPypeVersion(semver.VersionInfo): 'False' in that case only build version can be used. Args: - staging (bool, optional): List staging versions if True. local (bool, optional): List local versions if True. remote (bool, optional): List remote versions if True. + + Returns: + Latest OpenPypeVersion or None + """ if local is None and remote is None: local = True @@ -580,22 +467,9 @@ class OpenPypeVersion(semver.VersionInfo): remote = True installed_version = OpenPypeVersion.get_installed_version() - local_versions = [] - remote_versions = [] - if local: - local_versions = OpenPypeVersion.get_local_versions( - staging=staging - ) - if remote: - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging - ) - all_versions = local_versions + remote_versions - if not staging: - all_versions.append(installed_version) - - if not all_versions: - return None + local_versions = OpenPypeVersion.get_local_versions() if local else [] + remote_versions = OpenPypeVersion.get_remote_versions() if remote else [] # noqa: E501 + all_versions = local_versions + remote_versions + [installed_version] all_versions.sort() return all_versions[-1] @@ -621,14 +495,27 @@ class OpenPypeVersion(semver.VersionInfo): return None return OpenPypeVersion(version=result) + def is_compatible(self, version: OpenPypeVersion): + """Test build compatibility. + + This will simply compare major and minor versions (ignoring patch + and the rest). + + Args: + version (OpenPypeVersion): Version to check compatibility with. + + Returns: + bool: if the version is compatible + + """ + return self.major == version.major and self.minor == version.minor + class BootstrapRepos: """Class for bootstrapping local OpenPype installation. Attributes: data_dir (Path): local OpenPype installation directory. - live_repo_dir (Path): path to repos directory if running live, - otherwise `None`. registry (OpenPypeSettingsRegistry): OpenPype registry object. 
zip_filter (list): List of files to exclude from zip openpype_filter (list): list of top level directories to @@ -654,7 +541,7 @@ class BootstrapRepos: self.registry = OpenPypeSettingsRegistry() self.zip_filter = [".pyc", "__pycache__"] self.openpype_filter = [ - "openpype", "repos", "schema", "LICENSE" + "openpype", "schema", "LICENSE" ] self._message = message @@ -667,18 +554,13 @@ class BootstrapRepos: progress_callback = empty_progress self._progress_callback = progress_callback - if getattr(sys, "frozen", False): - self.live_repo_dir = Path(sys.executable).parent / "repos" - else: - self.live_repo_dir = Path(Path(__file__).parent / ".." / "repos") - @staticmethod def get_version_path_from_list( version: str, version_list: list) -> Union[Path, None]: """Get path for specific version in list of OpenPype versions. Args: - version (str): Version string to look for (1.2.4+staging) + version (str): Version string to look for (1.2.4-nightly.1+test) version_list (list of OpenPypeVersion): list of version to search. Returns: @@ -721,9 +603,9 @@ class BootstrapRepos: self, repo_dir: Path = None) -> Union[OpenPypeVersion, None]: """Copy zip created from OpenPype repositories to user data dir. - This detect OpenPype version either in local "live" OpenPype + This detects OpenPype version either in local "live" OpenPype repository or in user provided path. Then it will zip it in temporary - directory and finally it will move it to destination which is user + directory, and finally it will move it to destination which is user data directory. Existing files will be replaced. Args: @@ -734,21 +616,23 @@ class BootstrapRepos: """ # if repo dir is not set, we detect local "live" OpenPype repository - # version and use it as a source. Otherwise repo_dir is user + # version and use it as a source. Otherwise, repo_dir is user # entered location. - if not repo_dir: - version = OpenPypeVersion.get_installed_version_str() - repo_dir = self.live_repo_dir - else: + if repo_dir: version = self.get_version(repo_dir) + else: + installed_version = OpenPypeVersion.get_installed_version() + version = str(installed_version) + repo_dir = installed_version.path if not version: self._print("OpenPype not found.", LOG_ERROR) return # create destination directory - if not self.data_dir.exists(): - self.data_dir.mkdir(parents=True) + version_obj = OpenPypeVersion(version=version) + destination = self.data_dir / f"{version_obj.major}.{version_obj.minor}" # noqa + if not destination.exists(): + destination.mkdir(parents=True) # create zip inside temporary directory. with tempfile.TemporaryDirectory() as temp_dir: @@ -756,7 +640,7 @@ class BootstrapRepos: Path(temp_dir) / f"openpype-v{version}.zip" self._print(f"creating zip: {temp_zip}") - self._create_openpype_zip(temp_zip, repo_dir.parent) + self._create_openpype_zip(temp_zip, repo_dir) if not os.path.exists(temp_zip): self._print("make archive failed.", LOG_ERROR) return None @@ -776,7 +660,11 @@ class BootstrapRepos: Path to moved zip on success.
""" - destination = self.data_dir / zip_file.name + version = OpenPypeVersion.version_in_str(zip_file.name) + destination_dir = self.data_dir / f"{version.major}.{version.minor}" + if not destination_dir.exists(): + destination_dir.mkdir(parents=True) + destination = destination_dir / zip_file.name if destination.exists(): self._print( @@ -787,8 +675,15 @@ class BootstrapRepos: except Exception as e: self._print(str(e), LOG_ERROR, exc_info=True) return None + if not destination_dir.exists(): + destination_dir.mkdir(parents=True) + elif not destination_dir.is_dir(): + self._print( + "Destination exists but is not directory.", LOG_ERROR) + return None + try: - shutil.move(zip_file.as_posix(), self.data_dir.as_posix()) + shutil.move(zip_file.as_posix(), destination_dir.as_posix()) except shutil.Error as e: self._print(str(e), LOG_ERROR, exc_info=True) return None @@ -1001,6 +896,16 @@ class BootstrapRepos: @staticmethod def _validate_dir(path: Path) -> tuple: + """Validate checksums in a given path. + + Args: + path (Path): path to folder to validate. + + Returns: + tuple(bool, str): returns status and reason as a bool + and str in a tuple. + + """ checksums_file = Path(path / "checksums") if not checksums_file.exists(): # FIXME: This should be set to False sometimes in the future @@ -1057,27 +962,11 @@ class BootstrapRepos: if not archive.is_file() and not archive.exists(): raise ValueError("Archive is not file.") - with ZipFile(archive, "r") as zip_file: - name_list = zip_file.namelist() - - roots = [] - paths = [] - for item in name_list: - if not item.startswith("repos/"): - continue - - root = item.split("/")[1] - - if root not in roots: - roots.append(root) - paths.append( - f"{archive}{os.path.sep}repos{os.path.sep}{root}") - sys.path.insert(0, paths[-1]) - - sys.path.insert(0, f"{archive}") + archive_path = str(archive) + sys.path.insert(0, archive_path) pythonpath = os.getenv("PYTHONPATH", "") python_paths = pythonpath.split(os.pathsep) - python_paths += paths + python_paths.insert(0, archive_path) os.environ["PYTHONPATH"] = os.pathsep.join(python_paths) @@ -1094,37 +983,30 @@ class BootstrapRepos: directory (Path): path to directory. """ + sys.path.insert(0, directory.as_posix()) - directory /= "repos" - if not directory.exists() and not directory.is_dir(): - raise ValueError("directory is invalid") - - roots = [] - for item in directory.iterdir(): - if item.is_dir(): - root = item.as_posix() - if root not in roots: - roots.append(root) - sys.path.insert(0, root) - - pythonpath = os.getenv("PYTHONPATH", "") - paths = pythonpath.split(os.pathsep) - paths += roots - - os.environ["PYTHONPATH"] = os.pathsep.join(paths) @staticmethod - def find_openpype_version(version, staging): + def find_openpype_version( + version: Union[str, OpenPypeVersion] + ) -> Union[OpenPypeVersion, None]: + """Find location of specified OpenPype version. + + Args: + version (Union[str, OpenPypeVersion): Version to find. + + Returns: + requested OpenPypeVersion. 
+ + """ + installed_version = OpenPypeVersion.get_installed_version() if isinstance(version, str): version = OpenPypeVersion(version=version) - installed_version = OpenPypeVersion.get_installed_version() if installed_version == version: return installed_version - local_versions = OpenPypeVersion.get_local_versions( - staging=staging, production=not staging - ) + local_versions = OpenPypeVersion.get_local_versions() zip_version = None for local_version in local_versions: if local_version == version: @@ -1136,26 +1018,25 @@ class BootstrapRepos: if zip_version is not None: return zip_version - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging, production=not staging - ) - for remote_version in remote_versions: - if remote_version == version: - return remote_version - return None + remote_versions = OpenPypeVersion.get_remote_versions() + return next( + ( + remote_version for remote_version in remote_versions + if remote_version == version + ), None) @staticmethod - def find_latest_openpype_version(staging): + def find_latest_openpype_version() -> Union[OpenPypeVersion, None]: + """Find the latest available OpenPype version in all location. + + Returns: + Latest OpenPype version on None if nothing was found. + + """ installed_version = OpenPypeVersion.get_installed_version() - local_versions = OpenPypeVersion.get_local_versions( - staging=staging - ) - remote_versions = OpenPypeVersion.get_remote_versions( - staging=staging - ) - all_versions = local_versions + remote_versions - if not staging: - all_versions.append(installed_version) + local_versions = OpenPypeVersion.get_local_versions() + remote_versions = OpenPypeVersion.get_remote_versions() + all_versions = local_versions + remote_versions + [installed_version] if not all_versions: return None @@ -1175,8 +1056,8 @@ class BootstrapRepos: def find_openpype( self, openpype_path: Union[Path, str] = None, - staging: bool = False, - include_zips: bool = False) -> Union[List[OpenPypeVersion], None]: + include_zips: bool = False + ) -> Union[List[OpenPypeVersion], None]: """Get ordered dict of detected OpenPype version. Resolution order for OpenPype is following: @@ -1188,8 +1069,6 @@ class BootstrapRepos: Args: openpype_path (Path or str, optional): Try to find OpenPype on the given path or url. - staging (bool, optional): Filter only staging version, skip them - otherwise. include_zips (bool, optional): If set True it will try to find OpenPype in zip files in given directory. @@ -1210,30 +1089,38 @@ class BootstrapRepos: ("Finding OpenPype in non-filesystem locations is" " not implemented yet.")) - dir_to_search = self.data_dir - user_versions = self.get_openpype_versions(self.data_dir, staging) - # if we have openpype_path specified, search only there. + # if checks bellow for OPENPYPE_PATH and registry fails, use data_dir + # DEPRECATED: lookup in root of this folder is deprecated in favour + # of major.minor sub-folders. + dirs_to_search = [self.data_dir] + if openpype_path: - dir_to_search = openpype_path + dirs_to_search = [openpype_path] + elif os.getenv("OPENPYPE_PATH") \ + and Path(os.getenv("OPENPYPE_PATH")).exists(): + # first try OPENPYPE_PATH and if that is not available, + # try registry. 
+ dirs_to_search = [Path(os.getenv("OPENPYPE_PATH"))] else: - if os.getenv("OPENPYPE_PATH"): - if Path(os.getenv("OPENPYPE_PATH")).exists(): - dir_to_search = Path(os.getenv("OPENPYPE_PATH")) - else: - try: - registry_dir = Path( - str(self.registry.get_item("openPypePath"))) - if registry_dir.exists(): - dir_to_search = registry_dir + try: + registry_dir = Path( + str(self.registry.get_item("openPypePath"))) + if registry_dir.exists(): + dirs_to_search = [registry_dir] - except ValueError: - # nothing found in registry, we'll use data dir - pass + except ValueError: + # nothing found in registry, we'll use data dir + pass - openpype_versions = self.get_openpype_versions(dir_to_search, staging) - openpype_versions += user_versions + openpype_versions = [] + for dir_to_search in dirs_to_search: + try: + openpype_versions += self.get_openpype_versions( + dir_to_search) + except ValueError: + # location is invalid, skip it + pass - # remove zip file version if needed. if not include_zips: openpype_versions = [ v for v in openpype_versions if v.path.suffix != ".zip" @@ -1346,9 +1233,8 @@ class BootstrapRepos: raise ValueError( f"version {version} is not associated with any file") - destination = self.data_dir / version.path.stem - if destination.exists(): - assert destination.is_dir() + destination = self.data_dir / f"{version.major}.{version.minor}" / version.path.stem # noqa + if destination.exists() and destination.is_dir(): try: shutil.rmtree(destination) except OSError as e: @@ -1417,7 +1303,7 @@ class BootstrapRepos: else: dir_name = openpype_version.path.stem - destination = self.data_dir / dir_name + destination = self.data_dir / f"{openpype_version.major}.{openpype_version.minor}" / dir_name # noqa # test if destination directory already exist, if so lets delete it. if destination.exists() and force: @@ -1437,6 +1323,7 @@ class BootstrapRepos: # create destination parent directories even if they don't exist. destination.mkdir(parents=True) + remove_source_file = False # version is directory if openpype_version.path.is_dir(): # create zip inside temporary directory. @@ -1470,6 +1357,8 @@ class BootstrapRepos: self._progress_callback(35) openpype_version.path = self._copy_zip( openpype_version.path, destination) + # Mark zip to be deleted when done + remove_source_file = True # extract zip there self._print("extracting zip to destination ...") @@ -1478,6 +1367,10 @@ class BootstrapRepos: zip_ref.extractall(destination) self._progress_callback(100) + # Remove zip file copied to local app data + if remove_source_file: + os.remove(openpype_version.path) + return destination def _copy_zip(self, source: Path, destination: Path) -> Path: @@ -1588,14 +1481,11 @@ class BootstrapRepos: return False return True - def get_openpype_versions(self, - openpype_dir: Path, - staging: bool = False) -> list: + def get_openpype_versions(self, openpype_dir: Path) -> list: """Get all detected OpenPype versions in directory. Args: openpype_dir (Path): Directory to scan. - staging (bool, optional): Find staging versions if True. Returns: list of OpenPypeVersion @@ -1605,14 +1495,19 @@ class BootstrapRepos: """ if not openpype_dir.exists() and not openpype_dir.is_dir(): - raise ValueError("specified directory is invalid") + raise ValueError(f"specified directory {openpype_dir} is invalid") - _openpype_versions = [] + openpype_versions = [] # iterate over directory in first level and find all that might # contain OpenPype. 
for item in openpype_dir.iterdir(): + # if the item is a directory with major.minor version, dive deeper + if item.is_dir() and re.match(r"^\d+\.\d+$", item.name): + _versions = self.get_openpype_versions(item) + if _versions: + openpype_versions += _versions - # if file, strip extension, in case of dir not. + # if it is a file, strip the extension; for a directory keep the name. name = item.name if item.is_dir() else item.stem result = OpenPypeVersion.version_in_str(name) @@ -1631,13 +1526,9 @@ class BootstrapRepos: continue detected_version.path = item - if staging and detected_version.is_staging(): - _openpype_versions.append(detected_version) + openpype_versions.append(detected_version) - if not staging and not detected_version.is_staging(): - _openpype_versions.append(detected_version) - - return sorted(_openpype_versions) + return sorted(openpype_versions) class OpenPypeVersionExists(Exception): diff --git a/igniter/install_dialog.py index b09529f5c5..551e2da918 100644 --- a/igniter/install_dialog.py +++ b/igniter/install_dialog.py @@ -5,9 +5,7 @@ import sys import re import collections -from Qt import QtCore, QtGui, QtWidgets # noqa -from Qt.QtGui import QValidator # noqa -from Qt.QtCore import QTimer # noqa +from qtpy import QtCore, QtGui, QtWidgets from .install_thread import InstallThread from .tools import ( @@ -388,8 +386,11 @@ class InstallDialog(QtWidgets.QDialog): install_thread.start() def _installation_finished(self): + # TODO we should find out why status can be set to 'None'? + # - 'InstallThread.run' should handle all cases so not sure where + # that comes from status = self._install_thread.result() - if status >= 0: + if status is not None and status >= 0: self._update_progress(100) QtWidgets.QApplication.processEvents() self.done(3) diff --git a/igniter/install_thread.py index 8e31f8cb8f..4723e6adfb 100644 --- a/igniter/install_thread.py +++ b/igniter/install_thread.py @@ -4,7 +4,7 @@ import os import sys from pathlib import Path -from Qt.QtCore import QThread, Signal, QObject # noqa +from qtpy import QtCore from .bootstrap_repos import ( BootstrapRepos, @@ -17,7 +17,7 @@ from .bootstrap_repos import ( from .tools import validate_mongo_connection -class InstallThread(QThread): +class InstallThread(QtCore.QThread): """Install Worker thread. This class takes care of finding OpenPype version on user entered path and installing it (copying, unzipping) to user data dir. """ - progress = Signal(int) - message = Signal((str, bool)) + progress = QtCore.Signal(int) + message = QtCore.Signal((str, bool)) def __init__(self, parent=None,): self._mongo = None - self._path = None self._result = None - QThread.__init__(self, parent) + super().__init__(parent) def result(self): """Result of finished installation.""" @@ -62,126 +61,117 @@ class InstallThread(QThread): progress_callback=self.set_progress, message=self.message) local_version = OpenPypeVersion.get_installed_version_str() - # if user did entered nothing, we install OpenPype from local version. - # zip content of `repos`, copy it to user data dir and append - # version to it. - if not self._path: - # user did not entered url - if not self._mongo: - # it not set in environment - if not os.getenv("OPENPYPE_MONGO"): - # try to get it from settings registry - try: - self._mongo = bs.secure_registry.get_item( - "openPypeMongo") - except ValueError: - self.message.emit( - "!!!
We need MongoDB URL to proceed.", True) - self._set_result(-1) - return - else: - self._mongo = os.getenv("OPENPYPE_MONGO") - else: - self.message.emit("Saving mongo connection string ...", False) - bs.secure_registry.set_item("openPypeMongo", self._mongo) - - os.environ["OPENPYPE_MONGO"] = self._mongo - - self.message.emit( - f"Detecting installed OpenPype versions in {bs.data_dir}", - False) - detected = bs.find_openpype(include_zips=True) - - if detected: - if OpenPypeVersion( - version=local_version, path=Path()) < detected[-1]: - self.message.emit(( - f"Latest installed version {detected[-1]} is newer " - f"then currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - if detected[-1].path.suffix.lower() == ".zip": - bs.extract_openpype(detected[-1]) - self._set_result(0) - return - - if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa - self.message.emit(( - f"Latest installed version is the same as " - f"currently running {local_version}" - ), False) - self.message.emit("Skipping OpenPype install ...", False) - self._set_result(0) - return - - self.message.emit(( - "All installed versions are older then " - f"currently running one {local_version}" - ), False) - else: - if getattr(sys, 'frozen', False): - self.message.emit("None detected.", True) - self.message.emit(("We will use OpenPype coming with " - "installer."), False) - openpype_version = bs.create_version_from_frozen_code() - if not openpype_version: - self.message.emit( - f"!!! Install failed - {openpype_version}", True) - self._set_result(-1) - return - self.message.emit(f"Using: {openpype_version}", False) - bs.install_version(openpype_version) - self.message.emit(f"Installed as {openpype_version}", False) - self.progress.emit(100) - self._set_result(1) - return - else: - self.message.emit("None detected.", False) - - self.message.emit( - f"We will use local OpenPype version {local_version}", False) - - local_openpype = bs.create_version_from_live_code() - if not local_openpype: - self.message.emit( - f"!!! Install failed - {local_openpype}", True) - self._set_result(-1) - return + # user did not entered url + if self._mongo: + self.message.emit("Saving mongo connection string ...", False) + bs.secure_registry.set_item("openPypeMongo", self._mongo) + elif os.getenv("OPENPYPE_MONGO"): + self._mongo = os.getenv("OPENPYPE_MONGO") + else: + # try to get it from settings registry try: - bs.install_version(local_openpype) - except (OpenPypeVersionExists, - OpenPypeVersionInvalid, - OpenPypeVersionIOError) as e: - self.message.emit(f"Installed failed: ", True) - self.message.emit(str(e), True) + self._mongo = bs.secure_registry.get_item( + "openPypeMongo") + except ValueError: + self.message.emit( + "!!! We need MongoDB URL to proceed.", True) self._set_result(-1) return + os.environ["OPENPYPE_MONGO"] = self._mongo - self.message.emit(f"Installed as {local_openpype}", False) + self.message.emit( + f"Detecting installed OpenPype versions in {bs.data_dir}", + False) + detected = bs.find_openpype(include_zips=True) + if not detected and getattr(sys, 'frozen', False): + self.message.emit("None detected.", True) + self.message.emit(("We will use OpenPype coming with " + "installer."), False) + openpype_version = bs.create_version_from_frozen_code() + if not openpype_version: + self.message.emit( + f"!!! 
Install failed - {openpype_version}", True) + self._set_result(-1) + return + self.message.emit(f"Using: {openpype_version}", False) + bs.install_version(openpype_version) + self.message.emit(f"Installed as {openpype_version}", False) + self.progress.emit(100) + self._set_result(1) return - else: - # if we have mongo connection string, validate it, set it to - # user settings and get OPENPYPE_PATH from there. - if self._mongo: - if not validate_mongo_connection(self._mongo): - self.message.emit( - f"!!! invalid mongo url {self._mongo}", True) - self._set_result(-1) - return - bs.secure_registry.set_item("openPypeMongo", self._mongo) - os.environ["OPENPYPE_MONGO"] = self._mongo - self.message.emit(f"processing {self._path}", True) - repo_file = bs.process_entered_location(self._path) + if detected and not OpenPypeVersion.get_installed_version().is_compatible(detected[-1]): # noqa: E501 + self.message.emit(( + f"Latest detected version {detected[-1]} " + "is not compatible with the currently running " + f"{local_version}" + ), True) + self.message.emit(( + "Filtering detected versions to compatible ones..." + ), False) - if not repo_file: - self.message.emit("!!! Cannot install", True) - self._set_result(-1) + # filter results to get only compatible versions + detected = [ + version for version in detected + if version.is_compatible( + OpenPypeVersion.get_installed_version()) + ] + + if detected: + if OpenPypeVersion( + version=local_version, path=Path()) < detected[-1]: + self.message.emit(( + f"Latest installed version {detected[-1]} is newer " + f"than currently running {local_version}" + ), False) + self.message.emit("Skipping OpenPype install ...", False) + if detected[-1].path.suffix.lower() == ".zip": + bs.extract_openpype(detected[-1]) + self._set_result(0) return + if OpenPypeVersion(version=local_version).get_main_version() == detected[-1].get_main_version(): # noqa: E501 + self.message.emit(( + "Latest installed version is the same as " + f"currently running {local_version}" + ), False) + self.message.emit("Skipping OpenPype install ...", False) + self._set_result(0) + return + + self.message.emit(( + "All installed versions are older than " + f"currently running one {local_version}" + ), False) + + self.message.emit("None detected.", False) + + self.message.emit( + f"We will use local OpenPype version {local_version}", False) + + local_openpype = bs.create_version_from_live_code() + if not local_openpype: + self.message.emit( + f"!!!
Install failed - {local_openpype}", True) + self._set_result(-1) + return + + try: + bs.install_version(local_openpype) + except (OpenPypeVersionExists, + OpenPypeVersionInvalid, + OpenPypeVersionIOError) as e: + self.message.emit("Install failed: ", True) + self.message.emit(str(e), True) + self._set_result(-1) + return + + self.message.emit(f"Installed as {local_openpype}", False) + self.progress.emit(100) + self._set_result(1) + return + self.progress.emit(100) self._set_result(1) return diff --git a/igniter/message_dialog.py index c8e875cc37..a2a8bce3a2 100644 --- a/igniter/message_dialog.py +++ b/igniter/message_dialog.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets, QtGui +from qtpy import QtWidgets, QtGui from .tools import ( load_stylesheet, diff --git a/igniter/nice_progress_bar.py index 47d695a101..ee16d108d4 100644 --- a/igniter/nice_progress_bar.py +++ b/igniter/nice_progress_bar.py @@ -1,4 +1,4 @@ -from Qt import QtCore, QtGui, QtWidgets # noqa +from qtpy import QtWidgets class NiceProgressBar(QtWidgets.QProgressBar): diff --git a/igniter/tools.py index 57159b5e52..79235b2329 100644 --- a/igniter/tools.py +++ b/igniter/tools.py @@ -21,6 +21,11 @@ class OpenPypeVersionNotFound(Exception): pass +class OpenPypeVersionIncompatible(Exception): + """OpenPype version is not compatible with the installed one (build).""" + pass + + def should_add_certificate_path_to_mongo_url(mongo_url): """Check if should add ca certificate to mongo url. @@ -148,7 +153,8 @@ def get_openpype_global_settings(url: str) -> dict: # Create mongo connection client = MongoClient(url, **kwargs) # Access settings collection - col = client["openpype"]["settings"] + openpype_db = os.environ.get("OPENPYPE_DATABASE_NAME") or "openpype" + col = client[openpype_db]["settings"] # Query global settings global_settings = col.find_one({"type": "global_settings"}) or {} # Close Mongo connection @@ -179,11 +185,7 @@ def get_openpype_path_from_settings(settings: dict) -> Union[str, None]: if paths and isinstance(paths, str): paths = [paths] - # Loop over paths and return only existing - for path in paths: - if os.path.exists(path): - return path - return None + return next((path for path in paths if os.path.exists(path)), None) def get_expected_studio_version_str( @@ -201,10 +203,7 @@ def get_expected_studio_version_str( mongo_url = os.environ.get("OPENPYPE_MONGO") if global_settings is None: global_settings = get_openpype_global_settings(mongo_url) - if staging: - key = "staging_version" - else: - key = "production_version" + key = "staging_version" if staging else "production_version" return global_settings.get(key) or "" diff --git a/igniter/update_thread.py index f4fc729faf..e98c95f892 100644 --- a/igniter/update_thread.py +++ b/igniter/update_thread.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Working thread for update.""" -from Qt.QtCore import QThread, Signal, QObject # noqa +from qtpy import QtCore from .bootstrap_repos import ( BootstrapRepos, @@ -8,7 +8,7 @@ ) -class UpdateThread(QThread): +class UpdateThread(QtCore.QThread): """Install Worker thread. This class takes care of finding OpenPype version on user entered path and installing it (copying, unzipping) to user data dir.
""" - progress = Signal(int) - message = Signal((str, bool)) + progress = QtCore.Signal(int) + message = QtCore.Signal((str, bool)) def __init__(self, parent=None): self._result = None self._openpype_version = None - QThread.__init__(self, parent) + super().__init__(parent) def set_version(self, openpype_version: OpenPypeVersion): self._openpype_version = openpype_version diff --git a/igniter/update_window.py b/igniter/update_window.py index d7908c240b..d51ae18cd0 100644 --- a/igniter/update_window.py +++ b/igniter/update_window.py @@ -1,8 +1,10 @@ # -*- coding: utf-8 -*- """Progress window to show when OpenPype is updating/installing locally.""" import os + +from qtpy import QtCore, QtGui, QtWidgets + from .update_thread import UpdateThread -from Qt import QtCore, QtGui, QtWidgets # noqa from .bootstrap_repos import OpenPypeVersion from .nice_progress_bar import NiceProgressBar from .tools import load_stylesheet @@ -47,7 +49,6 @@ class UpdateWindow(QtWidgets.QDialog): self._update_thread = None - self.resize(QtCore.QSize(self._width, self._height)) self._init_ui() # Set stylesheet @@ -79,6 +80,16 @@ class UpdateWindow(QtWidgets.QDialog): self._progress_bar = progress_bar + def showEvent(self, event): + super().showEvent(event) + current_size = self.size() + new_size = QtCore.QSize( + max(current_size.width(), self._width), + max(current_size.height(), self._height) + ) + if current_size != new_size: + self.resize(new_size) + def _run_update(self): """Start install process. diff --git a/inno_setup.iss b/inno_setup.iss index ead9907955..3adde52a8b 100644 --- a/inno_setup.iss +++ b/inno_setup.iss @@ -18,7 +18,8 @@ AppPublisher=Orbi Tools s.r.o AppPublisherURL=http://pype.club AppSupportURL=http://pype.club AppUpdatesURL=http://pype.club -DefaultDirName={autopf}\{#MyAppName} +DefaultDirName={autopf}\{#MyAppName}\{#AppVer} +UsePreviousAppDir=no DisableProgramGroupPage=yes OutputBaseFilename={#MyAppName}-{#AppVer}-install AllowCancelDuringInstall=yes @@ -27,7 +28,7 @@ AllowCancelDuringInstall=yes PrivilegesRequiredOverridesAllowed=dialog SetupIconFile=igniter\openpype.ico OutputDir=build\ -Compression=lzma +Compression=lzma2 SolidCompression=yes WizardStyle=modern @@ -37,13 +38,18 @@ Name: "english"; MessagesFile: "compiler:Default.isl" [Tasks] Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked +[InstallDelete] +; clean everything in previous installation folder +Type: filesandordirs; Name: "{app}\*" + + [Files] Source: "build\{#build}\*"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs ; NOTE: Don't use "Flags: ignoreversion" on any shared system files [Icons] -Name: "{autoprograms}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe" -Name: "{autodesktop}\{#MyAppName}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon +Name: "{autoprograms}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe" +Name: "{autodesktop}\{#MyAppName} {#AppVer}"; Filename: "{app}\openpype_gui.exe"; Tasks: desktopicon [Run] Filename: "{app}\openpype_gui.exe"; Description: "{cm:LaunchProgram,OpenPype}"; Flags: nowait postinstall skipifsilent diff --git a/openpype/__init__.py b/openpype/__init__.py index 8b94b2dc3f..810664707a 100644 --- a/openpype/__init__.py +++ b/openpype/__init__.py @@ -1,155 +1,5 @@ -# -*- coding: utf-8 -*- -"""Pype module.""" import os -import platform -import functools -import logging - -from .settings import get_project_settings -from .lib import ( - Anatomy, - filter_pyblish_plugins, - 
set_plugin_attributes_from_settings, - change_timer_to_current_context, - register_event_callback, -) - -pyblish = avalon = _original_discover = None - -log = logging.getLogger(__name__) PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__)) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") - -# Global plugin paths -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") - - -def import_wrapper(func): - """Wrap module imports to specific functions.""" - @functools.wraps(func) - def decorated(*args, **kwargs): - global pyblish - global avalon - global _original_discover - if pyblish is None: - from pyblish import api as pyblish - from avalon import api as avalon - - # we are monkey patching `avalon.api.discover()` to allow us to - # load plugin presets on plugins being discovered by avalon. - # Little bit of hacking, but it allows us to add out own features - # without need to modify upstream code. - - _original_discover = avalon.discover - - return func(*args, **kwargs) - - return decorated - - -@import_wrapper -def patched_discover(superclass): - """Patch `avalon.api.discover()`. - - Monkey patched version of :func:`avalon.api.discover()`. It allows - us to load presets on plugins being discovered. - """ - # run original discover and get plugins - plugins = _original_discover(superclass) - filtered_plugins = [ - plugin - for plugin in plugins - if issubclass(plugin, superclass) - ] - - set_plugin_attributes_from_settings(filtered_plugins, superclass) - - return filtered_plugins - - -@import_wrapper -def install(): - """Install Pype to Avalon.""" - from pyblish.lib import MessageHandler - from openpype.modules import load_modules - from openpype.pipeline import ( - LegacyCreator, - register_loader_plugin_path, - register_inventory_action, - ) - from avalon import pipeline - - # Make sure modules are loaded - load_modules() - - def modified_emit(obj, record): - """Method replacing `emit` in Pyblish's MessageHandler.""" - record.msg = record.getMessage() - obj.records.append(record) - - MessageHandler.emit = modified_emit - - log.info("Registering global plug-ins..") - pyblish.register_plugin_path(PUBLISH_PATH) - pyblish.register_discovery_filter(filter_pyblish_plugins) - register_loader_plugin_path(LOAD_PATH) - - project_name = os.environ.get("AVALON_PROJECT") - - # Register studio specific plugins - if project_name: - anatomy = Anatomy(project_name) - anatomy.set_root_environments() - avalon.register_root(anatomy.roots) - - project_settings = get_project_settings(project_name) - platform_name = platform.system().lower() - project_plugins = ( - project_settings - .get("global", {}) - .get("project_plugins", {}) - .get(platform_name) - ) or [] - for path in project_plugins: - try: - path = str(path.format(**os.environ)) - except KeyError: - pass - - if not path or not os.path.exists(path): - continue - - pyblish.register_plugin_path(path) - register_loader_plugin_path(path) - avalon.register_plugin_path(LegacyCreator, path) - register_inventory_action(path) - - # apply monkey patched discover to original one - log.info("Patching discovery") - - avalon.discover = patched_discover - pipeline.discover = patched_discover - - register_event_callback("taskChanged", _on_task_change) - - -def _on_task_change(): - change_timer_to_current_context() - - -@import_wrapper -def uninstall(): - """Uninstall Pype from Avalon.""" - from openpype.pipeline import deregister_loader_plugin_path - - log.info("Deregistering global plug-ins..") - 
pyblish.deregister_plugin_path(PUBLISH_PATH) - pyblish.deregister_discovery_filter(filter_pyblish_plugins) - deregister_loader_plugin_path(LOAD_PATH) - log.info("Global plug-ins unregistred") - - # restore original discover - avalon.discover = _original_discover diff --git a/openpype/action.py index 50741875e4..15c96404b6 100644 --- a/openpype/action.py +++ b/openpype/action.py @@ -1,42 +1,75 @@ -# absolute_import is needed to counter the `module has no cmds error` in Maya -from __future__ import absolute_import - +import warnings +import functools import pyblish.api -def get_errored_instances_from_context(context): - - instances = list() - for result in context.data["results"]: - if result["instance"] is None: - # When instance is None we are on the "context" result - continue - - if result["error"]: - instances.append(result["instance"]) - - return instances +class ActionDeprecatedWarning(DeprecationWarning): + pass -def get_errored_plugins_from_data(context): - """Get all failed validation plugins - - Args: - context (object): - - Returns: - list of plugins which failed during validation +def deprecated(new_destination): + """Mark functions as deprecated. + It will result in a warning being emitted when the function is used. """ - plugins = list() - results = context.data.get("results", []) - for result in results: - if result["success"] is True: - continue - plugins.append(result["plugin"]) + func = None + if callable(new_destination): + func = new_destination + new_destination = None - return plugins + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", ActionDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=ActionDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +@deprecated("openpype.pipeline.publish.get_errored_instances_from_context") +def get_errored_instances_from_context(context): + """ + Deprecated: + Since 3.14.*, will be removed in 3.16.* or later. + """ + + from openpype.pipeline.publish import get_errored_instances_from_context + + return get_errored_instances_from_context(context) + + +@deprecated("openpype.pipeline.publish.get_errored_plugins_from_context") +def get_errored_plugins_from_data(context): + """ + Deprecated: + Since 3.14.*, will be removed in 3.16.* or later. + """ + + from openpype.pipeline.publish import get_errored_plugins_from_context + + return get_errored_plugins_from_context(context) class RepairAction(pyblish.api.Action): @@ -45,6 +78,13 @@ To process the repairing this requires a static `repair(instance)` method is available on the plugin. + Deprecated: + 'RepairAction' and 'RepairContextAction' were moved to + 'openpype.pipeline.publish', please change your imports. + There is no "reasonable" way to mark these classes as deprecated + to show a warning on wrong import.
Deprecated since 3.14.*, will be + removed in 3.16.* + """ label = "Repair" on = "failed" # This action is only available on a failed plug-in @@ -71,6 +111,13 @@ class RepairContextAction(pyblish.api.Action): To process the repairing this requires a static `repair(instance)` method is available on the plugin. + Deprecated: + 'RepairAction' and 'RepairContextAction' were moved to + 'openpype.pipeline.publish', please change your imports. + There is no "reasonable" way to mark these classes as deprecated + to show a warning on wrong import. Deprecated since 3.14.*, will be + removed in 3.16.* + """ label = "Repair" on = "failed" # This action is only available on a failed plug-in diff --git a/openpype/api.py deleted file mode 100644 index b692b36065..0000000000 --- a/openpype/api.py +++ /dev/null @@ -1,122 +0,0 @@ -from .settings import ( - get_system_settings, - get_project_settings, - get_current_project_settings, - get_anatomy_settings, - get_environments, - - SystemSettings, - ProjectSettings -) - -from .lib import ( - PypeLogger, - Anatomy, - config, - execute, - run_subprocess, - version_up, - get_asset, - get_hierarchy, - get_workdir_data, - get_version_from_path, - get_last_version_from_path, - get_app_environments_for_context, - source_hash, - get_latest_version, - get_global_environments, - get_local_site_id, - change_openpype_mongo_url, - create_project_folders, - get_project_basic_paths -) - -from .lib.mongo import ( - get_default_components -) - -from .lib.applications import ( - ApplicationManager -) - -from .lib.avalon_context import ( - BuildWorkfile -) - -from . import resources - -from .plugin import ( - Extractor, - - ValidatePipelineOrder, - ValidateContentsOrder, - ValidateSceneOrder, - ValidateMeshOrder, - ValidationException -) - -# temporary fix, might -from .action import ( - get_errored_instances_from_context, - RepairAction, - RepairContextAction -) - -# for backward compatibility with Pype 2 -Logger = PypeLogger - -__all__ = [ - "get_system_settings", - "get_project_settings", - "get_current_project_settings", - "get_anatomy_settings", - "get_environments", - "get_project_basic_paths", - - "SystemSettings", - - "PypeLogger", - "Logger", - "Anatomy", - "config", - "execute", - "get_default_components", - "ApplicationManager", - "BuildWorkfile", - - # Resources - "resources", - - # plugin classes - "Extractor", - # ordering - "ValidatePipelineOrder", - "ValidateContentsOrder", - "ValidateSceneOrder", - "ValidateMeshOrder", - # action - "get_errored_instances_from_context", - "RepairAction", - "RepairContextAction", - - "ValidationException", - - # get contextual data - "version_up", - "get_hierarchy", - "get_asset", - "get_version_from_path", - "get_last_version_from_path", - "get_app_environments_for_context", - "source_hash", - - "run_subprocess", - "get_latest_version", - "get_global_environments", - - "get_local_site_id", - "change_openpype_mongo_url", - - "get_project_basic_paths", - "create_project_folders" - -] diff --git a/openpype/cli.py index cbeb7fef9b..5c47088a44 100644 --- a/openpype/cli.py +++ b/openpype/cli.py @@ -2,7 +2,7 @@ """Package for handling pype command line arguments.""" import os import sys - +import code import click # import sys @@ -16,17 +16,28 @@ from .pype_commands import PypeCommands @click.option("--use-staging", is_flag=True, expose_value=False, help="use staging variants") @click.option("--list-versions", is_flag=True, expose_value=False, - help=("list all detected versions.
Use With `--use-staging " - "to list staging versions.")) + help="list all detected versions.") @click.option("--validate-version", expose_value=False, help="validate given version integrity") +@click.option("--debug", is_flag=True, expose_value=False, + help="Enable debug") +@click.option("--verbose", expose_value=False, + help=("Change OpenPype log level (debug - critical or 0-50)")) +@click.option("--automatic-tests", is_flag=True, expose_value=False, + help=("Run in automatic tests mode")) def main(ctx): """Pype is main command serving as entry point to pipeline system. It wraps different commands together. """ + if ctx.invoked_subcommand is None: - ctx.invoke(tray) + # Print help if headless mode is used + if os.environ.get("OPENPYPE_HEADLESS_MODE") == "1": + print(ctx.get_help()) + sys.exit(0) + else: + ctx.invoke(tray) @main.command() @@ -37,30 +48,13 @@ def settings(dev): @main.command() -def standalonepublisher(): - """Show Pype Standalone publisher UI.""" - PypeCommands().launch_standalone_publisher() - - -@main.command() -def traypublisher(): - """Show new OpenPype Standalone publisher UI.""" - PypeCommands().launch_traypublisher() - - -@main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run pype tray in debug mode")) -def tray(debug=False): +def tray(): """Launch pype tray. Default action of pype command is to launch tray widget to control basic aspects of pype. See documentation for more information. - - Running pype with `--debug` will result in lot of information useful for - debugging to be shown in console. """ - PypeCommands().launch_tray(debug) + PypeCommands().launch_tray() @PypeCommands.add_modules @@ -75,7 +69,6 @@ def module(ctx): @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("--ftrack-url", envvar="FTRACK_SERVER", help="Ftrack server url") @click.option("--ftrack-user", envvar="FTRACK_API_USER", @@ -88,8 +81,7 @@ def module(ctx): help="Clockify API key.") @click.option("--clockify-workspace", envvar="CLOCKIFY_WORKSPACE", help="Clockify workspace") -def eventserver(debug, - ftrack_url, +def eventserver(ftrack_url, ftrack_user, ftrack_api_key, legacy, @@ -100,8 +92,6 @@ def eventserver(debug, This should be ideally used by system service (such us systemd or upstart on linux and window service). """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_eventservercli( ftrack_url, @@ -114,12 +104,11 @@ def eventserver(debug, @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host", default=None) @click.option("-p", "--port", help="Port", default=None) @click.option("-e", "--executable", help="Executable") @click.option("-u", "--upload_dir", help="Upload dir") -def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): +def webpublisherwebserver(executable, upload_dir, host=None, port=None): """Starts webserver for communication with Webpublish FR via command line OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND @@ -127,8 +116,6 @@ def webpublisherwebserver(debug, executable, upload_dir, host=None, port=None): Expect "pype.club" user created on Ftrack. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" PypeCommands().launch_webpublisher_webservercli( upload_dir=upload_dir, @@ -164,38 +151,34 @@ def extractenvironments(output_json_path, project, asset, task, app, envgroup): @main.command() @click.argument("paths", nargs=-1) -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-t", "--targets", help="Targets module", default=None, multiple=True) @click.option("-g", "--gui", is_flag=True, help="Show Publish UI", default=False) -def publish(debug, paths, targets, gui): +def publish(paths, targets, gui): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.publish(list(paths), targets, gui) @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-h", "--host", help="Host") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublishfromapp(debug, project, path, host, user=None, targets=None): +def remotepublishfromapp(project, path, host, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublishfromapp( project, path, host, user, targets=targets ) @@ -203,24 +186,21 @@ def remotepublishfromapp(debug, project, path, host, user=None, targets=None): @main.command() @click.argument("path") -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-u", "--user", help="User email address") @click.option("-p", "--project", help="Project") @click.option("-t", "--targets", help="Targets", default=None, multiple=True) -def remotepublish(debug, project, path, user=None, targets=None): +def remotepublish(project, path, user=None, targets=None): """Start CLI publishing. Publish collects json from paths provided as an argument. More than one path is allowed. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands.remotepublish(project, path, user, targets=targets) @main.command() -@click.option("-d", "--debug", is_flag=True, help="Print debug messages") @click.option("-p", "--project", required=True, help="name of project asset is under") @click.option("-a", "--asset", required=True, @@ -228,7 +208,7 @@ def remotepublish(debug, project, path, user=None, targets=None): @click.option("--path", required=True, help="path where textures are found", type=click.Path(exists=True)) -def texturecopy(debug, project, asset, path): +def texturecopy(project, asset, path): """Copy specified textures to provided asset path. It validates if project and asset exists. Then it will use speedcopy to @@ -239,8 +219,7 @@ def texturecopy(debug, project, asset, path): Result will be copied without directory structure so it will be flat then. Nothing is written to database. 
""" - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().texture_copy(project, asset, path) @@ -305,6 +284,13 @@ def projectmanager(): PypeCommands().launch_project_manager() +@main.command(context_settings={"ignore_unknown_options": True}) +def publish_report_viewer(): + from openpype.tools.publisher.publish_report_viewer import main + + sys.exit(main()) + + @main.command() @click.argument("output_path") @click.option("--project", help="Define project context") @@ -389,11 +375,9 @@ def runtests(folder, mark, pyargs, test_data_folder, persist, app_variant, @main.command() -@click.option("-d", "--debug", - is_flag=True, help=("Run process in debug mode")) @click.option("-a", "--active_site", required=True, help="Name of active stie") -def syncserver(debug, active_site): +def syncserver(active_site): """Run sync site server in background. Some Site Sync use cases need to expose site to another one. @@ -408,8 +392,7 @@ def syncserver(debug, active_site): Settings (configured by starting OP Tray with env var OPENPYPE_LOCAL_ID set to 'active_site'. """ - if debug: - os.environ["OPENPYPE_DEBUG"] = "1" + PypeCommands().syncserver(active_site) @@ -443,3 +426,43 @@ def pack_project(project, dirpath): def unpack_project(zipfile, root): """Create a package of project with all files and database dump.""" PypeCommands().unpack_project(zipfile, root) + + +@main.command() +def interactive(): + """Interactive (Python like) console. + + Helpful command not only for development to directly work with python + interpreter. + + Warning: + Executable 'openpype_gui' on Windows won't work. + """ + + from openpype.version import __version__ + + banner = f"OpenPype {__version__}\nPython {sys.version} on {sys.platform}" + code.interact(banner) + + +@main.command() +@click.option("--build", help="Print only build version", + is_flag=True, default=False) +def version(build): + """Print OpenPype version.""" + + from openpype.version import __version__ + from igniter.bootstrap_repos import BootstrapRepos, OpenPypeVersion + from pathlib import Path + import os + + if getattr(sys, 'frozen', False): + local_version = BootstrapRepos.get_version( + Path(os.getenv("OPENPYPE_ROOT"))) + else: + local_version = OpenPypeVersion.get_installed_version_str() + + if build: + print(local_version) + return + print(f"{__version__} (booted: {local_version})") diff --git a/openpype/client/__init__.py b/openpype/client/__init__.py new file mode 100644 index 0000000000..7831afd8ad --- /dev/null +++ b/openpype/client/__init__.py @@ -0,0 +1,108 @@ +from .mongo import ( + OpenPypeMongoConnection, +) + +from .entities import ( + get_projects, + get_project, + get_whole_project, + + get_asset_by_id, + get_asset_by_name, + get_assets, + get_archived_assets, + get_asset_ids_with_subsets, + + get_subset_by_id, + get_subset_by_name, + get_subsets, + get_subset_families, + + get_version_by_id, + get_version_by_name, + get_versions, + get_hero_version_by_id, + get_hero_version_by_subset_id, + get_hero_versions, + get_last_versions, + get_last_version_by_subset_id, + get_last_version_by_subset_name, + get_output_link_versions, + + version_is_latest, + + get_representation_by_id, + get_representation_by_name, + get_representations, + get_representation_parents, + get_representations_parents, + get_archived_representations, + + get_thumbnail, + get_thumbnails, + get_thumbnail_id_from_source, + + get_workfile_info, +) + +from .entity_links import ( + get_linked_asset_ids, + get_linked_assets, + get_linked_representation_id, +) + +from 
.operations import ( + create_project, +) + + +__all__ = ( + "OpenPypeMongoConnection", + + "get_projects", + "get_project", + "get_whole_project", + + "get_asset_by_id", + "get_asset_by_name", + "get_assets", + "get_archived_assets", + "get_asset_ids_with_subsets", + + "get_subset_by_id", + "get_subset_by_name", + "get_subsets", + "get_subset_families", + + "get_version_by_id", + "get_version_by_name", + "get_versions", + "get_hero_version_by_id", + "get_hero_version_by_subset_id", + "get_hero_versions", + "get_last_versions", + "get_last_version_by_subset_id", + "get_last_version_by_subset_name", + "get_output_link_versions", + + "version_is_latest", + + "get_representation_by_id", + "get_representation_by_name", + "get_representations", + "get_representation_parents", + "get_representations_parents", + "get_archived_representations", + + "get_thumbnail", + "get_thumbnails", + "get_thumbnail_id_from_source", + + "get_workfile_info", + + "get_linked_asset_ids", + "get_linked_assets", + "get_linked_representation_id", + + "create_project", +) diff --git a/openpype/client/entities.py new file mode 100644 index 0000000000..c415be8816 --- /dev/null +++ b/openpype/client/entities.py @@ -0,0 +1,1502 @@ +"""Unclear if these will have public functions like these. + +Goal is that most of the functions here are called on (or with) an object +that has project name as a context (e.g. on 'ProjectEntity'?). + ++ We will need more specific functions doing very specific queries really fast. +""" + +import re +import collections + +import six +from bson.objectid import ObjectId + +from .mongo import get_project_database, get_project_connection + +PatternType = type(re.compile("")) + + +def _prepare_fields(fields, required_fields=None): + if not fields: + return None + + output = { + field: True + for field in fields + } + if "_id" not in output: + output["_id"] = True + + if required_fields: + for key in required_fields: + output[key] = True + return output + + +def convert_id(in_id): + """Helper function for conversion of id from string to ObjectId. + + Args: + in_id (Union[str, ObjectId, Any]): Entity id that should be converted + to right type for queries. + + Returns: + Union[ObjectId, Any]: Id converted to ObjectId, or the input value + unchanged. + """ + + if isinstance(in_id, six.string_types): + return ObjectId(in_id) + return in_id + + +def convert_ids(in_ids): + """Helper function for conversion of ids from string to ObjectId. + + Args: + in_ids (Iterable[Union[str, ObjectId, Any]]): List of entity ids that + should be converted to right type for queries. + + Returns: + List[ObjectId]: Ids converted to ObjectId.
+ """ + + _output = set() + for in_id in in_ids: + if in_id is not None: + _output.add(convert_id(in_id)) + return list(_output) + + +def get_projects(active=True, inactive=False, fields=None): + mongodb = get_project_database() + for project_name in mongodb.collection_names(): + if project_name in ("system.indexes",): + continue + project_doc = get_project( + project_name, active=active, inactive=inactive, fields=fields + ) + if project_doc is not None: + yield project_doc + + +def get_project(project_name, active=True, inactive=True, fields=None): + # Skip if both are disabled + if not active and not inactive: + return None + + query_filter = {"type": "project"} + # Keep query untouched if both should be available + if active and inactive: + pass + + # Add filter to keep only active + elif active: + query_filter["$or"] = [ + {"data.active": {"$exists": False}}, + {"data.active": True}, + ] + + # Add filter to keep only inactive + elif inactive: + query_filter["$or"] = [ + {"data.active": {"$exists": False}}, + {"data.active": False}, + ] + + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_whole_project(project_name): + """Receive all documents from project. + + Helper that can be used to get all document from whole project. For example + for backups etc. + + Returns: + Cursor: Query cursor as iterable which returns all documents from + project collection. + """ + + conn = get_project_connection(project_name) + return conn.find({}) + + +def get_asset_by_id(project_name, asset_id, fields=None): + """Receive asset data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_id (Union[str, ObjectId]): Asset's id. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + dict: Asset entity data. + None: Asset was not found by id. + """ + + asset_id = convert_id(asset_id) + if not asset_id: + return None + + query_filter = {"type": "asset", "_id": asset_id} + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_asset_by_name(project_name, asset_name, fields=None): + """Receive asset data by it's name. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_name (str): Asset's name. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + dict: Asset entity data. + None: Asset was not found by name. + """ + + if not asset_name: + return None + + query_filter = {"type": "asset", "name": asset_name} + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +# NOTE this could be just public function? +# - any better variable name instead of 'standard'? +# - same approach can be used for rest of types +def _get_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + standard=True, + archived=False, + fields=None +): + """Assets for specified project by passed filters. + + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Name assets that should be found. 
+ parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + standard (bool): Query standard assets (type 'asset'). + archived (bool): Query archived assets (type 'archived_asset'). + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + asset_types = [] + if standard: + asset_types.append("asset") + if archived: + asset_types.append("archived_asset") + + if not asset_types: + return [] + + if len(asset_types) == 1: + query_filter = {"type": asset_types[0]} + else: + query_filter = {"type": {"$in": asset_types}} + + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + query_filter["_id"] = {"$in": asset_ids} + + if asset_names is not None: + if not asset_names: + return [] + query_filter["name"] = {"$in": list(asset_names)} + + if parent_ids is not None: + parent_ids = convert_ids(parent_ids) + if not parent_ids: + return [] + query_filter["data.visualParent"] = {"$in": parent_ids} + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + archived=False, + fields=None +): + """Assets for specified project by passed filters. + + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Names of assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + archived (bool): Add also archived assets. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + return _get_assets( + project_name, + asset_ids, + asset_names, + parent_ids, + True, + archived, + fields + ) + + +def get_archived_assets( + project_name, + asset_ids=None, + asset_names=None, + parent_ids=None, + fields=None +): + """Archived assets for specified project by passed filters. + + Passed filters (ids and names) are always combined so all conditions must + match. + + To receive all archived assets from project just keep filters empty. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids that should + be found. + asset_names (Iterable[str]): Names of assets that should be found. + parent_ids (Iterable[Union[str, ObjectId]]): Parent asset ids. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Query cursor as iterable which returns asset documents matching + passed filters. + """ + + return _get_assets( + project_name, asset_ids, asset_names, parent_ids, False, True, fields + ) + + +def get_asset_ids_with_subsets(project_name, asset_ids=None): + """Find out which assets have existing subsets. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_ids (Iterable[Union[str, ObjectId]]): Look only for entered + asset ids. + + Returns: + Iterable[ObjectId]: Asset ids that have existing subsets.
+ """ + + subset_query = { + "type": "subset" + } + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + subset_query["parent"] = {"$in": asset_ids} + + conn = get_project_connection(project_name) + result = conn.aggregate([ + { + "$match": subset_query + }, + { + "$group": { + "_id": "$parent", + "count": {"$sum": 1} + } + } + ]) + asset_ids_with_subsets = [] + for item in result: + asset_id = item["_id"] + count = item["count"] + if count > 0: + asset_ids_with_subsets.append(asset_id) + return asset_ids_with_subsets + + +def get_subset_by_id(project_name, subset_id, fields=None): + """Single subset entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Id of subset which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If subset with specified filters was not found. + Dict: Subset document which can be reduced to specified 'fields'. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + query_filters = {"type": "subset", "_id": subset_id} + conn = get_project_connection(project_name) + return conn.find_one(query_filters, _prepare_fields(fields)) + + +def get_subset_by_name(project_name, subset_name, asset_id, fields=None): + """Single subset entity data by it's name and it's version id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_name (str): Name of subset. + asset_id (Union[str, ObjectId]): Id of parent asset. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Union[None, Dict[str, Any]]: None if subset with specified filters was + not found or dict subset document which can be reduced to + specified 'fields'. + + """ + if not subset_name: + return None + + asset_id = convert_id(asset_id) + if not asset_id: + return None + + query_filters = { + "type": "subset", + "name": subset_name, + "parent": asset_id + } + conn = get_project_connection(project_name) + return conn.find_one(query_filters, _prepare_fields(fields)) + + +def get_subsets( + project_name, + subset_ids=None, + subset_names=None, + asset_ids=None, + names_by_asset_ids=None, + archived=False, + fields=None +): + """Subset entities data from one project filtered by entered filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should be + queried. Filter ignored if 'None' is passed. + subset_names (Iterable[str]): Subset names that should be queried. + Filter ignored if 'None' is passed. + asset_ids (Iterable[Union[str, ObjectId]]): Asset ids under which + should look for the subsets. Filter ignored if 'None' is passed. + names_by_asset_ids (dict[ObjectId, List[str]]): Complex filtering + using asset ids and list of subset names under the asset. + archived (bool): Look for archived subsets too. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching subsets. 
+ """ + + subset_types = ["subset"] + if archived: + subset_types.append("archived_subset") + + if len(subset_types) == 1: + query_filter = {"type": subset_types[0]} + else: + query_filter = {"type": {"$in": subset_types}} + + if asset_ids is not None: + asset_ids = convert_ids(asset_ids) + if not asset_ids: + return [] + query_filter["parent"] = {"$in": asset_ids} + + if subset_ids is not None: + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return [] + query_filter["_id"] = {"$in": subset_ids} + + if subset_names is not None: + if not subset_names: + return [] + query_filter["name"] = {"$in": list(subset_names)} + + if names_by_asset_ids is not None: + or_query = [] + for asset_id, names in names_by_asset_ids.items(): + if asset_id and names: + or_query.append({ + "parent": convert_id(asset_id), + "name": {"$in": list(names)} + }) + if not or_query: + return [] + query_filter["$or"] = or_query + + conn = get_project_connection(project_name) + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_subset_families(project_name, subset_ids=None): + """Set of main families of subsets. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_ids (Iterable[Union[str, ObjectId]]): Subset ids that should + be queried. All subsets from project are used if 'None' is passed. + + Returns: + set[str]: Main families of matching subsets. + """ + + subset_filter = { + "type": "subset" + } + if subset_ids is not None: + if not subset_ids: + return set() + subset_filter["_id"] = {"$in": list(subset_ids)} + + conn = get_project_connection(project_name) + result = list(conn.aggregate([ + {"$match": subset_filter}, + {"$project": { + "family": {"$arrayElemAt": ["$data.families", 0]} + }}, + {"$group": { + "_id": "family_group", + "families": {"$addToSet": "$family"} + }} + ])) + if result: + return set(result[0]["families"]) + return set() + + +def get_version_by_id(project_name, version_id, fields=None): + """Single version entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. + """ + + version_id = convert_id(version_id) + if not version_id: + return None + + query_filter = { + "type": {"$in": ["version", "hero_version"]}, + "_id": version_id + } + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_version_by_name(project_name, version, subset_id, fields=None): + """Single version entity data by it's name and subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + version (int): name of version entity (it's version). + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. 
+ """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + conn = get_project_connection(project_name) + query_filter = { + "type": "version", + "parent": subset_id, + "name": version + } + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def version_is_latest(project_name, version_id): + """Is version the latest from it's subset. + + Note: + Hero versions are considered as latest. + + Todo: + Maybe raise exception when version was not found? + + Args: + project_name (str):Name of project where to look for queried entities. + version_id (Union[str, ObjectId]): Version id which is checked. + + Returns: + bool: True if is latest version from subset else False. + """ + + version_id = convert_id(version_id) + if not version_id: + return False + version_doc = get_version_by_id( + project_name, version_id, fields=["_id", "type", "parent"] + ) + # What to do when version is not found? + if not version_doc: + return False + + if version_doc["type"] == "hero_version": + return True + + last_version = get_last_version_by_subset_id( + project_name, version_doc["parent"], fields=["_id"] + ) + return last_version["_id"] == version_id + + +def _get_versions( + project_name, + subset_ids=None, + version_ids=None, + versions=None, + standard=True, + hero=False, + fields=None +): + version_types = [] + if standard: + version_types.append("version") + + if hero: + version_types.append("hero_version") + + if not version_types: + return [] + elif len(version_types) == 1: + query_filter = {"type": version_types[0]} + else: + query_filter = {"type": {"$in": version_types}} + + if subset_ids is not None: + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return [] + query_filter["parent"] = {"$in": subset_ids} + + if version_ids is not None: + version_ids = convert_ids(version_ids) + if not version_ids: + return [] + query_filter["_id"] = {"$in": version_ids} + + if versions is not None: + versions = list(versions) + if not versions: + return [] + + if len(versions) == 1: + query_filter["name"] = versions[0] + else: + query_filter["name"] = {"$in": versions} + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_versions( + project_name, + version_ids=None, + subset_ids=None, + versions=None, + hero=False, + fields=None +): + """Version entities data from one project filtered by entered filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + version_ids (Iterable[Union[str, ObjectId]]): Version ids that will + be queried. Filter ignored if 'None' is passed. + subset_ids (Iterable[str]): Subset ids that will be queried. + Filter ignored if 'None' is passed. + versions (Iterable[int]): Version names (as integers). + Filter ignored if 'None' is passed. + hero (bool): Look also for hero versions. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching versions. + """ + + return _get_versions( + project_name, + subset_ids, + version_ids, + versions, + standard=True, + hero=hero, + fields=fields + ) + + +def get_hero_version_by_subset_id(project_name, subset_id, fields=None): + """Hero version by subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Subset id under which + is hero version. 
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        None: If hero version for passed subset id does not exist.
+        Dict: Hero version entity data.
+    """
+
+    subset_id = convert_id(subset_id)
+    if not subset_id:
+        return None
+
+    versions = list(_get_versions(
+        project_name,
+        subset_ids=[subset_id],
+        standard=False,
+        hero=True,
+        fields=fields
+    ))
+    if versions:
+        return versions[0]
+    return None
+
+
+def get_hero_version_by_id(project_name, version_id, fields=None):
+    """Hero version by its id.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        version_id (Union[str, ObjectId]): Hero version id.
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        None: If hero version with passed id was not found.
+        Dict: Hero version entity data.
+    """
+
+    version_id = convert_id(version_id)
+    if not version_id:
+        return None
+
+    versions = list(_get_versions(
+        project_name,
+        version_ids=[version_id],
+        standard=False,
+        hero=True,
+        fields=fields
+    ))
+    if versions:
+        return versions[0]
+    return None
+
+
+def get_hero_versions(
+    project_name,
+    subset_ids=None,
+    version_ids=None,
+    fields=None
+):
+    """Hero version entities data from one project filtered by entered filters.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        subset_ids (Iterable[Union[str, ObjectId]]): Subset ids under which
+            to look for hero versions. Filter ignored if 'None' is passed.
+        version_ids (Iterable[Union[str, ObjectId]]): Hero version ids.
+            Filter ignored if 'None' is passed.
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        Cursor|list: Iterable yielding hero versions matching passed filters.
+    """
+
+    return _get_versions(
+        project_name,
+        subset_ids,
+        version_ids,
+        standard=False,
+        hero=True,
+        fields=fields
+    )
+
+
+def get_output_link_versions(project_name, version_id, fields=None):
+    """Versions where passed version was used as input.
+
+    Question:
+        Not 100% sure about the usage of the function, so the name and
+        docstring may not match what it actually does.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        version_id (Union[str, ObjectId]): Version id which can be used
+            as input link for other versions.
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        Iterable: Iterable cursor yielding versions that are used as input
+            links for passed version.
+    """
+
+    version_id = convert_id(version_id)
+    if not version_id:
+        return []
+
+    conn = get_project_connection(project_name)
+    # Would it make sense to also look for hero versions?
+    query_filter = {
+        "type": "version",
+        "data.inputLinks.id": version_id
+    }
+    return conn.find(query_filter, _prepare_fields(fields))
+
+
+def get_last_versions(project_name, subset_ids, fields=None):
+    """Latest versions for entered subset_ids.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        subset_ids (Iterable[Union[str, ObjectId]]): List of subset ids.
+        fields (Iterable[str]): Fields that should be returned. All fields
+            are returned if 'None' is passed.
+
+    Returns:
+        Dict[ObjectId, Dict[str, Any]]: Last version document by subset id.
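+
+    Example:
+        Illustrative usage only; 'subset_ids' are hypothetical ObjectIds.
+
+        >>> last_by_subset_id = get_last_versions(
+        ...     "demo_project", subset_ids, fields=["name"]
+        ... )
+        >>> last_version_names = {
+        ...     subset_id: doc["name"]
+        ...     for subset_id, doc in last_by_subset_id.items()
+        ... }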
+ """ + + subset_ids = convert_ids(subset_ids) + if not subset_ids: + return {} + + if fields is not None: + fields = list(fields) + if not fields: + return {} + + # Avoid double query if only name and _id are requested + name_needed = False + limit_query = False + if fields: + fields_s = set(fields) + if "name" in fields_s: + name_needed = True + fields_s.remove("name") + + for field in ("_id", "parent"): + if field in fields_s: + fields_s.remove(field) + limit_query = len(fields_s) == 0 + + group_item = { + "_id": "$parent", + "_version_id": {"$last": "$_id"} + } + # Add name if name is needed (only for limit query) + if name_needed: + group_item["name"] = {"$last": "$name"} + + aggregation_pipeline = [ + # Find all versions of those subsets + {"$match": { + "type": "version", + "parent": {"$in": subset_ids} + }}, + # Sorting versions all together + {"$sort": {"name": 1}}, + # Group them by "parent", but only take the last + {"$group": group_item} + ] + + conn = get_project_connection(project_name) + aggregate_result = conn.aggregate(aggregation_pipeline) + if limit_query: + output = {} + for item in aggregate_result: + subset_id = item["_id"] + item_data = {"_id": item["_version_id"], "parent": subset_id} + if name_needed: + item_data["name"] = item["name"] + output[subset_id] = item_data + return output + + version_ids = [ + doc["_version_id"] + for doc in aggregate_result + ] + + fields = _prepare_fields(fields, ["parent"]) + + version_docs = get_versions( + project_name, version_ids=version_ids, fields=fields + ) + + return { + version_doc["parent"]: version_doc + for version_doc in version_docs + } + + +def get_last_version_by_subset_id(project_name, subset_id, fields=None): + """Last version for passed subset id. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_id (Union[str, ObjectId]): Id of version which should be found. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. + """ + + subset_id = convert_id(subset_id) + if not subset_id: + return None + + last_versions = get_last_versions( + project_name, subset_ids=[subset_id], fields=fields + ) + return last_versions.get(subset_id) + + +def get_last_version_by_subset_name( + project_name, subset_name, asset_id=None, asset_name=None, fields=None +): + """Last version for passed subset name under asset id/name. + + It is required to pass 'asset_id' or 'asset_name'. Asset id is recommended + if is available. + + Args: + project_name (str): Name of project where to look for queried entities. + subset_name (str): Name of subset. + asset_id (Union[str, ObjectId]): Asset id which is parent of passed + subset name. + asset_name (str): Asset name which is parent of passed subset name. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If version with specified filters was not found. + Dict: Version document which can be reduced to specified 'fields'. 
+ """ + + if not asset_id and not asset_name: + return None + + if not asset_id: + asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"]) + if not asset_doc: + return None + asset_id = asset_doc["_id"] + subset_doc = get_subset_by_name( + project_name, subset_name, asset_id, fields=["_id"] + ) + if not subset_doc: + return None + return get_last_version_by_subset_id( + project_name, subset_doc["_id"], fields=fields + ) + + +def get_representation_by_id(project_name, representation_id, fields=None): + """Representation entity data by it's id. + + Args: + project_name (str): Name of project where to look for queried entities. + representation_id (Union[str, ObjectId]): Representation id. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If representation with specified filters was not found. + Dict: Representation entity data which can be reduced + to specified 'fields'. + """ + + if not representation_id: + return None + + repre_types = ["representation", "archived_representation"] + query_filter = { + "type": {"$in": repre_types} + } + if representation_id is not None: + query_filter["_id"] = convert_id(representation_id) + + conn = get_project_connection(project_name) + + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_representation_by_name( + project_name, representation_name, version_id, fields=None +): + """Representation entity data by it's name and it's version id. + + Args: + project_name (str): Name of project where to look for queried entities. + representation_name (str): Representation name. + version_id (Union[str, ObjectId]): Id of parent version entity. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If representation with specified filters was not found. + Dict: Representation entity data which can be reduced + to specified 'fields'. 
+ """ + + version_id = convert_id(version_id) + if not version_id or not representation_name: + return None + repre_types = ["representation", "archived_representations"] + query_filter = { + "type": {"$in": repre_types}, + "name": representation_name, + "parent": version_id + } + + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def _flatten_dict(data): + flatten_queue = collections.deque() + flatten_queue.append(data) + output = {} + while flatten_queue: + item = flatten_queue.popleft() + for key, value in item.items(): + if not isinstance(value, dict): + output[key] = value + continue + + tmp = {} + for subkey, subvalue in value.items(): + new_key = "{}.{}".format(key, subkey) + tmp[new_key] = subvalue + flatten_queue.append(tmp) + return output + + +def _regex_filters(filters): + output = [] + for key, value in filters.items(): + regexes = [] + a_values = [] + if isinstance(value, PatternType): + regexes.append(value) + elif isinstance(value, (list, tuple, set)): + for item in value: + if isinstance(item, PatternType): + regexes.append(item) + else: + a_values.append(item) + else: + a_values.append(value) + + key_filters = [] + if len(a_values) == 1: + key_filters.append({key: a_values[0]}) + elif a_values: + key_filters.append({key: {"$in": a_values}}) + + for regex in regexes: + key_filters.append({key: {"$regex": regex}}) + + if len(key_filters) == 1: + output.append(key_filters[0]) + else: + output.append({"$or": key_filters}) + + return output + + +def _get_representations( + project_name, + representation_ids, + representation_names, + version_ids, + context_filters, + names_by_version_ids, + standard, + archived, + fields +): + default_output = [] + repre_types = [] + if standard: + repre_types.append("representation") + if archived: + repre_types.append("archived_representation") + + if not repre_types: + return default_output + + if len(repre_types) == 1: + query_filter = {"type": repre_types[0]} + else: + query_filter = {"type": {"$in": repre_types}} + + if representation_ids is not None: + representation_ids = convert_ids(representation_ids) + if not representation_ids: + return default_output + query_filter["_id"] = {"$in": representation_ids} + + if representation_names is not None: + if not representation_names: + return default_output + query_filter["name"] = {"$in": list(representation_names)} + + if version_ids is not None: + version_ids = convert_ids(version_ids) + if not version_ids: + return default_output + query_filter["parent"] = {"$in": version_ids} + + or_queries = [] + if names_by_version_ids is not None: + or_query = [] + for version_id, names in names_by_version_ids.items(): + if version_id and names: + or_query.append({ + "parent": convert_id(version_id), + "name": {"$in": list(names)} + }) + if not or_query: + return default_output + or_queries.append(or_query) + + if context_filters is not None: + if not context_filters: + return [] + _flatten_filters = _flatten_dict(context_filters) + flatten_filters = {} + for key, value in _flatten_filters.items(): + if not key.startswith("context"): + key = "context.{}".format(key) + flatten_filters[key] = value + + for item in _regex_filters(flatten_filters): + for key, value in item.items(): + if key != "$or": + query_filter[key] = value + + elif value: + or_queries.append(value) + + if len(or_queries) == 1: + query_filter["$or"] = or_queries[0] + elif or_queries: + and_query = [] + for or_query in or_queries: + if isinstance(or_query, list): + or_query 
= {"$or": or_query} + and_query.append(or_query) + query_filter["$and"] = and_query + + conn = get_project_connection(project_name) + + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_representations( + project_name, + representation_ids=None, + representation_names=None, + version_ids=None, + context_filters=None, + names_by_version_ids=None, + archived=False, + standard=True, + fields=None +): + """Representaion entities data from one project filtered by filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter + ignored if 'None' is passed. + context_filters (Dict[str, List[str, PatternType]]): Filter by + representation context fields. + names_by_version_ids (dict[ObjectId, list[str]]): Complex filtering + using version ids and list of names under the version. + archived (bool): Output will also contain archived representations. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching representations. + """ + + return _get_representations( + project_name=project_name, + representation_ids=representation_ids, + representation_names=representation_names, + version_ids=version_ids, + context_filters=context_filters, + names_by_version_ids=names_by_version_ids, + standard=True, + archived=archived, + fields=fields + ) + + +def get_archived_representations( + project_name, + representation_ids=None, + representation_names=None, + version_ids=None, + context_filters=None, + names_by_version_ids=None, + fields=None +): + """Archived representaion entities data from project with applied filters. + + Filters are additive (all conditions must pass to return subset). + + Args: + project_name (str): Name of project where to look for queried entities. + representation_ids (Iterable[Union[str, ObjectId]]): Representation ids + used as filter. Filter ignored if 'None' is passed. + representation_names (Iterable[str]): Representations names used + as filter. Filter ignored if 'None' is passed. + version_ids (Iterable[str]): Subset ids used as parent filter. Filter + ignored if 'None' is passed. + context_filters (Dict[str, List[str, PatternType]]): Filter by + representation context fields. + names_by_version_ids (dict[ObjectId, List[str]]): Complex filtering + using version ids and list of names under the version. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + Cursor: Iterable cursor yielding all matching representations. + """ + + return _get_representations( + project_name=project_name, + representation_ids=representation_ids, + representation_names=representation_names, + version_ids=version_ids, + context_filters=context_filters, + names_by_version_ids=names_by_version_ids, + standard=False, + archived=True, + fields=fields + ) + + +def get_representations_parents(project_name, representations): + """Prepare parents of representation entities. + + Each item of returned dictionary contains version, subset, asset + and project in that order. 
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        representations (List[dict]): Representation entities with at least
+            '_id' and 'parent' keys.
+
+    Returns:
+        dict[ObjectId, tuple]: Parents by representation id.
+    """
+
+    repre_docs_by_version_id = collections.defaultdict(list)
+    version_docs_by_version_id = {}
+    version_docs_by_subset_id = collections.defaultdict(list)
+    subset_docs_by_subset_id = {}
+    subset_docs_by_asset_id = collections.defaultdict(list)
+    output = {}
+    for repre_doc in representations:
+        repre_id = repre_doc["_id"]
+        version_id = repre_doc["parent"]
+        output[repre_id] = (None, None, None, None)
+        repre_docs_by_version_id[version_id].append(repre_doc)
+
+    version_docs = get_versions(
+        project_name,
+        version_ids=repre_docs_by_version_id.keys(),
+        hero=True
+    )
+    for version_doc in version_docs:
+        version_id = version_doc["_id"]
+        subset_id = version_doc["parent"]
+        version_docs_by_version_id[version_id] = version_doc
+        version_docs_by_subset_id[subset_id].append(version_doc)
+
+    subset_docs = get_subsets(
+        project_name, subset_ids=version_docs_by_subset_id.keys()
+    )
+    for subset_doc in subset_docs:
+        subset_id = subset_doc["_id"]
+        asset_id = subset_doc["parent"]
+        subset_docs_by_subset_id[subset_id] = subset_doc
+        subset_docs_by_asset_id[asset_id].append(subset_doc)
+
+    asset_docs = get_assets(
+        project_name, asset_ids=subset_docs_by_asset_id.keys()
+    )
+    asset_docs_by_id = {
+        asset_doc["_id"]: asset_doc
+        for asset_doc in asset_docs
+    }
+
+    project_doc = get_project(project_name)
+
+    for version_id, repre_docs in repre_docs_by_version_id.items():
+        asset_doc = None
+        subset_doc = None
+        version_doc = version_docs_by_version_id.get(version_id)
+        if version_doc:
+            subset_id = version_doc["parent"]
+            subset_doc = subset_docs_by_subset_id.get(subset_id)
+            if subset_doc:
+                asset_id = subset_doc["parent"]
+                asset_doc = asset_docs_by_id.get(asset_id)
+
+        for repre_doc in repre_docs:
+            repre_id = repre_doc["_id"]
+            output[repre_id] = (
+                version_doc, subset_doc, asset_doc, project_doc
+            )
+    return output
+
+
+def get_representation_parents(project_name, representation):
+    """Prepare parents of single representation entity.
+
+    The returned tuple contains version, subset, asset and project documents
+    in that order.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        representation (dict): Representation entity with at least
+            '_id' and 'parent' keys.
+
+    Returns:
+        Union[None, tuple]: Tuple of (version, subset, asset, project)
+            documents, or None if no representation was passed.
+    """
+
+    if not representation:
+        return None
+
+    repre_id = representation["_id"]
+    parents_by_repre_id = get_representations_parents(
+        project_name, [representation]
+    )
+    return parents_by_repre_id[repre_id]
+
+
+def get_thumbnail_id_from_source(project_name, src_type, src_id):
+    """Receive thumbnail id from source entity.
+
+    Args:
+        project_name (str): Name of project where to look for queried
+            entities.
+        src_type (str): Type of source entity ('asset', 'version').
+        src_id (Union[str, ObjectId]): Id of source entity.
+
+    Returns:
+        ObjectId: Thumbnail id assigned to entity.
+        None: If source entity does not have any thumbnail id assigned.
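+
+    Example:
+        Illustrative usage only; 'asset_doc' is a hypothetical asset
+        document.
+
+        >>> thumbnail_id = get_thumbnail_id_from_source(
+        ...     "demo_project", "asset", asset_doc["_id"]
+        ... )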
+ """ + + if not src_type or not src_id: + return None + + query_filter = {"_id": convert_id(src_id)} + + conn = get_project_connection(project_name) + src_doc = conn.find_one(query_filter, {"data.thumbnail_id"}) + if src_doc: + return src_doc.get("data", {}).get("thumbnail_id") + return None + + +def get_thumbnails(project_name, thumbnail_ids, fields=None): + """Receive thumbnails entity data. + + Thumbnail entity can be used to receive binary content of thumbnail based + on it's content and ThumbnailResolvers. + + Args: + project_name (str): Name of project where to look for queried entities. + thumbnail_ids (Iterable[Union[str, ObjectId]]): Ids of thumbnail + entities. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + cursor: Cursor of queried documents. + """ + + if thumbnail_ids: + thumbnail_ids = convert_ids(thumbnail_ids) + + if not thumbnail_ids: + return [] + query_filter = { + "type": "thumbnail", + "_id": {"$in": thumbnail_ids} + } + conn = get_project_connection(project_name) + return conn.find(query_filter, _prepare_fields(fields)) + + +def get_thumbnail(project_name, thumbnail_id, fields=None): + """Receive thumbnail entity data. + + Args: + project_name (str): Name of project where to look for queried entities. + thumbnail_id (Union[str, ObjectId]): Id of thumbnail entity. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + None: If thumbnail with specified id was not found. + Dict: Thumbnail entity data which can be reduced to specified 'fields'. + """ + + if not thumbnail_id: + return None + query_filter = {"type": "thumbnail", "_id": convert_id(thumbnail_id)} + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +def get_workfile_info( + project_name, asset_id, task_name, filename, fields=None +): + """Document with workfile information. + + Warning: + Query is based on filename and context which does not meant it will + find always right and expected result. Information have limited usage + and is not recommended to use it as source information about workfile. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_id (Union[str, ObjectId]): Id of asset entity. + task_name (str): Task name on asset. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. 
+ """ + + if not asset_id or not task_name or not filename: + return None + + query_filter = { + "type": "workfile", + "parent": convert_id(asset_id), + "task_name": task_name, + "filename": filename + } + conn = get_project_connection(project_name) + return conn.find_one(query_filter, _prepare_fields(fields)) + + +""" +## Custom data storage: +- Settings - OP settings overrides and local settings +- Logging - logs from Logger +- Webpublisher - jobs +- Ftrack - events +- Maya - Shaders + - openpype/hosts/maya/api/shader_definition_editor.py + - openpype/hosts/maya/plugins/publish/validate_model_name.py + +## Global publish plugins +- openpype/plugins/publish/extract_hierarchy_avalon.py + Create: + - asset + Update: + - asset + +## Lib +- openpype/lib/avalon_context.py + Update: + - workfile data +- openpype/lib/project_backpack.py + Update: + - project +""" diff --git a/openpype/client/entity_links.py b/openpype/client/entity_links.py new file mode 100644 index 0000000000..b74b4ce7f6 --- /dev/null +++ b/openpype/client/entity_links.py @@ -0,0 +1,243 @@ +from .mongo import get_project_connection +from .entities import ( + get_assets, + get_asset_by_id, + get_version_by_id, + get_representation_by_id, + convert_id, +) + + +def get_linked_asset_ids(project_name, asset_doc=None, asset_id=None): + """Extract linked asset ids from asset document. + + One of asset document or asset id must be passed. + + Note: + Asset links now works only from asset to assets. + + Args: + asset_doc (dict): Asset document from DB. + + Returns: + List[Union[ObjectId, str]]: Asset ids of input links. + """ + + output = [] + if not asset_doc and not asset_id: + return output + + if not asset_doc: + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.inputLinks"] + ) + + input_links = asset_doc["data"].get("inputLinks") + if not input_links: + return output + + for item in input_links: + # Backwards compatibility for "_id" key which was replaced with + # "id" + if "_id" in item: + link_id = item["_id"] + else: + link_id = item["id"] + output.append(link_id) + return output + + +def get_linked_assets( + project_name, asset_doc=None, asset_id=None, fields=None +): + """Return linked assets based on passed asset document. + + One of asset document or asset id must be passed. + + Args: + project_name (str): Name of project where to look for queried entities. + asset_doc (Dict[str, Any]): Asset document from database. + asset_id (Union[ObjectId, str]): Asset id. Can be used instead of + asset document. + fields (Iterable[str]): Fields that should be returned. All fields are + returned if 'None' is passed. + + Returns: + List[Dict[str, Any]]: Asset documents of input links for passed + asset doc. + """ + + if not asset_doc: + if not asset_id: + return [] + asset_doc = get_asset_by_id( + project_name, + asset_id, + fields=["data.inputLinks"] + ) + if not asset_doc: + return [] + + link_ids = get_linked_asset_ids(project_name, asset_doc=asset_doc) + if not link_ids: + return [] + + return list(get_assets(project_name, asset_ids=link_ids, fields=fields)) + + +def get_linked_representation_id( + project_name, repre_doc=None, repre_id=None, link_type=None, max_depth=None +): + """Returns list of linked ids of particular type (if provided). + + One of representation document or representation id must be passed. + Note: + Representation links now works only from representation through version + back to representations. + + Args: + project_name (str): Name of project where look for links. 
+ repre_doc (Dict[str, Any]): Representation document. + repre_id (Union[ObjectId, str]): Representation id. + link_type (str): Type of link (e.g. 'reference', ...). + max_depth (int): Limit recursion level. Default: 0 + + Returns: + List[ObjectId] Linked representation ids. + """ + + if repre_doc: + repre_id = repre_doc["_id"] + + if repre_id: + repre_id = convert_id(repre_id) + + if not repre_id and not repre_doc: + return [] + + version_id = None + if repre_doc: + version_id = repre_doc.get("parent") + + if not version_id: + repre_doc = get_representation_by_id( + project_name, repre_id, fields=["parent"] + ) + version_id = repre_doc["parent"] + + if not version_id: + return [] + + version_doc = get_version_by_id( + project_name, version_id, fields=["type", "version_id"] + ) + if version_doc["type"] == "hero_version": + version_id = version_doc["version_id"] + + if max_depth is None: + max_depth = 0 + + match = { + "_id": version_id, + # Links are not stored to hero versions at this moment so filter + # is limited to just versions + "type": "version" + } + + graph_lookup = { + "from": project_name, + "startWith": "$data.inputLinks.id", + "connectFromField": "data.inputLinks.id", + "connectToField": "_id", + "as": "outputs_recursive", + "depthField": "depth" + } + if max_depth != 0: + # We offset by -1 since 0 basically means no recursion + # but the recursion only happens after the initial lookup + # for outputs. + graph_lookup["maxDepth"] = max_depth - 1 + + query_pipeline = [ + # Match + {"$match": match}, + # Recursive graph lookup for inputs + {"$graphLookup": graph_lookup} + ] + conn = get_project_connection(project_name) + result = conn.aggregate(query_pipeline) + referenced_version_ids = _process_referenced_pipeline_result( + result, link_type + ) + if not referenced_version_ids: + return [] + + ref_ids = conn.distinct( + "_id", + filter={ + "parent": {"$in": list(referenced_version_ids)}, + "type": "representation" + } + ) + + return list(ref_ids) + + +def _process_referenced_pipeline_result(result, link_type): + """Filters result from pipeline for particular link_type. + + Pipeline cannot use link_type directly in a query. 
+
+    Returns:
+        set: Version ids that are linked through the requested link type.
+    """
+
+    referenced_version_ids = set()
+    correctly_linked_ids = set()
+    for item in result:
+        input_links = item.get("data", {}).get("inputLinks")
+        if not input_links:
+            continue
+
+        _filter_input_links(
+            input_links,
+            link_type,
+            correctly_linked_ids
+        )
+
+        # 'outputs_recursive' comes in random order, sort it by depth
+        outputs_recursive = item.get("outputs_recursive")
+        if not outputs_recursive:
+            continue
+
+        for output in sorted(outputs_recursive, key=lambda o: o["depth"]):
+            output_links = output.get("data", {}).get("inputLinks")
+            if not output_links and output["type"] != "hero_version":
+                continue
+
+            # Leaf
+            if output["_id"] not in correctly_linked_ids:
+                continue
+
+            _filter_input_links(
+                output_links,
+                link_type,
+                correctly_linked_ids
+            )
+
+            referenced_version_ids.add(output["_id"])
+
+    return referenced_version_ids
+
+
+def _filter_input_links(input_links, link_type, correctly_linked_ids):
+    if not input_links:  # to handle hero versions
+        return
+
+    for input_link in input_links:
+        if link_type and input_link["type"] != link_type:
+            continue
+
+        link_id = input_link.get("id") or input_link.get("_id")
+        if link_id is not None:
+            correctly_linked_ids.add(link_id)
diff --git a/openpype/client/mongo.py b/openpype/client/mongo.py
new file mode 100644
index 0000000000..72acbc5476
--- /dev/null
+++ b/openpype/client/mongo.py
@@ -0,0 +1,235 @@
+import os
+import sys
+import time
+import logging
+import pymongo
+import certifi
+
+if sys.version_info[0] == 2:
+    from urlparse import urlparse, parse_qs
+else:
+    from urllib.parse import urlparse, parse_qs
+
+
+class MongoEnvNotSet(Exception):
+    pass
+
+
+def _decompose_url(url):
+    """Decompose mongo url to basic components.
+
+    Used for creation of MongoHandler, which expects mongo url components
+    as separate kwargs. The components are not used for the connection
+    itself, as we're setting the connection directly; they exist only so
+    the MongoHandler validation passes.
+    """
+
+    # Use first url from passed url
+    # - this is because it is possible to pass multiple urls for multiple
+    #   replica sets which would crash on urlparse otherwise
+    # - please don't use comma in username or password
+    url = url.split(",")[0]
+    components = {
+        "scheme": None,
+        "host": None,
+        "port": None,
+        "username": None,
+        "password": None,
+        "auth_db": None
+    }
+
+    result = urlparse(url)
+    if result.scheme is None:
+        _url = "mongodb://{}".format(url)
+        result = urlparse(_url)
+
+    components["scheme"] = result.scheme
+    components["host"] = result.hostname
+    try:
+        components["port"] = result.port
+    except ValueError:
+        raise RuntimeError("invalid port specified")
+    components["username"] = result.username
+    components["password"] = result.password
+
+    try:
+        components["auth_db"] = parse_qs(result.query)['authSource'][0]
+    except KeyError:
+        # no auth db provided, mongo will use the one we are connecting to
+        pass
+
+    return components
+
+
+def get_default_components():
+    mongo_url = os.environ.get("OPENPYPE_MONGO")
+    if mongo_url is None:
+        raise MongoEnvNotSet(
+            "URL for Mongo logging connection is not set."
+        )
+    return _decompose_url(mongo_url)
+
+
+def should_add_certificate_path_to_mongo_url(mongo_url):
+    """Check if ca certificate should be added to mongo url.
+
+    Since 30.9.2021 cloud mongo requires newer certificates that are not
+    available on most workstations. This adds the path to the certifi
+    certificate which is valid for it. To add the certificate path, the url
+    must have scheme 'mongodb+srv' or contain 'ssl=true' or 'tls=true' in
+    its query.
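+
+    Example:
+        Illustrative usage; the result depends only on the url itself.
+
+        >>> should_add_certificate_path_to_mongo_url(
+        ...     "mongodb+srv://user:pass@cluster.example.com"
+        ... )
+        True
+        >>> should_add_certificate_path_to_mongo_url(
+        ...     "mongodb://localhost:27017"
+        ... )
+        False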
+ """ + + parsed = urlparse(mongo_url) + query = parse_qs(parsed.query) + lowered_query_keys = set(key.lower() for key in query.keys()) + add_certificate = False + # Check if url 'ssl' or 'tls' are set to 'true' + for key in ("ssl", "tls"): + if key in query and "true" in query["ssl"]: + add_certificate = True + break + + # Check if url contains 'mongodb+srv' + if not add_certificate and parsed.scheme == "mongodb+srv": + add_certificate = True + + # Check if url does already contain certificate path + if add_certificate and "tlscafile" in lowered_query_keys: + add_certificate = False + + return add_certificate + + +def validate_mongo_connection(mongo_uri): + """Check if provided mongodb URL is valid. + + Args: + mongo_uri (str): URL to validate. + + Raises: + ValueError: When port in mongo uri is not valid. + pymongo.errors.InvalidURI: If passed mongo is invalid. + pymongo.errors.ServerSelectionTimeoutError: If connection timeout + passed so probably couldn't connect to mongo server. + + """ + + client = OpenPypeMongoConnection.create_connection( + mongo_uri, retry_attempts=1 + ) + client.close() + + +class OpenPypeMongoConnection: + """Singleton MongoDB connection. + + Keeps MongoDB connections by url. + """ + + mongo_clients = {} + log = logging.getLogger("OpenPypeMongoConnection") + + @staticmethod + def get_default_mongo_url(): + return os.environ["OPENPYPE_MONGO"] + + @classmethod + def get_mongo_client(cls, mongo_url=None): + if mongo_url is None: + mongo_url = cls.get_default_mongo_url() + + connection = cls.mongo_clients.get(mongo_url) + if connection: + # Naive validation of existing connection + try: + connection.server_info() + with connection.start_session(): + pass + except Exception: + connection = None + + if not connection: + cls.log.debug("Creating mongo connection to {}".format(mongo_url)) + connection = cls.create_connection(mongo_url) + cls.mongo_clients[mongo_url] = connection + + return connection + + @classmethod + def create_connection(cls, mongo_url, timeout=None, retry_attempts=None): + parsed = urlparse(mongo_url) + # Force validation of scheme + if parsed.scheme not in ["mongodb", "mongodb+srv"]: + raise pymongo.errors.InvalidURI(( + "Invalid URI scheme:" + " URI must begin with 'mongodb://' or 'mongodb+srv://'" + )) + + if timeout is None: + timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000) + + kwargs = { + "serverSelectionTimeoutMS": timeout + } + if should_add_certificate_path_to_mongo_url(mongo_url): + kwargs["ssl_ca_certs"] = certifi.where() + + mongo_client = pymongo.MongoClient(mongo_url, **kwargs) + + if retry_attempts is None: + retry_attempts = 3 + + elif not retry_attempts: + retry_attempts = 1 + + last_exc = None + valid = False + t1 = time.time() + for attempt in range(1, retry_attempts + 1): + try: + mongo_client.server_info() + with mongo_client.start_session(): + pass + valid = True + break + + except Exception as exc: + last_exc = exc + if attempt < retry_attempts: + cls.log.warning( + "Attempt {} failed. Retrying... ".format(attempt) + ) + time.sleep(1) + + if not valid: + raise last_exc + + cls.log.info("Connected to {}, delay {:.3f}s".format( + mongo_url, time.time() - t1 + )) + return mongo_client + + +def get_project_database(): + db_name = os.environ.get("AVALON_DB") or "avalon" + return OpenPypeMongoConnection.get_mongo_client()[db_name] + + +def get_project_connection(project_name): + """Direct access to mongo collection. + + We're trying to avoid using direct access to mongo. 
This should be used + only for Create, Update and Remove operations until there are implemented + api calls for that. + + Args: + project_name(str): Project name for which collection should be + returned. + + Returns: + pymongo.Collection: Collection realated to passed project. + """ + + if not project_name: + raise ValueError("Invalid project name {}".format(str(project_name))) + return get_project_database()[project_name] diff --git a/openpype/client/notes.md b/openpype/client/notes.md new file mode 100644 index 0000000000..a261b86eca --- /dev/null +++ b/openpype/client/notes.md @@ -0,0 +1,39 @@ +# Client functionality +## Reason +Preparation for OpenPype v4 server. Goal is to remove direct mongo calls in code to prepare a little bit for different source of data for code before. To start think about database calls less as mongo calls but more universally. To do so was implemented simple wrapper around database calls to not use pymongo specific code. + +Current goal is not to make universal database model which can be easily replaced with any different source of data but to make it close as possible. Current implementation of OpenPype is too tighly connected to pymongo and it's abilities so we're trying to get closer with long term changes that can be used even in current state. + +## Queries +Query functions don't use full potential of mongo queries like very specific queries based on subdictionaries or unknown structures. We try to avoid these calls as much as possible because they'll probably won't be available in future. If it's really necessary a new function can be added but only if it's reasonable for overall logic. All query functions were moved to `~/client/entities.py`. Each function has arguments with available filters and possible reduce of returned keys for each entity. + +## Changes +Changes are a little bit complicated. Mongo has many options how update can happen which had to be reduced also it would be at this stage complicated to validate values which are created or updated thus automation is at this point almost none. Changes can be made using operations available in `~/client/operations.py`. Each operation require project name and entity type, but may require operation specific data. + +### Create +Create operations expect already prepared document data, for that are prepared functions creating skeletal structures of documents (do not fill all required data), except `_id` all data should be right. Existence of entity is not validated so if the same creation operation is send n times it will create the entity n times which can cause issues. + +### Update +Update operation require entity id and keys that should be changed, update dictionary must have {"key": value}. If value should be set in nested dictionary the key must have also all subkeys joined with dot `.` (e.g. `{"data": {"fps": 25}}` -> `{"data.fps": 25}`). To simplify update dictionaries were prepared functions which does that for you, their name has template `prepare__update_data` - they work on comparison of previous document and new document. If there is missing function for requested entity type it is because we didn't need it yet and require implementaion. + +### Delete +Delete operation need entity id. Entity will be deleted from mongo. + + +## What (probably) won't be replaced +Some parts of code are still using direct mongo calls. In most of cases it is for very specific calls that are module specific or their usage will completely change in future. 
+- Mongo calls that are not project specific (out of `avalon` collection) will be removed or will have to use different mechanism how the data are stored. At this moment it is related to OpenPype settings and logs, ftrack server events, some other data. +- Sync server queries. They're complex and very specific for sync server module. Their replacement will require specific calls to OpenPype server in v4 thus their abstraction with wrapper is irrelevant and would complicate production in v3. +- Project managers (ftrack, kitsu, shotgrid, embedded Project Manager, etc.). Project managers are creating, updating or removing assets in v3, but in v4 will create folders with different structure. Wrapping creation of assets would not help to prepare for v4 because of new data structures. The same can be said about editorial Extract Hierarchy Avalon plugin which create project structure. +- Code parts that is marked as deprecated in v3 or will be deprecated in v4. + - integrate asset legacy publish plugin - already is legacy kept for safety + - integrate thumbnail - thumbnails will be stored in different way in v4 + - input links - link will be stored in different way and will have different mechanism of linking. In v3 are links limited to same entity type "asset <-> asset" or "representation <-> representation". + +## Known missing replacements +- change subset group in loader tool +- integrate subset group +- query input links in openpype lib +- create project in openpype lib +- save/create workfile doc in openpype lib +- integrate hero version diff --git a/openpype/client/operations.py b/openpype/client/operations.py new file mode 100644 index 0000000000..fd639c34a7 --- /dev/null +++ b/openpype/client/operations.py @@ -0,0 +1,794 @@ +import re +import uuid +import copy +import collections +from abc import ABCMeta, abstractmethod, abstractproperty + +import six +from bson.objectid import ObjectId +from pymongo import DeleteOne, InsertOne, UpdateOne + +from .mongo import get_project_connection +from .entities import get_project + +REMOVED_VALUE = object() + +PROJECT_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_" +PROJECT_NAME_REGEX = re.compile( + "^[{}]+$".format(PROJECT_NAME_ALLOWED_SYMBOLS) +) + +CURRENT_PROJECT_SCHEMA = "openpype:project-3.0" +CURRENT_PROJECT_CONFIG_SCHEMA = "openpype:config-2.0" +CURRENT_ASSET_DOC_SCHEMA = "openpype:asset-3.0" +CURRENT_SUBSET_SCHEMA = "openpype:subset-3.0" +CURRENT_VERSION_SCHEMA = "openpype:version-3.0" +CURRENT_HERO_VERSION_SCHEMA = "openpype:hero_version-1.0" +CURRENT_REPRESENTATION_SCHEMA = "openpype:representation-2.0" +CURRENT_WORKFILE_INFO_SCHEMA = "openpype:workfile-1.0" +CURRENT_THUMBNAIL_SCHEMA = "openpype:thumbnail-1.0" + + +def _create_or_convert_to_mongo_id(mongo_id): + if mongo_id is None: + return ObjectId() + return ObjectId(mongo_id) + + +def new_project_document( + project_name, project_code, config, data=None, entity_id=None +): + """Create skeleton data of project document. + + Args: + project_name (str): Name of project. Used as identifier of a project. + project_code (str): Shorter version of projet without spaces and + special characters (in most of cases). Should be also considered + as unique name across projects. + config (Dic[str, Any]): Project config consist of roots, templates, + applications and other project Anatomy related data. + data (Dict[str, Any]): Project data with information about it's + attributes (e.g. 'fps' etc.) or integration specific keys. + entity_id (Union[str, ObjectId]): Predefined id of document. 
+            New id is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of project document.
+    """
+
+    if data is None:
+        data = {}
+
+    data["code"] = project_code
+
+    # Use type 'project' and separate 'schema' key so the document matches
+    # the '{"type": "project"}' filter used by query functions
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "name": project_name,
+        "type": "project",
+        "schema": CURRENT_PROJECT_SCHEMA,
+        "data": data,
+        "config": config
+    }
+
+
+def new_asset_document(
+    name, project_id, parent_id, parents, data=None, entity_id=None
+):
+    """Create skeleton data of asset document.
+
+    Args:
+        name (str): Is considered as unique identifier of asset in project.
+        project_id (Union[str, ObjectId]): Id of project document.
+        parent_id (Union[str, ObjectId]): Id of parent asset.
+        parents (List[str]): List of parent asset names.
+        data (Dict[str, Any]): Asset document data. Empty dictionary is used
+            if not passed. Value of 'parent_id' is used to fill
+            'visualParent'.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of asset document.
+    """
+
+    if data is None:
+        data = {}
+    if parent_id is not None:
+        parent_id = ObjectId(parent_id)
+    data["visualParent"] = parent_id
+    data["parents"] = parents
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "type": "asset",
+        "name": name,
+        "parent": ObjectId(project_id),
+        "data": data,
+        "schema": CURRENT_ASSET_DOC_SCHEMA
+    }
+
+
+def new_subset_document(name, family, asset_id, data=None, entity_id=None):
+    """Create skeleton data of subset document.
+
+    Args:
+        name (str): Is considered as unique identifier of subset under asset.
+        family (str): Subset's family.
+        asset_id (Union[str, ObjectId]): Id of parent asset.
+        data (Dict[str, Any]): Subset document data. Empty dictionary is used
+            if not passed. Value of 'family' is used to fill 'family'.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of subset document.
+    """
+
+    if data is None:
+        data = {}
+    data["family"] = family
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "schema": CURRENT_SUBSET_SCHEMA,
+        "type": "subset",
+        "name": name,
+        "data": data,
+        "parent": asset_id
+    }
+
+
+def new_version_doc(version, subset_id, data=None, entity_id=None):
+    """Create skeleton data of version document.
+
+    Args:
+        version (int): Is considered as unique identifier of version
+            under subset.
+        subset_id (Union[str, ObjectId]): Id of parent subset.
+        data (Dict[str, Any]): Version document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of version document.
+    """
+
+    if data is None:
+        data = {}
+
+    return {
+        "_id": _create_or_convert_to_mongo_id(entity_id),
+        "schema": CURRENT_VERSION_SCHEMA,
+        "type": "version",
+        "name": int(version),
+        "parent": subset_id,
+        "data": data
+    }
+
+
+def new_hero_version_doc(version_id, subset_id, data=None, entity_id=None):
+    """Create skeleton data of hero version document.
+
+    Args:
+        version_id (ObjectId): Id of the version the hero version
+            references.
+        subset_id (Union[str, ObjectId]): Id of parent subset.
+        data (Dict[str, Any]): Version document data.
+        entity_id (Union[str, ObjectId]): Predefined id of document. New id
+            is created if not passed.
+
+    Returns:
+        Dict[str, Any]: Skeleton of hero version document.
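+
+    Example:
+        Illustrative usage only; 'version_doc' is a hypothetical version
+        document the hero version should point to.
+
+        >>> hero_doc = new_hero_version_doc(
+        ...     version_doc["_id"], version_doc["parent"]
+        ... )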
+ """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_HERO_VERSION_SCHEMA, + "type": "hero_version", + "version_id": version_id, + "parent": subset_id, + "data": data + } + + +def new_representation_doc( + name, version_id, context, data=None, entity_id=None +): + """Create skeleton data of asset document. + + Args: + version (int): Is considered as unique identifier of version + under subset. + version_id (Union[str, ObjectId]): Id of parent version. + context (Dict[str, Any]): Representation context used for fill template + of to query. + data (Dict[str, Any]): Representation document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of version document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "schema": CURRENT_REPRESENTATION_SCHEMA, + "type": "representation", + "parent": version_id, + "name": name, + "data": data, + + # Imprint shortcut to context for performance reasons. + "context": context + } + + +def new_thumbnail_doc(data=None, entity_id=None): + """Create skeleton data of thumbnail document. + + Args: + data (Dict[str, Any]): Thumbnail document data. + entity_id (Union[str, ObjectId]): Predefined id of document. New id is + created if not passed. + + Returns: + Dict[str, Any]: Skeleton of thumbnail document. + """ + + if data is None: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "thumbnail", + "schema": CURRENT_THUMBNAIL_SCHEMA, + "data": data + } + + +def new_workfile_info_doc( + filename, asset_id, task_name, files, data=None, entity_id=None +): + """Create skeleton data of workfile info document. + + Workfile document is at this moment used primarily for artist notes. + + Args: + filename (str): Filename of workfile. + asset_id (Union[str, ObjectId]): Id of asset under which workfile live. + task_name (str): Task under which was workfile created. + files (List[str]): List of rootless filepaths related to workfile. + data (Dict[str, Any]): Additional metadata. + + Returns: + Dict[str, Any]: Skeleton of workfile info document. + """ + + if not data: + data = {} + + return { + "_id": _create_or_convert_to_mongo_id(entity_id), + "type": "workfile", + "parent": ObjectId(asset_id), + "task_name": task_name, + "filename": filename, + "data": data, + "files": files + } + + +def _prepare_update_data(old_doc, new_doc, replace): + changes = {} + for key, value in new_doc.items(): + if key not in old_doc or value != old_doc[key]: + changes[key] = value + + if replace: + for key in old_doc.keys(): + if key not in new_doc: + changes[key] = REMOVED_VALUE + return changes + + +def prepare_subset_update_data(old_doc, new_doc, replace=True): + """Compare two subset documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_version_update_data(old_doc, new_doc, replace=True): + """Compare two version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. 
+ """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_hero_version_update_data(old_doc, new_doc, replace=True): + """Compare two hero version documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_representation_update_data(old_doc, new_doc, replace=True): + """Compare two representation documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +def prepare_workfile_info_update_data(old_doc, new_doc, replace=True): + """Compare two workfile info documents and prepare update data. + + Based on compared values will create update data for 'UpdateOperation'. + + Empty output means that documents are identical. + + Returns: + Dict[str, Any]: Changes between old and new document. + """ + + return _prepare_update_data(old_doc, new_doc, replace) + + +@six.add_metaclass(ABCMeta) +class AbstractOperation(object): + """Base operation class. + + Opration represent a call into database. The call can create, change or + remove data. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + """ + + def __init__(self, project_name, entity_type): + self._project_name = project_name + self._entity_type = entity_type + self._id = str(uuid.uuid4()) + + @property + def project_name(self): + return self._project_name + + @property + def id(self): + """Identifier of operation.""" + + return self._id + + @property + def entity_type(self): + return self._entity_type + + @abstractproperty + def operation_name(self): + """Stringified type of operation.""" + + pass + + @abstractmethod + def to_mongo_operation(self): + """Convert operation to Mongo batch operation.""" + + pass + + def to_data(self): + """Convert opration to data that can be converted to json or others. + + Warning: + Current state returns ObjectId objects which cannot be parsed by + json. + + Returns: + Dict[str, Any]: Description of operation. + """ + + return { + "id": self._id, + "entity_type": self.entity_type, + "project_name": self.project_name, + "operation": self.operation_name + } + + +class CreateOperation(AbstractOperation): + """Opeartion to create an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + data (Dict[str, Any]): Data of entity that will be created. 
+ """ + + operation_name = "create" + + def __init__(self, project_name, entity_type, data): + super(CreateOperation, self).__init__(project_name, entity_type) + + if not data: + data = {} + else: + data = copy.deepcopy(dict(data)) + + if "_id" not in data: + data["_id"] = ObjectId() + else: + data["_id"] = ObjectId(data["_id"]) + + self._entity_id = data["_id"] + self._data = data + + def __setitem__(self, key, value): + self.set_value(key, value) + + def __getitem__(self, key): + return self.data[key] + + def set_value(self, key, value): + self.data[key] = value + + def get(self, key, *args, **kwargs): + return self.data.get(key, *args, **kwargs) + + @property + def entity_id(self): + return self._entity_id + + @property + def data(self): + return self._data + + def to_mongo_operation(self): + return InsertOne(copy.deepcopy(self._data)) + + def to_data(self): + output = super(CreateOperation, self).to_data() + output["data"] = copy.deepcopy(self.data) + return output + + +class UpdateOperation(AbstractOperation): + """Opeartion to update an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Identifier of an entity. + update_data (Dict[str, Any]): Key -> value changes that will be set in + database. If value is set to 'REMOVED_VALUE' the key will be + removed. Only first level of dictionary is checked (on purpose). + """ + + operation_name = "update" + + def __init__(self, project_name, entity_type, entity_id, update_data): + super(UpdateOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + self._update_data = update_data + + @property + def entity_id(self): + return self._entity_id + + @property + def update_data(self): + return self._update_data + + def to_mongo_operation(self): + unset_data = {} + set_data = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + unset_data[key] = None + else: + set_data[key] = value + + op_data = {} + if unset_data: + op_data["$unset"] = unset_data + if set_data: + op_data["$set"] = set_data + + if not op_data: + return None + + return UpdateOne( + {"_id": self.entity_id}, + op_data + ) + + def to_data(self): + changes = {} + for key, value in self._update_data.items(): + if value is REMOVED_VALUE: + value = None + changes[key] = value + + output = super(UpdateOperation, self).to_data() + output.update({ + "entity_id": self.entity_id, + "changes": changes + }) + return output + + +class DeleteOperation(AbstractOperation): + """Opeartion to delete an entity. + + Args: + project_name (str): On which project operation will happen. + entity_type (str): Type of entity on which change happens. + e.g. 'asset', 'representation' etc. + entity_id (Union[str, ObjectId]): Entity id that will be removed. + """ + + operation_name = "delete" + + def __init__(self, project_name, entity_type, entity_id): + super(DeleteOperation, self).__init__(project_name, entity_type) + + self._entity_id = ObjectId(entity_id) + + @property + def entity_id(self): + return self._entity_id + + def to_mongo_operation(self): + return DeleteOne({"_id": self.entity_id}) + + def to_data(self): + output = super(DeleteOperation, self).to_data() + output["entity_id"] = self.entity_id + return output + + +class OperationsSession(object): + """Session storing operations that should happen in an order. 
+
+    At this moment it does not handle anything special and can be considered
+    a plain list of operations that will happen one after another. If the
+    same entity is created multiple times, that is not handled in any way,
+    and document values are not validated.
+
+    Operations may be related to multiple projects; on commit they are
+    grouped and committed per project.
+    """
+
+    def __init__(self):
+        self._operations = []
+
+    def add(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+        if not isinstance(
+            operation,
+            (CreateOperation, UpdateOperation, DeleteOperation)
+        ):
+            raise TypeError("Expected Operation object, got {}".format(
+                str(type(operation))
+            ))
+
+        self._operations.append(operation)
+
+    def append(self, operation):
+        """Add operation to be processed.
+
+        Args:
+            operation (AbstractOperation): Operation that should be processed.
+        """
+
+        self.add(operation)
+
+    def extend(self, operations):
+        """Add operations to be processed.
+
+        Args:
+            operations (List[AbstractOperation]): Operations that should be
+                processed.
+        """
+
+        for operation in operations:
+            self.add(operation)
+
+    def remove(self, operation):
+        """Remove operation."""
+
+        self._operations.remove(operation)
+
+    def clear(self):
+        """Clear all registered operations."""
+
+        self._operations = []
+
+    def to_data(self):
+        return [
+            operation.to_data()
+            for operation in self._operations
+        ]
+
+    def commit(self):
+        """Commit session operations."""
+
+        operations, self._operations = self._operations, []
+        if not operations:
+            return
+
+        operations_by_project = collections.defaultdict(list)
+        for operation in operations:
+            operations_by_project[operation.project_name].append(operation)
+
+        for project_name, operations in operations_by_project.items():
+            bulk_writes = []
+            for operation in operations:
+                mongo_op = operation.to_mongo_operation()
+                if mongo_op is not None:
+                    bulk_writes.append(mongo_op)
+
+            if bulk_writes:
+                collection = get_project_connection(project_name)
+                collection.bulk_write(bulk_writes)
+
+    def create_entity(self, project_name, entity_type, data):
+        """Fast access to 'CreateOperation'.
+
+        Returns:
+            CreateOperation: Object of create operation.
+        """
+
+        operation = CreateOperation(project_name, entity_type, data)
+        self.add(operation)
+        return operation
+
+    def update_entity(self, project_name, entity_type, entity_id, update_data):
+        """Fast access to 'UpdateOperation'.
+
+        Returns:
+            UpdateOperation: Object of update operation.
+        """
+
+        operation = UpdateOperation(
+            project_name, entity_type, entity_id, update_data
+        )
+        self.add(operation)
+        return operation
+
+    def delete_entity(self, project_name, entity_type, entity_id):
+        """Fast access to 'DeleteOperation'.
+
+        Returns:
+            DeleteOperation: Object of delete operation.
+        """
+
+        operation = DeleteOperation(project_name, entity_type, entity_id)
+        self.add(operation)
+        return operation
+
+
+def create_project(project_name, project_code, library_project=False):
+    """Create project using OpenPype settings.
+
+    This project creation function does not validate the project document on
+    creation. That is because the project document is created blindly with
+    only the minimum required information about the project, which is its
+    name, code, type and schema.
+
+    The entered project name must be unique and the project must not exist
+    yet.
+
+    Note:
+        This function is here to be OP v4 ready, but in v3 it has more logic
+        to do. That's why the inner imports are in the body.
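To make the session semantics concrete, here is a minimal sketch of batching changes. Project name and ids are hypothetical; nothing touches the database until 'commit' runs the per-project bulk writes:

```python
from bson.objectid import ObjectId

session = OperationsSession()

# Queue a create, an update and a delete (project and ids are made up).
asset_doc = new_asset_document("chair", ObjectId(), None, [])
session.create_entity("demo_project", "asset", asset_doc)
session.update_entity(
    "demo_project", "asset", asset_doc["_id"], {"data.frameStart": 1001}
)
session.delete_entity("demo_project", "version", ObjectId())

# Operations are grouped per project and each group is sent as one
# bulk write when committed.
session.commit()
```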
+ + Args: + project_name(str): New project name. Should be unique. + project_code(str): Project's code should be unique too. + library_project(bool): Project is library project. + + Raises: + ValueError: When project name already exists in MongoDB. + + Returns: + dict: Created project document. + """ + + from openpype.settings import ProjectSettings, SaveWarningExc + from openpype.pipeline.schema import validate + + if get_project(project_name, fields=["name"]): + raise ValueError("Project with name \"{}\" already exists".format( + project_name + )) + + if not PROJECT_NAME_REGEX.match(project_name): + raise ValueError(( + "Project name \"{}\" contain invalid characters" + ).format(project_name)) + + project_doc = { + "type": "project", + "name": project_name, + "data": { + "code": project_code, + "library_project": library_project + }, + "schema": CURRENT_PROJECT_SCHEMA + } + + op_session = OperationsSession() + # Insert document with basic data + create_op = op_session.create_entity( + project_name, project_doc["type"], project_doc + ) + op_session.commit() + + # Load ProjectSettings for the project and save it to store all attributes + # and Anatomy + try: + project_settings_entity = ProjectSettings(project_name) + project_settings_entity.save() + except SaveWarningExc as exc: + print(str(exc)) + except Exception: + op_session.delete_entity( + project_name, project_doc["type"], create_op.entity_id + ) + op_session.commit() + raise + + project_doc = get_project(project_name) + + try: + # Validate created project document + validate(project_doc) + except Exception: + # Remove project if is not valid + op_session.delete_entity( + project_name, project_doc["type"], create_op.entity_id + ) + op_session.commit() + raise + + return project_doc diff --git a/openpype/hooks/pre_add_last_workfile_arg.py b/openpype/hooks/pre_add_last_workfile_arg.py index 8edccd48d4..1c8746c559 100644 --- a/openpype/hooks/pre_add_last_workfile_arg.py +++ b/openpype/hooks/pre_add_last_workfile_arg.py @@ -1,4 +1,5 @@ import os + from openpype.lib import PreLaunchHook @@ -19,6 +20,7 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): "hiero", "houdini", "nukestudio", + "fusion", "blender", "photoshop", "tvpaint", @@ -39,5 +41,13 @@ class AddLastWorkfileToLaunchArgs(PreLaunchHook): self.log.info("Current context does not have any workfile yet.") return + # Determine whether to open workfile post initialization. 
+ if self.host_name == "maya": + key = "open_workfile_post_initialization" + if self.data["project_settings"]["maya"][key]: + self.log.debug("Opening workfile post initialization.") + self.data["env"]["OPENPYPE_" + key.upper()] = "1" + return + # Add path to workfile to arguments self.launch_context.launch_args.append(last_workfile) diff --git a/openpype/hooks/pre_copy_last_published_workfile.py b/openpype/hooks/pre_copy_last_published_workfile.py new file mode 100644 index 0000000000..26b43c39cb --- /dev/null +++ b/openpype/hooks/pre_copy_last_published_workfile.py @@ -0,0 +1,177 @@ +import os +import shutil +from time import sleep +from openpype.client.entities import ( + get_last_version_by_subset_id, + get_representations, + get_subsets, +) +from openpype.lib import PreLaunchHook +from openpype.lib.local_settings import get_local_site_id +from openpype.lib.profiles_filtering import filter_profiles +from openpype.pipeline.load.utils import get_representation_path +from openpype.settings.lib import get_project_settings + + +class CopyLastPublishedWorkfile(PreLaunchHook): + """Copy last published workfile as first workfile. + + Prelaunch hook works only if last workfile leads to not existing file. + - That is possible only if it's first version. + """ + + # Before `AddLastWorkfileToLaunchArgs` + order = -1 + app_groups = ["blender", "photoshop", "tvpaint", "aftereffects"] + + def execute(self): + """Check if local workfile doesn't exist, else copy it. + + 1- Check if setting for this feature is enabled + 2- Check if workfile in work area doesn't exist + 3- Check if published workfile exists and is copied locally in publish + 4- Substitute copied published workfile as first workfile + + Returns: + None: This is a void method. + """ + + sync_server = self.modules_manager.get("sync_server") + if not sync_server or not sync_server.enabled: + self.log.debug("Sync server module is not enabled or available") + return + + # Check there is no workfile available + last_workfile = self.data.get("last_workfile_path") + if os.path.exists(last_workfile): + self.log.debug( + "Last workfile exists. Skipping {} process.".format( + self.__class__.__name__ + ) + ) + return + + # Get data + project_name = self.data["project_name"] + task_name = self.data["task_name"] + task_type = self.data["task_type"] + host_name = self.application.host_name + + # Check settings has enabled it + project_settings = get_project_settings(project_name) + profiles = project_settings["global"]["tools"]["Workfiles"][ + "last_workfile_on_startup" + ] + filter_data = { + "tasks": task_name, + "task_types": task_type, + "hosts": host_name, + } + last_workfile_settings = filter_profiles(profiles, filter_data) + use_last_published_workfile = last_workfile_settings.get( + "use_last_published_workfile" + ) + if use_last_published_workfile is None: + self.log.info( + ( + "Seems like old version of settings is used." 
+ ' Can\'t access custom templates in host "{}".'.format( + host_name + ) + ) + ) + return + elif use_last_published_workfile is False: + self.log.info( + ( + 'Project "{}" has turned off to use last published' + ' workfile as first workfile for host "{}"'.format( + project_name, host_name + ) + ) + ) + return + + self.log.info("Trying to fetch last published workfile...") + + project_doc = self.data.get("project_doc") + asset_doc = self.data.get("asset_doc") + anatomy = self.data.get("anatomy") + + # Check it can proceed + if not project_doc and not asset_doc: + return + + # Get subset id + subset_id = next( + ( + subset["_id"] + for subset in get_subsets( + project_name, + asset_ids=[asset_doc["_id"]], + fields=["_id", "data.family", "data.families"], + ) + if subset["data"].get("family") == "workfile" + # Legacy compatibility + or "workfile" in subset["data"].get("families", {}) + ), + None, + ) + if not subset_id: + self.log.debug( + 'No any workfile for asset "{}".'.format(asset_doc["name"]) + ) + return + + # Get workfile representation + last_version_doc = get_last_version_by_subset_id( + project_name, subset_id, fields=["_id"] + ) + if not last_version_doc: + self.log.debug("Subset does not have any versions") + return + + workfile_representation = next( + ( + representation + for representation in get_representations( + project_name, version_ids=[last_version_doc["_id"]] + ) + if representation["context"]["task"]["name"] == task_name + ), + None, + ) + + if not workfile_representation: + self.log.debug( + 'No published workfile for task "{}" and host "{}".'.format( + task_name, host_name + ) + ) + return + + local_site_id = get_local_site_id() + sync_server.add_site( + project_name, + workfile_representation["_id"], + local_site_id, + force=True, + priority=99, + reset_timer=True, + ) + + while not sync_server.is_representation_on_site( + project_name, workfile_representation["_id"], local_site_id + ): + sleep(5) + + # Get paths + published_workfile_path = get_representation_path( + workfile_representation, root=anatomy.roots + ) + local_workfile_dir = os.path.dirname(last_workfile) + + # Copy file and substitute path + self.data["last_workfile_path"] = shutil.copy( + published_workfile_path, local_workfile_dir + ) diff --git a/openpype/hooks/pre_copy_template_workfile.py b/openpype/hooks/pre_copy_template_workfile.py index dffac22ee2..70c549919f 100644 --- a/openpype/hooks/pre_copy_template_workfile.py +++ b/openpype/hooks/pre_copy_template_workfile.py @@ -1,11 +1,11 @@ import os import shutil -from openpype.lib import ( - PreLaunchHook, - get_custom_workfile_template_by_context, +from openpype.lib import PreLaunchHook +from openpype.settings import get_project_settings +from openpype.pipeline.workfile import ( + get_custom_workfile_template, get_custom_workfile_template_by_string_context ) -from openpype.settings import get_project_settings class CopyTemplateWorkfile(PreLaunchHook): @@ -54,41 +54,22 @@ class CopyTemplateWorkfile(PreLaunchHook): project_name = self.data["project_name"] asset_name = self.data["asset_name"] task_name = self.data["task_name"] + host_name = self.application.host_name project_settings = get_project_settings(project_name) - host_settings = project_settings[self.application.host_name] - - workfile_builder_settings = host_settings.get("workfile_builder") - if not workfile_builder_settings: - # TODO remove warning when deprecated - self.log.warning(( - "Seems like old version of settings is used." - " Can't access custom templates in host \"{}\"." 
- ).format(self.application.full_label)) - return - - if not workfile_builder_settings["create_first_version"]: - self.log.info(( - "Project \"{}\" has turned off to create first workfile for" - " application \"{}\"" - ).format(project_name, self.application.full_label)) - return - - # Backwards compatibility - template_profiles = workfile_builder_settings.get("custom_templates") - if not template_profiles: - self.log.info( - "Custom templates are not filled. Skipping template copy." - ) - return project_doc = self.data.get("project_doc") asset_doc = self.data.get("asset_doc") anatomy = self.data.get("anatomy") if project_doc and asset_doc: self.log.debug("Started filtering of custom template paths.") - template_path = get_custom_workfile_template_by_context( - template_profiles, project_doc, asset_doc, task_name, anatomy + template_path = get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy, + project_settings ) else: @@ -96,10 +77,13 @@ class CopyTemplateWorkfile(PreLaunchHook): "Global data collection probably did not execute." " Using backup solution." )) - dbcon = self.data.get("dbcon") template_path = get_custom_workfile_template_by_string_context( - template_profiles, project_name, asset_name, task_name, - dbcon, anatomy + project_name, + asset_name, + task_name, + host_name, + anatomy, + project_settings ) if not template_path: diff --git a/openpype/hooks/pre_create_extra_workdir_folders.py b/openpype/hooks/pre_create_extra_workdir_folders.py index d79c5831ee..c5af620c87 100644 --- a/openpype/hooks/pre_create_extra_workdir_folders.py +++ b/openpype/hooks/pre_create_extra_workdir_folders.py @@ -1,8 +1,6 @@ import os -from openpype.lib import ( - PreLaunchHook, - create_workdir_extra_folders -) +from openpype.lib import PreLaunchHook +from openpype.pipeline.workfile import create_workdir_extra_folders class AddLastWorkfileToLaunchArgs(PreLaunchHook): diff --git a/openpype/hooks/pre_global_host_data.py b/openpype/hooks/pre_global_host_data.py index 4c85a511ed..8a178915fb 100644 --- a/openpype/hooks/pre_global_host_data.py +++ b/openpype/hooks/pre_global_host_data.py @@ -1,12 +1,11 @@ -from openpype.api import Anatomy +from openpype.client import get_project, get_asset_by_name from openpype.lib import ( PreLaunchHook, EnvironmentPrepData, prepare_app_environments, prepare_context_environments ) - -import avalon.api +from openpype.pipeline import AvalonMongoDB, Anatomy class GlobalHostDataHook(PreLaunchHook): @@ -64,14 +63,14 @@ class GlobalHostDataHook(PreLaunchHook): self.data["anatomy"] = Anatomy(project_name) # Mongo connection - dbcon = avalon.api.AvalonMongoDB() + dbcon = AvalonMongoDB() dbcon.Session["AVALON_PROJECT"] = project_name dbcon.install() self.data["dbcon"] = dbcon # Project document - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) self.data["project_doc"] = project_doc asset_name = self.data.get("asset_name") @@ -81,8 +80,5 @@ class GlobalHostDataHook(PreLaunchHook): ) return - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) self.data["asset_doc"] = asset_doc diff --git a/openpype/host/__init__.py b/openpype/host/__init__.py new file mode 100644 index 0000000000..da1237c739 --- /dev/null +++ b/openpype/host/__init__.py @@ -0,0 +1,24 @@ +from .host import ( + HostBase, +) + +from .interfaces import ( + IWorkfileHost, + ILoadHost, + IPublishHost, + INewPublisher, +) + +from .dirmap import HostDirmap + + 
+__all__ = (
+    "HostBase",
+
+    "IWorkfileHost",
+    "ILoadHost",
+    "IPublishHost",
+    "INewPublisher",
+
+    "HostDirmap",
+)
diff --git a/openpype/host/dirmap.py b/openpype/host/dirmap.py
new file mode 100644
index 0000000000..347c5fbf85
--- /dev/null
+++ b/openpype/host/dirmap.py
@@ -0,0 +1,214 @@
+"""Dirmap functionality used in host integrations inside DCCs.
+
+The idea for the current dirmap implementation comes from Maya, where it is
+possible to enter source and destination roots. Maya then tries to replace
+each source path found in referenced files with the destination paths. The
+first path that exists is used.
+"""
+
+import os
+from abc import ABCMeta, abstractmethod
+import platform
+
+import six
+
+from openpype.lib import Logger
+from openpype.modules import ModulesManager
+from openpype.settings import get_project_settings
+from openpype.settings.lib import get_site_local_overrides
+
+
+@six.add_metaclass(ABCMeta)
+class HostDirmap(object):
+    """Abstract class for running dirmap on a workfile in a host.
+
+    Dirmap is used to translate paths inside of a host workfile from one
+    OS to another. (E.g. an artist created a workfile on Windows and a
+    different artist opens the same file on Linux.)
+
+    Expects methods to be implemented inside of host:
+        on_enable_dirmap: run host code for enabling dirmap
+        dirmap_routine: run host code to do the actual remapping
+    """
+
+    def __init__(
+        self, host_name, project_name, project_settings=None, sync_module=None
+    ):
+        self.host_name = host_name
+        self.project_name = project_name
+        self._project_settings = project_settings
+        self._sync_module = sync_module  # to limit reinit of Modules
+        self._log = None
+        self._mapping = None  # cache mapping
+
+    @property
+    def sync_module(self):
+        if self._sync_module is None:
+            manager = ModulesManager()
+            self._sync_module = manager["sync_server"]
+        return self._sync_module
+
+    @property
+    def project_settings(self):
+        if self._project_settings is None:
+            self._project_settings = get_project_settings(self.project_name)
+        return self._project_settings
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    @abstractmethod
+    def on_enable_dirmap(self):
+        """Run host dependent operation for enabling dirmap if necessary."""
+        pass
+
+    @abstractmethod
+    def dirmap_routine(self, source_path, destination_path):
+        """Run host dependent remapping from source_path to destination_path."""
+        pass
+
+    def process_dirmap(self):
+        """Go through all paths in Settings and set them using `dirmap`.
+
+        If the artist has Site Sync enabled, the dirmap mapping is taken
+        directly from Local Settings when the artist is syncing the workfile
+        locally.
+        """
+
+        if not self._mapping:
+            self._mapping = self.get_mappings(self.project_settings)
+        if not self._mapping:
+            return
+
+        self.log.info("Processing directory mapping ...")
+        self.on_enable_dirmap()
+        self.log.info("mapping:: {}".format(self._mapping))
+
+        for k, sp in enumerate(self._mapping["source-path"]):
+            dst = self._mapping["destination-path"][k]
+            try:
+                print("{} -> {}".format(sp, dst))
+                self.dirmap_routine(sp, dst)
+            except IndexError:
+                # missing corresponding destination path
+                self.log.error((
+                    "invalid dirmap mapping, missing corresponding"
+                    " destination directory."
+ )) + break + except RuntimeError: + self.log.error( + "invalid path {} -> {}, mapping not registered".format( + sp, dst + ) + ) + continue + + def get_mappings(self, project_settings): + """Get translation from source-path to destination-path. + + It checks if Site Sync is enabled and user chose to use local + site, in that case configuration in Local Settings takes precedence + """ + + local_mapping = self._get_local_sync_dirmap(project_settings) + dirmap_label = "{}-dirmap".format(self.host_name) + if ( + not self.project_settings[self.host_name].get(dirmap_label) + and not local_mapping + ): + return {} + mapping_settings = self.project_settings[self.host_name][dirmap_label] + mapping_enabled = mapping_settings["enabled"] or bool(local_mapping) + if not mapping_enabled: + return {} + + mapping = ( + local_mapping + or mapping_settings["paths"] + or {} + ) + + if ( + not mapping + or not mapping.get("destination-path") + or not mapping.get("source-path") + ): + return {} + return mapping + + def _get_local_sync_dirmap(self, project_settings): + """ + Returns dirmap if synch to local project is enabled. + + Only valid mapping is from roots of remote site to local site set + in Local Settings. + + Args: + project_settings (dict) + Returns: + dict : { "source-path": [XXX], "destination-path": [YYYY]} + """ + + mapping = {} + + if not project_settings["global"]["sync_server"]["enabled"]: + return mapping + + project_name = os.getenv("AVALON_PROJECT") + + active_site = self.sync_module.get_local_normalized_site( + self.sync_module.get_active_site(project_name)) + remote_site = self.sync_module.get_local_normalized_site( + self.sync_module.get_remote_site(project_name)) + self.log.debug( + "active {} - remote {}".format(active_site, remote_site) + ) + + if ( + active_site == "local" + and project_name in self.sync_module.get_enabled_projects() + and active_site != remote_site + ): + sync_settings = self.sync_module.get_sync_project_setting( + project_name, + exclude_locals=False, + cached=False) + + active_overrides = get_site_local_overrides( + project_name, active_site) + remote_overrides = get_site_local_overrides( + project_name, remote_site) + + self.log.debug("local overrides {}".format(active_overrides)) + self.log.debug("remote overrides {}".format(remote_overrides)) + current_platform = platform.system().lower() + for root_name, active_site_dir in active_overrides.items(): + remote_site_dir = ( + remote_overrides.get(root_name) + or sync_settings["sites"][remote_site]["root"][root_name] + ) + + if isinstance(remote_site_dir, dict): + remote_site_dir = remote_site_dir.get(current_platform) + + if not remote_site_dir: + continue + + if os.path.isdir(active_site_dir): + if "destination-path" not in mapping: + mapping["destination-path"] = [] + mapping["destination-path"].append(active_site_dir) + + if "source-path" not in mapping: + mapping["source-path"] = [] + mapping["source-path"].append(remote_site_dir) + + self.log.debug("local sync mapping:: {}".format(mapping)) + return mapping diff --git a/openpype/host/host.py b/openpype/host/host.py new file mode 100644 index 0000000000..d2335c0062 --- /dev/null +++ b/openpype/host/host.py @@ -0,0 +1,194 @@ +import os +import logging +import contextlib +from abc import ABCMeta, abstractproperty +import six + +# NOTE can't import 'typing' because of issues in Maya 2020 +# - shiboken crashes on 'typing' module import + + +@six.add_metaclass(ABCMeta) +class HostBase(object): + """Base of host implementation class. 
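Before the 'HostBase' details continue, a quick illustration of the 'HostDirmap' contract defined above: a subclass only has to provide the two abstract methods. A minimal sketch for a hypothetical host; the names and the print-based routine are made up, and project settings are assumed to be reachable:

```python
class DemoDirmap(HostDirmap):
    def on_enable_dirmap(self):
        # Nothing to switch on for this imaginary DCC.
        pass

    def dirmap_routine(self, source_path, destination_path):
        # A real host would call its DCC API here.
        print("remap {} -> {}".format(source_path, destination_path))

dirmap = DemoDirmap("demo_host", "demo_project")
dirmap.process_dirmap()
```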
+
+    Host is a pipeline implementation of a DCC application. This class should
+    help to identify what must/should/can be implemented for specific
+    functionality.
+
+    Compared to the 'avalon' concept:
+    What was before a set of functions in a host implementation folder is now
+    a class. The host implementation should primarily care about adding the
+    ability of creation (marking subsets to be published) and optionally
+    about referencing published representations as containers.
+
+    A host may need to extend some functionality, like working with workfiles
+    or loading. Not all host implementations may allow that; for those
+    purposes the logic can be extended by implementing the related functions.
+    There are prepared interfaces to identify what must be implemented to be
+    able to use that functionality.
+    - the current statement is that it is not required to inherit from the
+        interfaces, but all of the methods are validated (only their
+        existence!)
+
+    # Installation of host before (avalon concept):
+    ```python
+    from openpype.pipeline import install_host
+    import openpype.hosts.maya.api as host
+
+    install_host(host)
+    ```
+
+    # Installation of host now:
+    ```python
+    from openpype.pipeline import install_host
+    from openpype.hosts.maya.api import MayaHost
+
+    host = MayaHost()
+    install_host(host)
+    ```
+
+    Todo:
+        - move content of 'install_host' to a method of this class
+            - register host object
+            - install legacy_io
+            - install global plugin paths
+                - store registered plugin paths to this object
+        - handle current context (project, asset, task)
+            - this must be done in many separated steps
+        - have its own object of host tools instead of using globals
+
+        This implementation will probably change over time as more
+        functionality and responsibility is added.
+    """
+
+    _log = None
+
+    def __init__(self):
+        """Initialization of host.
+
+        Register DCC callbacks, host specific plugin paths, targets etc.
+        (Part of what 'install' did in the 'avalon' concept.)
+
+        Note:
+            At this moment the global "installation" must happen before host
+            installation. Because of this current limitation it is
+            recommended to implement an 'install' method which is triggered
+            after the global 'install'.
+        """
+
+        pass
+
+    def install(self):
+        """Install host specific functionality.
+
+        This is where the menu with tools should be added, callbacks
+        registered and other host integration initialized.
+
+        It is called automatically when 'openpype.pipeline.install_host' is
+        triggered.
+        """
+
+        pass
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = logging.getLogger(self.__class__.__name__)
+        return self._log
+
+    @abstractproperty
+    def name(self):
+        """Host name."""
+
+        pass
+
+    def get_current_project_name(self):
+        """
+        Returns:
+            Union[str, None]: Current project name.
+        """
+
+        return os.environ.get("AVALON_PROJECT")
+
+    def get_current_asset_name(self):
+        """
+        Returns:
+            Union[str, None]: Current asset name.
+        """
+
+        return os.environ.get("AVALON_ASSET")
+
+    def get_current_task_name(self):
+        """
+        Returns:
+            Union[str, None]: Current task name.
+        """
+
+        return os.environ.get("AVALON_TASK")
+
+    def get_current_context(self):
+        """Get current context information.
+
+        This method should be used to get the current context of the host.
+        Usage of this method can be crucial for host implementations in DCCs
+        where multiple workfiles can be opened at one moment and a change of
+        context can't be caught properly.
+
+        Default implementation returns the values of the 'AVALON_PROJECT',
+        'AVALON_ASSET' and 'AVALON_TASK' environment variables.
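A minimal 'HostBase' subclass only has to provide 'name'; everything else has usable defaults, as shown below. A sketch; the host name and context values are made up:

```python
import os

class DemoHost(HostBase):
    name = "demo"

os.environ["AVALON_PROJECT"] = "demo_project"
os.environ["AVALON_ASSET"] = "chair"
os.environ["AVALON_TASK"] = "modeling"

host = DemoHost()
print(host.get_context_title())  # demo_project/chair/modeling
```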
+
+        Returns:
+            Dict[str, Union[str, None]]: Context with 3 keys 'project_name',
+                'asset_name' and 'task_name'. All of them can be 'None'.
+        """
+
+        return {
+            "project_name": self.get_current_project_name(),
+            "asset_name": self.get_current_asset_name(),
+            "task_name": self.get_current_task_name()
+        }
+
+    def get_context_title(self):
+        """Context title shown for UI purposes.
+
+        Should return the current context title if possible.
+
+        Note:
+            This method is used only for UI purposes, so it is possible to
+            return some logical title for contextless cases.
+            It is not meant for a "Context menu" label.
+
+        Returns:
+            str: Context title.
+            None: Default title is used based on UI implementation.
+        """
+
+        # Use current context to fill the context title
+        current_context = self.get_current_context()
+        project_name = current_context["project_name"]
+        asset_name = current_context["asset_name"]
+        task_name = current_context["task_name"]
+        items = []
+        if project_name:
+            items.append(project_name)
+            if asset_name:
+                items.append(asset_name)
+                if task_name:
+                    items.append(task_name)
+        if items:
+            return "/".join(items)
+        return None
+
+    @contextlib.contextmanager
+    def maintained_selection(self):
+        """Some functionality will happen but the selection should stay same.
+
+        This is DCC specific. Some DCCs may not allow implementing this
+        ability; that is the reason why the default implementation is an
+        empty context manager.
+
+        Yields:
+            None: Yields once the context is entered; the selection is
+                restored on exit.
+        """
+
+        try:
+            yield
+        finally:
+            pass
diff --git a/openpype/host/interfaces.py b/openpype/host/interfaces.py
new file mode 100644
index 0000000000..999aefd254
--- /dev/null
+++ b/openpype/host/interfaces.py
@@ -0,0 +1,386 @@
+from abc import ABCMeta, abstractmethod
+import six
+
+
+class MissingMethodsError(ValueError):
+    """Exception raised when a host is missing required methods.
+
+    Args:
+        host (HostBase): Host implementation where methods are missing.
+        missing_methods (list[str]): List of missing methods.
+    """
+
+    def __init__(self, host, missing_methods):
+        joined_missing = ", ".join(
+            ['"{}"'.format(item) for item in missing_methods]
+        )
+        host_name = getattr(host, "name", None)
+        if not host_name:
+            try:
+                host_name = host.__file__.replace("\\", "/").split("/")[-3]
+            except Exception:
+                host_name = str(host)
+        message = (
+            "Host \"{}\" is missing methods {}".format(
+                host_name, joined_missing
+            )
+        )
+        super(MissingMethodsError, self).__init__(message)
+
+
+class ILoadHost:
+    """Implementation requirements to be able to reference representations.
+
+    The load plugins can do referencing even without implementation of the
+    methods here, but switching and removal of containers would not be
+    possible.
+
+    Questions:
+        - Is listing containers a dependency of the host or of load plugins?
+        - Should this be directly in HostBase?
+            - how to find out if referencing is available?
+            - do we need to know that?
+    """
+
+    @staticmethod
+    def get_missing_load_methods(host):
+        """Look for missing methods on an "old type" host implementation.
+
+        Method is used for validation of implemented functions related to
+        loading. Checks only the existence of the methods.
+
+        Args:
+            host (Union[ModuleType, HostBase]): Object of host where to look
+                for required methods.
+
+        Returns:
+            list[str]: Missing method implementations for loading workflow.
+ """ + + if isinstance(host, ILoadHost): + return [] + + required = ["ls"] + missing = [] + for name in required: + if not hasattr(host, name): + missing.append(name) + return missing + + @staticmethod + def validate_load_methods(host): + """Validate implemented methods of "old type" host for load workflow. + + Args: + Union[ModuleType, HostBase]: Object of host to validate. + + Raises: + MissingMethodsError: If there are missing methods on host + implementation. + """ + missing = ILoadHost.get_missing_load_methods(host) + if missing: + raise MissingMethodsError(host, missing) + + @abstractmethod + def get_containers(self): + """Retreive referenced containers from scene. + + This can be implemented in hosts where referencing can be used. + + Todo: + Rename function to something more self explanatory. + Suggestion: 'get_containers' + + Returns: + list[dict]: Information about loaded containers. + """ + + pass + + # --- Deprecated method names --- + def ls(self): + """Deprecated variant of 'get_containers'. + + Todo: + Remove when all usages are replaced. + """ + + return self.get_containers() + + +@six.add_metaclass(ABCMeta) +class IWorkfileHost: + """Implementation requirements to be able use workfile utils and tool.""" + + @staticmethod + def get_missing_workfile_methods(host): + """Look for missing methods on "old type" host implementation. + + Method is used for validation of implemented functions related to + workfiles. Checks only existence of methods. + + Args: + Union[ModuleType, HostBase]: Object of host where to look for + required methods. + + Returns: + list[str]: Missing method implementations for workfiles workflow. + """ + + if isinstance(host, IWorkfileHost): + return [] + + required = [ + "open_file", + "save_file", + "current_file", + "has_unsaved_changes", + "file_extensions", + "work_root", + ] + missing = [] + for name in required: + if not hasattr(host, name): + missing.append(name) + return missing + + @staticmethod + def validate_workfile_methods(host): + """Validate methods of "old type" host for workfiles workflow. + + Args: + Union[ModuleType, HostBase]: Object of host to validate. + + Raises: + MissingMethodsError: If there are missing methods on host + implementation. + """ + + missing = IWorkfileHost.get_missing_workfile_methods(host) + if missing: + raise MissingMethodsError(host, missing) + + @abstractmethod + def get_workfile_extensions(self): + """Extensions that can be used as save. + + Questions: + This could potentially use 'HostDefinition'. + """ + + return [] + + @abstractmethod + def save_workfile(self, dst_path=None): + """Save currently opened scene. + + Args: + dst_path (str): Where the current scene should be saved. Or use + current path if 'None' is passed. + """ + + pass + + @abstractmethod + def open_workfile(self, filepath): + """Open passed filepath in the host. + + Args: + filepath (str): Path to workfile. + """ + + pass + + @abstractmethod + def get_current_workfile(self): + """Retreive path to current opened file. + + Returns: + str: Path to file which is currently opened. + None: If nothing is opened. + """ + + return None + + def workfile_has_unsaved_changes(self): + """Currently opened scene is saved. + + Not all hosts can know if current scene is saved because the API of + DCC does not support it. + + Returns: + bool: True if scene is saved and False if has unsaved + modifications. + None: Can't tell if workfiles has modifications. + """ + + return None + + def work_root(self, session): + """Modify workdir per host. 
+ + Default implementation keeps workdir untouched. + + Warnings: + We must handle this modification with more sofisticated way because + this can't be called out of DCC so opening of last workfile + (calculated before DCC is launched) is complicated. Also breaking + defined work template is not a good idea. + Only place where it's really used and can make sense is Maya. There + workspace.mel can modify subfolders where to look for maya files. + + Args: + session (dict): Session context data. + + Returns: + str: Path to new workdir. + """ + + return session["AVALON_WORKDIR"] + + # --- Deprecated method names --- + def file_extensions(self): + """Deprecated variant of 'get_workfile_extensions'. + + Todo: + Remove when all usages are replaced. + """ + return self.get_workfile_extensions() + + def save_file(self, dst_path=None): + """Deprecated variant of 'save_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + self.save_workfile(dst_path) + + def open_file(self, filepath): + """Deprecated variant of 'open_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + return self.open_workfile(filepath) + + def current_file(self): + """Deprecated variant of 'get_current_workfile'. + + Todo: + Remove when all usages are replaced. + """ + + return self.get_current_workfile() + + def has_unsaved_changes(self): + """Deprecated variant of 'workfile_has_unsaved_changes'. + + Todo: + Remove when all usages are replaced. + """ + + return self.workfile_has_unsaved_changes() + + +class IPublishHost: + """Functions related to new creation system in new publisher. + + New publisher is not storing information only about each created instance + but also some global data. At this moment are data related only to context + publish plugins but that can extend in future. + """ + + @staticmethod + def get_missing_publish_methods(host): + """Look for missing methods on "old type" host implementation. + + Method is used for validation of implemented functions related to + new publish creation. Checks only existence of methods. + + Args: + Union[ModuleType, HostBase]: Host module where to look for + required methods. + + Returns: + list[str]: Missing method implementations for new publsher + workflow. + """ + + if isinstance(host, IPublishHost): + return [] + + required = [ + "get_context_data", + "update_context_data", + "get_context_title", + "get_current_context", + ] + missing = [] + for name in required: + if not hasattr(host, name): + missing.append(name) + return missing + + @staticmethod + def validate_publish_methods(host): + """Validate implemented methods of "old type" host. + + Args: + Union[ModuleType, HostBase]: Host module to validate. + + Raises: + MissingMethodsError: If there are missing methods on host + implementation. + """ + missing = IPublishHost.get_missing_publish_methods(host) + if missing: + raise MissingMethodsError(host, missing) + + @abstractmethod + def get_context_data(self): + """Get global data related to creation-publishing from workfile. + + These data are not related to any created instance but to whole + publishing context. Not saving/returning them will cause that each + reset of publishing resets all values to default ones. + + Context data can contain information about enabled/disabled publish + plugins or other values that can be filled by artist. + + Returns: + dict: Context data stored using 'update_context_data'. + """ + + pass + + @abstractmethod + def update_context_data(self, data, changes): + """Store global context data to workfile. 
+
+        Called when some values in context data have changed.
+
+        Without storing the values in a way that 'get_context_data' can
+        return them, each reset of publishing will cause the loss of values
+        filled by the artist. Best practice is to store the values into the
+        workfile, if possible.
+
+        Args:
+            data (dict): New data as are.
+            changes (dict): Only data that has been changed. Each value is
+                a tuple of '(<old>, <new>)' values.
+        """
+
+        pass
+
+
+class INewPublisher(IPublishHost):
+    """Legacy interface replaced by 'IPublishHost'.
+
+    Deprecated:
+        'INewPublisher' is replaced by 'IPublishHost'. Please change your
+        imports.
+        There is no "reasonable" way to mark these classes as deprecated
+        and show a warning about the wrong import. Deprecated since 3.14.*,
+        will be removed in 3.15.*
+    """
+
+    pass
diff --git a/openpype/hosts/aftereffects/__init__.py b/openpype/hosts/aftereffects/__init__.py
index deae48d122..ae750d05b6 100644
--- a/openpype/hosts/aftereffects/__init__.py
+++ b/openpype/hosts/aftereffects/__init__.py
@@ -1,9 +1,6 @@
-def add_implementation_envs(env, _app):
-    """Modify environments to contain all required for implementation."""
-    defaults = {
-        "OPENPYPE_LOG_NO_COLORS": "True",
-        "WEBSOCKET_URL": "ws://localhost:8097/ws/"
-    }
-    for key, value in defaults.items():
-        if not env.get(key):
-            env[key] = value
+from .addon import AfterEffectsAddon
+
+
+__all__ = (
+    "AfterEffectsAddon",
+)
diff --git a/openpype/hosts/aftereffects/addon.py b/openpype/hosts/aftereffects/addon.py
new file mode 100644
index 0000000000..79df550312
--- /dev/null
+++ b/openpype/hosts/aftereffects/addon.py
@@ -0,0 +1,22 @@
+from openpype.modules import OpenPypeModule, IHostAddon
+
+
+class AfterEffectsAddon(OpenPypeModule, IHostAddon):
+    name = "aftereffects"
+    host_name = "aftereffects"
+
+    def initialize(self, module_settings):
+        self.enabled = True
+
+    def add_implementation_envs(self, env, _app):
+        """Modify environments to contain all required for implementation."""
+        defaults = {
+            "OPENPYPE_LOG_NO_COLORS": "True",
+            "WEBSOCKET_URL": "ws://localhost:8097/ws/"
+        }
+        for key, value in defaults.items():
+            if not env.get(key):
+                env[key] = value
+
+    def get_workfile_extensions(self):
+        return [".aep"]
diff --git a/openpype/hosts/aftereffects/api/__init__.py b/openpype/hosts/aftereffects/api/__init__.py
index cea1bdc023..a7137ba8fb 100644
--- a/openpype/hosts/aftereffects/api/__init__.py
+++ b/openpype/hosts/aftereffects/api/__init__.py
@@ -10,27 +10,15 @@ from .launch_logic import (
 )
 
 from .pipeline import (
+    AfterEffectsHost,
     ls,
-    get_asset_settings,
-    install,
-    uninstall,
-    list_instances,
-    remove_instance,
     containerise
 )
 
-from .workio import (
-    file_extensions,
-    has_unsaved_changes,
-    save_file,
-    open_file,
-    current_file,
-    work_root,
-)
-
 from .lib import (
     maintained_selection,
-    get_extension_manifest_path
+    get_extension_manifest_path,
+    get_asset_settings
 )
 
 from .plugin import (
@@ -45,23 +33,12 @@ __all__ = [
 
     # pipeline
     "ls",
-    "get_asset_settings",
-    "install",
-    "uninstall",
-    "list_instances",
-    "remove_instance",
     "containerise",
 
-    "file_extensions",
-    "has_unsaved_changes",
-    "save_file",
-    "open_file",
-    "current_file",
-    "work_root",
-
     # lib
     "maintained_selection",
    "get_extension_manifest_path",
+    "get_asset_settings",
 
     # plugin
     "AfterEffectsLoader"
diff --git a/openpype/hosts/aftereffects/api/extension.zxp b/openpype/hosts/aftereffects/api/extension.zxp
index 389d74505d..b436f0ca0b 100644
Binary files a/openpype/hosts/aftereffects/api/extension.zxp and b/openpype/hosts/aftereffects/api/extension.zxp differ
diff
--git a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml index 668cb3fc24..f96e80c503 100644 --- a/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml +++ b/openpype/hosts/aftereffects/api/extension/CSXS/manifest.xml @@ -1,5 +1,5 @@ - diff --git a/openpype/hosts/aftereffects/api/extension/index.html b/openpype/hosts/aftereffects/api/extension/index.html index 9e39bf1acc..52a7c4964f 100644 --- a/openpype/hosts/aftereffects/api/extension/index.html +++ b/openpype/hosts/aftereffects/api/extension/index.html @@ -38,17 +38,6 @@ }); - - - - - - - - - - diff --git a/openpype/hosts/photoshop/api/launch_logic.py b/openpype/hosts/photoshop/api/launch_logic.py index 0021905cb5..a4377a9972 100644 --- a/openpype/hosts/photoshop/api/launch_logic.py +++ b/openpype/hosts/photoshop/api/launch_logic.py @@ -8,12 +8,11 @@ from wsrpc_aiohttp import ( WebSocketAsync ) -from Qt import QtCore +from qtpy import QtCore -from openpype.api import Logger +from openpype.lib import Logger +from openpype.pipeline import legacy_io from openpype.tools.utils import host_tools - -from avalon import api from openpype.tools.adobe_webserver.app import WebServerTool from .ws_stub import PhotoshopServerStub @@ -320,13 +319,13 @@ class PhotoshopRoute(WebSocketRoute): log.info("Setting context change") log.info("project {} asset {} ".format(project, asset)) if project: - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project os.environ["AVALON_PROJECT"] = project if asset: - api.Session["AVALON_ASSET"] = asset + legacy_io.Session["AVALON_ASSET"] = asset os.environ["AVALON_ASSET"] = asset if task: - api.Session["AVALON_TASK"] = task + legacy_io.Session["AVALON_TASK"] = task os.environ["AVALON_TASK"] = task async def read(self): @@ -335,9 +334,6 @@ class PhotoshopRoute(WebSocketRoute): return await self.socket.call('photoshop.read') # panel routes for tools - async def creator_route(self): - self._tool_route("creator") - async def workfiles_route(self): self._tool_route("workfiles") @@ -345,14 +341,11 @@ class PhotoshopRoute(WebSocketRoute): self._tool_route("loader") async def publish_route(self): - self._tool_route("publish") + self._tool_route("publisher") async def sceneinventory_route(self): self._tool_route("sceneinventory") - async def subsetmanager_route(self): - self._tool_route("subsetmanager") - async def experimental_tools_route(self): self._tool_route("experimental_tools") diff --git a/openpype/hosts/photoshop/api/lib.py b/openpype/hosts/photoshop/api/lib.py index 6d2a493a94..ff520348f0 100644 --- a/openpype/hosts/photoshop/api/lib.py +++ b/openpype/hosts/photoshop/api/lib.py @@ -3,14 +3,13 @@ import sys import contextlib import traceback -from Qt import QtWidgets +from qtpy import QtWidgets -import avalon.api - -from openpype.api import Logger +from openpype.lib import env_value_to_bool, Logger +from openpype.modules import ModulesManager +from openpype.pipeline import install_host from openpype.tools.utils import host_tools -from openpype.lib.remote_publish import headless_publish -from openpype.lib import env_value_to_bool +from openpype.tests.lib import is_in_tests from .launch_logic import ProcessLauncher, stub @@ -22,9 +21,11 @@ def safe_excepthook(*args): def main(*subprocess_args): - from openpype.hosts.photoshop import api + from openpype.hosts.photoshop.api import PhotoshopHost + + host = PhotoshopHost() + install_host(host) - avalon.api.install(api) sys.excepthook = safe_excepthook # coloring in 
StdOutBroker @@ -36,11 +37,13 @@ def main(*subprocess_args): launcher.start() if env_value_to_bool("HEADLESS_PUBLISH"): + manager = ModulesManager() + webpublisher_addon = manager["webpublisher"] launcher.execute_in_main_thread( - headless_publish, + webpublisher_addon.headless_publish, log, "ClosePS", - os.environ.get("IS_TEST") + is_in_tests() ) elif env_value_to_bool("AVALON_PHOTOSHOP_WORKFILES_ON_LAUNCH", default=True): @@ -64,10 +67,15 @@ def maintained_selection(): @contextlib.contextmanager -def maintained_visibility(): - """Maintain visibility during context.""" +def maintained_visibility(layers=None): + """Maintain visibility during context. + + Args: + layers (list) of PSItem (used for caching) + """ visibility = {} - layers = stub().get_layers() + if not layers: + layers = stub().get_layers() for layer in layers: visibility[layer.id] = layer.visible try: diff --git a/openpype/hosts/photoshop/api/pipeline.py b/openpype/hosts/photoshop/api/pipeline.py index c2ad0ac7b0..73dc80260c 100644 --- a/openpype/hosts/photoshop/api/pipeline.py +++ b/openpype/hosts/photoshop/api/pipeline.py @@ -1,53 +1,183 @@ import os -from Qt import QtWidgets -from bson.objectid import ObjectId + +from qtpy import QtWidgets import pyblish.api -import avalon.api -from avalon import io -from openpype.api import Logger -from openpype.lib import register_event_callback +from openpype.lib import register_event_callback, Logger from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -import openpype.hosts.photoshop + +from openpype.host import ( + HostBase, + IWorkfileHost, + ILoadHost, + IPublishHost +) + +from openpype.pipeline.load import any_outdated_containers +from openpype.hosts.photoshop import PHOTOSHOP_HOST_DIR from . import lib log = Logger.get_logger(__name__) -HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.photoshop.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +PLUGINS_DIR = os.path.join(PHOTOSHOP_HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost): + name = "photoshop" + + def install(self): + """Install Photoshop-specific functionality needed for integration. + + This function is called automatically on calling + `api.install(photoshop)`. 
+ """ + log.info("Installing OpenPype Photoshop...") + pyblish.api.register_host("photoshop") + + pyblish.api.register_plugin_path(PUBLISH_PATH) + register_loader_plugin_path(LOAD_PATH) + register_creator_plugin_path(CREATE_PATH) + log.info(PUBLISH_PATH) + + pyblish.api.register_callback( + "instanceToggled", on_pyblish_instance_toggled + ) + + register_event_callback("application.launched", on_application_launch) + + def current_file(self): + try: + full_name = lib.stub().get_active_document_full_name() + if full_name and full_name != "null": + return os.path.normpath(full_name).replace("\\", "/") + except Exception: + pass + + return None + + def work_root(self, session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") + + def open_workfile(self, filepath): + lib.stub().open(filepath) + + return True + + def save_workfile(self, filepath=None): + _, ext = os.path.splitext(filepath) + lib.stub().saveAs(filepath, ext[1:], True) + + def get_current_workfile(self): + return self.current_file() + + def workfile_has_unsaved_changes(self): + if self.current_file(): + return not lib.stub().is_saved() + + return False + + def get_workfile_extensions(self): + return [".psd", ".psb"] + + def get_containers(self): + return ls() + + def get_context_data(self): + """Get stored values for context (validation enable/disable etc)""" + meta = _get_stub().get_layers_metadata() + for item in meta: + if item.get("id") == "publish_context": + item.pop("id") + return item + + return {} + + def update_context_data(self, data, changes): + """Store value needed for context""" + item = data + item["id"] = "publish_context" + _get_stub().imprint(item["id"], item) + + def get_context_title(self): + """Returns title for Creator window""" + + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + return "{}/{}/{}".format(project_name, asset_name, task_name) + + def list_instances(self): + """List all created instances to publish from current workfile. + + Pulls from File > File Info + + Returns: + (list) of dictionaries matching instances format + """ + stub = _get_stub() + + if not stub: + return [] + + instances = [] + layers_meta = stub.get_layers_metadata() + if layers_meta: + for instance in layers_meta: + if instance.get("id") == "pyblish.avalon.instance": + instances.append(instance) + + return instances + + def remove_instance(self, instance): + """Remove instance from current workfile metadata. + + Updates metadata of current file in File > File Info and removes + icon highlight on group layer. 
+ + Args: + instance (dict): instance representation from subsetmanager model + """ + stub = _get_stub() + + if not stub: + return + + inst_id = instance.get("instance_id") or instance.get("uuid") # legacy + if not inst_id: + log.warning("No instance identifier for {}".format(instance)) + return + + stub.remove_instance(inst_id) + + if instance.get("members"): + item = stub.get_layer(instance["members"][0]) + if item: + stub.rename_layer(item.id, + item.name.replace(stub.PUBLISH_ICON, '')) + + def check_inventory(): - if not lib.any_outdated(): + if not any_outdated_containers(): return - host = avalon.api.registered_host() - outdated_containers = [] - for container in host.ls(): - representation = container['representation'] - representation_doc = io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not lib.is_latest(representation_doc): - outdated_containers.append(container) - # Warn about outdated containers. - print("Starting new QApplication..") + _app = QtWidgets.QApplication.instance() + if not _app: + print("Starting new QApplication..") + _app = QtWidgets.QApplication([]) message_box = QtWidgets.QMessageBox() message_box.setIcon(QtWidgets.QMessageBox.Warning) @@ -65,32 +195,6 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): instance[0].Visible = new_value -def install(): - """Install Photoshop-specific functionality of avalon-core. - - This function is called automatically on calling `api.install(photoshop)`. - """ - log.info("Installing OpenPype Photoshop...") - pyblish.api.register_host("photoshop") - - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) - log.info(PUBLISH_PATH) - - pyblish.api.register_callback( - "instanceToggled", on_pyblish_instance_toggled - ) - - register_event_callback("application.launched", on_application_launch) - - -def uninstall(): - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) - - def ls(): """Yields containers from active Photoshop document @@ -130,58 +234,6 @@ def ls(): yield data -def list_instances(): - """List all created instances to publish from current workfile. - - Pulls from File > File Info - - For SubsetManager - - Returns: - (list) of dictionaries matching instances format - """ - stub = _get_stub() - - if not stub: - return [] - - instances = [] - layers_meta = stub.get_layers_metadata() - if layers_meta: - for key, instance in layers_meta.items(): - schema = instance.get("schema") - if schema and "container" in schema: - continue - - instance['uuid'] = key - instances.append(instance) - - return instances - - -def remove_instance(instance): - """Remove instance from current workfile metadata. - - Updates metadata of current file in File > File Info and removes - icon highlight on group layer. 
-
- For SubsetManager
-
- Args:
- instance (dict): instance representation from subsetmanager model
- """
- stub = _get_stub()
-
- if not stub:
- return
-
- stub.remove_instance(instance.get("uuid"))
- layer = stub.get_layer(instance.get("uuid"))
- if layer:
- stub.rename_layer(instance.get("uuid"),
- layer.name.replace(stub.PUBLISH_ICON, ''))
-
-
 def _get_stub():
 """Handle pulling stub from PS to run operations on host
@@ -231,6 +283,22 @@ def containerise(
 "members": [str(layer.id)]
 }
 stub = lib.stub()
- stub.imprint(layer, data)
+ stub.imprint(layer.id, data)
 return layer
+
+
+def cache_and_get_instances(creator):
+ """Cache instances in shared data.
+
+ Stores all instances as a list, since legacy instances might still be present.
+ Args:
+ creator (Creator): Plugin which would like to get instances from host.
+ Returns:
+ List[dict]: all instances stored in metadata
+ """
+ shared_key = "openpype.photoshop.instances"
+ if shared_key not in creator.collection_shared_data:
+ creator.collection_shared_data[shared_key] = \
+ creator.host.list_instances()
+ return creator.collection_shared_data[shared_key]
diff --git a/openpype/hosts/photoshop/api/workio.py b/openpype/hosts/photoshop/api/workio.py
deleted file mode 100644
index 951c5dbfff..0000000000
--- a/openpype/hosts/photoshop/api/workio.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""Host API required Work Files tool"""
-import os
-
-from openpype.pipeline import HOST_WORKFILE_EXTENSIONS
-from . import lib
-
-
-def _active_document():
- document_name = lib.stub().get_active_document_name()
- if not document_name:
- return None
-
- return document_name
-
-
-def file_extensions():
- return HOST_WORKFILE_EXTENSIONS["photoshop"]
-
-
-def has_unsaved_changes():
- if _active_document():
- return not lib.stub().is_saved()
-
- return False
-
-
-def save_file(filepath):
- _, ext = os.path.splitext(filepath)
- lib.stub().saveAs(filepath, ext[1:], True)
-
-
-def open_file(filepath):
- lib.stub().open(filepath)
-
- return True
-
-
-def current_file():
- try:
- full_name = lib.stub().get_active_document_full_name()
- if full_name and full_name != "null":
- return os.path.normpath(full_name).replace("\\", "/")
- except Exception:
- pass
-
- return None
-
-
-def work_root(session):
- return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
diff --git a/openpype/hosts/photoshop/api/ws_stub.py b/openpype/hosts/photoshop/api/ws_stub.py
index 64d89f5420..2c4d0ad5fc 100644
--- a/openpype/hosts/photoshop/api/ws_stub.py
+++ b/openpype/hosts/photoshop/api/ws_stub.py
@@ -27,6 +27,17 @@ class PSItem(object):
 members = attr.ib(factory=list)
 long_name = attr.ib(default=None)
 color_code = attr.ib(default=None) # color code of layer
+ instance_id = attr.ib(default=None)
+
+ @property
+ def clean_name(self):
+ """Returns layer name without publish icon highlight
+
+ Returns:
+ (str)
+ """
+ return (self.name.replace(PhotoshopServerStub.PUBLISH_ICON, '')
+ .replace(PhotoshopServerStub.LOADED_ICON, ''))
 class PhotoshopServerStub:
@@ -76,13 +87,31 @@ class PhotoshopServerStub:
 layer: (PSItem)
 layers_meta: full list from Headline (for performance in loops)
 Returns:
+ (dict) of layer metadata stored in PS file
+
+ Example:
+ {
+ 'id': 'pyblish.avalon.container',
+ 'loader': 'ImageLoader',
+ 'members': ['64'],
+ 'name': 'imageMainMiddle',
+ 'namespace': 'Hero_imageMainMiddle_001',
+ 'representation': '6203dc91e80934d9f6ee7d96',
+ 'schema': 'openpype:container-2.0'
+ }
 """
 if layers_meta is None:
 layers_meta = self.get_layers_metadata()
- return
layers_meta.get(str(layer.id)) + for layer_meta in layers_meta: + layer_id = layer_meta.get("uuid") # legacy + if layer_meta.get("members"): + layer_id = layer_meta["members"][0] + if str(layer.id) == str(layer_id): + return layer_meta + print("Unable to find layer metadata for {}".format(layer.id)) - def imprint(self, layer, data, all_layers=None, layers_meta=None): + def imprint(self, item_id, data, all_layers=None, items_meta=None): """Save layer metadata to Headline field of active document Stores metadata in format: @@ -108,28 +137,37 @@ class PhotoshopServerStub: }] - for loaded instances Args: - layer (PSItem): + item_id (str): data(string): json representation for single layer all_layers (list of PSItem): for performance, could be injected for usage in loop, if not, single call will be triggered - layers_meta(string): json representation from Headline + items_meta(string): json representation from Headline (for performance - provide only if imprint is in loop - value should be same) Returns: None """ - if not layers_meta: - layers_meta = self.get_layers_metadata() + if not items_meta: + items_meta = self.get_layers_metadata() # json.dumps writes integer values in a dictionary to string, so # anticipating it here. - if str(layer.id) in layers_meta and layers_meta[str(layer.id)]: - if data: - layers_meta[str(layer.id)].update(data) + item_id = str(item_id) + is_new = True + result_meta = [] + for item_meta in items_meta: + if ((item_meta.get('members') and + item_id == str(item_meta.get('members')[0])) or + item_meta.get("instance_id") == item_id): + is_new = False + if data: + item_meta.update(data) + result_meta.append(item_meta) else: - layers_meta.pop(str(layer.id)) - else: - layers_meta[str(layer.id)] = data + result_meta.append(item_meta) + + if is_new: + result_meta.append(data) # Ensure only valid ids are stored. if not all_layers: @@ -137,12 +175,14 @@ class PhotoshopServerStub: layer_ids = [layer.id for layer in all_layers] cleaned_data = [] - for layer_id in layers_meta: - if int(layer_id) in layer_ids: - cleaned_data.append(layers_meta[layer_id]) + for item in result_meta: + if item.get("members"): + if int(item["members"][0]) not in layer_ids: + continue + + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) - self.websocketserver.call( self.client.call('Photoshop.imprint', payload=payload) ) @@ -189,10 +229,11 @@ class PhotoshopServerStub: return self._get_layers_in_layers(parent_ids) - def get_layers_in_layers_ids(self, layers_ids): + def get_layers_in_layers_ids(self, layers_ids, layers=None): """Return all layers that belong to layers (might be groups). 
Args: + layers_ids layers : Returns: @@ -200,10 +241,13 @@ class PhotoshopServerStub: """ parent_ids = set(layers_ids) - return self._get_layers_in_layers(parent_ids) + return self._get_layers_in_layers(parent_ids, layers) - def _get_layers_in_layers(self, parent_ids): - all_layers = self.get_layers() + def _get_layers_in_layers(self, parent_ids, layers=None): + if not layers: + layers = self.get_layers() + + all_layers = layers ret = [] for layer in all_layers: @@ -354,14 +398,17 @@ class PhotoshopServerStub: self.hide_all_others_layers_ids(extract_ids) - def hide_all_others_layers_ids(self, extract_ids): + def hide_all_others_layers_ids(self, extract_ids, layers=None): """hides all layers that are not part of the list or that are not children of this list Args: extract_ids (list): list of integer that should be visible + layers (list) of PSItem (used for caching) """ - for layer in self.get_layers(): + if not layers: + layers = self.get_layers() + for layer in layers: if layer.visible and layer.id not in extract_ids: self.set_visible(layer.id, False) @@ -370,38 +417,27 @@ class PhotoshopServerStub: (Headline accessible by File > File Info) Returns: - (string): - json documents + (list) example: {"8":{"active":true,"subset":"imageBG", "family":"image","id":"pyblish.avalon.instance", "asset":"Town"}} 8 is layer(group) id - used for deletion, update etc. """ - layers_data = {} res = self.websocketserver.call(self.client.call('Photoshop.read')) + layers_data = [] try: - layers_data = json.loads(res) + if res: + layers_data = json.loads(res) except json.decoder.JSONDecodeError: - pass + raise ValueError("{} cannot be parsed, recreate meta".format(res)) # format of metadata changed from {} to [] because of standardization # keep current implementation logic as its working - if not isinstance(layers_data, dict): - temp_layers_meta = {} - for layer_meta in layers_data: - layer_id = layer_meta.get("uuid") - if not layer_id: - layer_id = layer_meta.get("members")[0] - - temp_layers_meta[layer_id] = layer_meta - layers_data = temp_layers_meta - else: - # legacy version of metadata + if isinstance(layers_data, dict): for layer_id, layer_meta in layers_data.items(): if layer_meta.get("schema") != "openpype:container-2.0": - layer_meta["uuid"] = str(layer_id) - else: layer_meta["members"] = [str(layer_id)] - + layers_data = list(layers_data.values()) return layers_data def import_smart_object(self, path, layer_name, as_reference=False): @@ -472,11 +508,12 @@ class PhotoshopServerStub: ) def remove_instance(self, instance_id): - cleaned_data = {} + cleaned_data = [] - for key, instance in self.get_layers_metadata().items(): - if key != instance_id: - cleaned_data[key] = instance + for item in self.get_layers_metadata(): + inst_id = item.get("instance_id") or item.get("uuid") + if inst_id != instance_id: + cleaned_data.append(item) payload = json.dumps(cleaned_data, indent=4) @@ -528,6 +565,7 @@ class PhotoshopServerStub: d.get('type'), d.get('members'), d.get('long_name'), - d.get("color_code") + d.get("color_code"), + d.get("instance_id") )) return ret diff --git a/openpype/hosts/photoshop/plugins/create/create_image.py b/openpype/hosts/photoshop/plugins/create/create_image.py index a001b5f171..3d82d6b6f0 100644 --- a/openpype/hosts/photoshop/plugins/create/create_image.py +++ b/openpype/hosts/photoshop/plugins/create/create_image.py @@ -1,99 +1,213 @@ -from Qt import QtWidgets -from openpype.pipeline import create -from openpype.hosts.photoshop import api as photoshop +import re + +from 
openpype.hosts.photoshop import api
+from openpype.lib import BoolDef
+from openpype.pipeline import (
+ Creator,
+ CreatedInstance,
+ CreatorError
+)
+from openpype.lib import prepare_template_data
+from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS
+from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances
-class CreateImage(create.LegacyCreator):
- """Image folder for publish."""
+class ImageCreator(Creator):
+ """Creates image instance for publishing.
- name = "imageDefault"
+ The result of an 'image' instance is an image of all visible layers, or
+ image(s) of selected layers.
+ """
+ identifier = "image"
 label = "Image"
 family = "image"
- defaults = ["Main"]
+ description = "Image creator"
- def process(self):
- groups = []
- layers = []
- create_group = False
+ def create(self, subset_name_from_ui, data, pre_create_data):
+ groups_to_create = []
+ top_layers_to_wrap = []
+ create_empty_group = False
- stub = photoshop.stub()
- if (self.options or {}).get("useSelection"):
- multiple_instances = False
- selection = stub.get_selected_layers()
- self.log.info("selection {}".format(selection))
- if len(selection) > 1:
- # Ask user whether to create one image or image per selected
- # item.
- msg_box = QtWidgets.QMessageBox()
- msg_box.setIcon(QtWidgets.QMessageBox.Warning)
- msg_box.setText(
- "Multiple layers selected."
- "\nDo you want to make one image per layer?"
- )
- msg_box.setStandardButtons(
- QtWidgets.QMessageBox.Yes |
- QtWidgets.QMessageBox.No |
- QtWidgets.QMessageBox.Cancel
- )
- ret = msg_box.exec_()
- if ret == QtWidgets.QMessageBox.Yes:
- multiple_instances = True
- elif ret == QtWidgets.QMessageBox.Cancel:
- return
-
- if multiple_instances:
- for item in selection:
- if item.group:
- groups.append(item)
- else:
- layers.append(item)
- else:
- group = stub.group_selected_layers(self.name)
- groups.append(group)
-
- elif len(selection) == 1:
- # One selected item. Use group if its a LayerSet (group), else
- # create a new group.
- if selection[0].group:
- groups.append(selection[0])
- else:
- layers.append(selection[0])
- elif len(selection) == 0:
- # No selection creates an empty group.
- create_group = True
+ stub = api.stub() # only after PS is up
+ top_level_selected_items = stub.get_selected_layers()
+ if pre_create_data.get("use_selection"):
+ only_single_item_selected = len(top_level_selected_items) == 1
+ if (
+ only_single_item_selected or
+ pre_create_data.get("create_multiple")):
+ for selected_item in top_level_selected_items:
+ if selected_item.group:
+ groups_to_create.append(selected_item)
+ else:
+ top_layers_to_wrap.append(selected_item)
+ else:
+ group = stub.group_selected_layers(subset_name_from_ui)
+ groups_to_create.append(group)
 else:
- group = stub.create_group(self.name)
- groups.append(group)
+ stub.select_layers(stub.get_layers())
+ try:
+ group = stub.group_selected_layers(subset_name_from_ui)
+ except Exception:
+ raise CreatorError("Cannot group locked Background layer!")
+ groups_to_create.append(group)
- if create_group:
- group = stub.create_group(self.name)
- groups.append(group)
+ # create empty group if nothing selected
+ if not groups_to_create and not top_layers_to_wrap:
+ group = stub.create_group(subset_name_from_ui)
+ groups_to_create.append(group)
- for layer in layers:
+ # wrap each top level layer into separate new group
+ for layer in top_layers_to_wrap:
 stub.select_layers([layer])
 group = stub.group_selected_layers(layer.name)
- groups.append(group)
+ groups_to_create.append(group)
- creator_subset_name = self.data["subset"]
- for group in groups:
- long_names = []
- group.name = group.name.replace(stub.PUBLISH_ICON, ''). \
- replace(stub.LOADED_ICON, '')
+ layer_name = ''
+ # use artist-chosen option OR force the layer name if multiple subsets
+ # are created, to differentiate them
+ use_layer_name = (pre_create_data.get("use_layer_name") or
+ len(groups_to_create) > 1)
+ for group in groups_to_create:
+ subset_name = subset_name_from_ui # reset to name from creator UI
+ layer_names_in_hierarchy = []
+ created_group_name = self._clean_highlights(stub, group.name)
- subset_name = creator_subset_name
- if len(groups) > 1:
- subset_name += group.name.title().replace(" ", "")
+ if use_layer_name:
+ layer_name = re.sub(
+ "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS),
+ "",
+ group.name
+ )
+ if "{layer}" not in subset_name.lower():
+ subset_name += "{Layer}"
+
+ layer_fill = prepare_template_data({"layer": layer_name})
+ subset_name = subset_name.format(**layer_fill)
 if group.long_name:
 for directory in group.long_name[::-1]:
- name = directory.replace(stub.PUBLISH_ICON, '').\
- replace(stub.LOADED_ICON, '')
- long_names.append(name)
+ name = self._clean_highlights(stub, directory)
+ layer_names_in_hierarchy.append(name)
- self.data.update({"subset": subset_name})
- self.data.update({"uuid": str(group.id)})
- self.data.update({"long_name": "_".join(long_names)})
- stub.imprint(group, self.data)
+ data.update({"subset": subset_name})
+ data.update({"members": [str(group.id)]})
+ data.update({"layer_name": layer_name})
+ data.update({"long_name": "_".join(layer_names_in_hierarchy)})
+
+ new_instance = CreatedInstance(self.family, subset_name, data,
+ self)
+
+ stub.imprint(new_instance.get("instance_id"),
+ new_instance.data_to_store())
+ self._add_instance_to_context(new_instance)
 # reusing existing group, need to rename afterwards
- if not create_group:
- stub.rename_layer(group.id, stub.PUBLISH_ICON + group.name)
+ if not create_empty_group:
+ stub.rename_layer(group.id,
+ stub.PUBLISH_ICON + created_group_name)
+
+ def collect_instances(self):
+ for instance_data in cache_and_get_instances(self):
+ # legacy instances have family=='image'
+ creator_id =
(instance_data.get("creator_identifier") or
+ instance_data.get("family"))
+
+ if creator_id == self.identifier:
+ instance_data = self._handle_legacy(instance_data)
+ layer = api.stub().get_layer(instance_data["members"][0])
+ instance_data["layer"] = layer
+ instance = CreatedInstance.from_existing(
+ instance_data, self
+ )
+ self._add_instance_to_context(instance)
+
+ def update_instances(self, update_list):
+ self.log.debug("update_list:: {}".format(update_list))
+ for created_inst, _changes in update_list:
+ if created_inst.get("layer"):
+ # not storing PSItem layer to metadata
+ created_inst.pop("layer")
+ api.stub().imprint(created_inst.get("instance_id"),
+ created_inst.data_to_store())
+
+ def remove_instances(self, instances):
+ for instance in instances:
+ self.host.remove_instance(instance)
+ self._remove_instance_from_context(instance)
+
+ def get_default_variants(self):
+ return [
+ "Main"
+ ]
+
+ def get_pre_create_attr_defs(self):
+ output = [
+ BoolDef("use_selection", default=True,
+ label="Create only for selected"),
+ BoolDef("create_multiple",
+ default=True,
+ label="Create separate instance for each selected"),
+ BoolDef("use_layer_name",
+ default=False,
+ label="Use layer name in subset")
+ ]
+ return output
+
+ def get_detail_description(self):
+ return """Creator for Image instances
+
+ The main publishable item in Photoshop will be of the `image` family.
+ The result of this item (instance) is a picture that can be loaded and
+ used in other DCCs (for example as a single layer in a composition in
+ AfterEffects, a reference in Maya etc.).
+
+ There are a couple of options for what to publish:
+ - separate image per selected layer (or group of layers)
+ - one image for all selected layers
+ - all visible layers (groups) flattened into single image
+
+ In most cases you would like to keep `Create only for selected`
+ toggled on and select what you would like to publish.
+ Toggling this option off will allow you to create an instance for all
+ visible layers without needing to select them explicitly.
+
+ Use 'Create separate instance for each selected' to create separate
+ images per selected layer (group of layers).
+
+ 'Use layer name in subset' will explicitly add the layer name into the
+ subset name. Position of this name is configurable in
+ `project_settings/global/tools/creator/subset_name_profiles`.
+ If the layer placeholder ({layer}) is not used in `subset_name_profiles`
+ but the layer name should be used (set explicitly in the UI, or
+ implicitly if multiple images are created), it is added in capitalized
+ form as a suffix to the subset name.
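+
+        Example: a rough sketch of that implicit suffix behaviour, assuming
+        a subset name 'imageMain' from the UI and a selected layer named
+        'hero' (prepare_template_data is expected to also provide the
+        capitalized '{Layer}' key used here):
+
+        >>> from openpype.lib import prepare_template_data
+        >>> subset_name = "imageMain" + "{Layer}"
+        >>> layer_fill = prepare_template_data({"layer": "hero"})
+        >>> subset_name.format(**layer_fill)
+        'imageMainHero'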
+ """ + + def _handle_legacy(self, instance_data): + """Converts old instances to new format.""" + if not instance_data.get("members"): + instance_data["members"] = [instance_data.get("uuid")] + + if instance_data.get("uuid"): + # uuid not needed, replaced with unique instance_id + api.stub().remove_instance(instance_data.get("uuid")) + instance_data.pop("uuid") + + if not instance_data.get("task"): + instance_data["task"] = self.create_context.get_current_task_name() + + if not instance_data.get("variant"): + instance_data["variant"] = '' + + return instance_data + + def _clean_highlights(self, stub, item): + return item.replace(stub.PUBLISH_ICON, '').replace(stub.LOADED_ICON, + '') + + def get_dynamic_data(self, variant, task_name, asset_doc, + project_name, host_name, instance): + if instance is not None: + layer_name = instance.get("layer_name") + if layer_name: + return {"layer": layer_name} + return {"layer": "{layer}"} diff --git a/openpype/hosts/testhost/plugins/create/auto_creator.py b/openpype/hosts/photoshop/plugins/create/workfile_creator.py similarity index 54% rename from openpype/hosts/testhost/plugins/create/auto_creator.py rename to openpype/hosts/photoshop/plugins/create/workfile_creator.py index d5935602a0..f5d56adcbc 100644 --- a/openpype/hosts/testhost/plugins/create/auto_creator.py +++ b/openpype/hosts/photoshop/plugins/create/workfile_creator.py @@ -1,24 +1,23 @@ -from avalon import io -from openpype.lib import NumberDef -from openpype.hosts.testhost.api import pipeline +import openpype.hosts.photoshop.api as api +from openpype.client import get_asset_by_name from openpype.pipeline import ( AutoCreator, - CreatedInstance, + CreatedInstance ) +from openpype.hosts.photoshop.api.pipeline import cache_and_get_instances -class MyAutoCreator(AutoCreator): +class PSWorkfileCreator(AutoCreator): identifier = "workfile" family = "workfile" + default_variant = "Main" + def get_instance_attr_defs(self): - output = [ - NumberDef("number_key", label="Number") - ] - return output + return [] def collect_instances(self): - for instance_data in pipeline.list_instances(): + for instance_data in cache_and_get_instances(self): creator_id = instance_data.get("creator_identifier") if creator_id == self.identifier: subset_name = instance_data["subset"] @@ -28,7 +27,8 @@ class MyAutoCreator(AutoCreator): self._add_instance_to_context(instance) def update_instances(self, update_list): - pipeline.update_instances(update_list) + # nothing to change on workfiles + pass def create(self, options=None): existing_instance = None @@ -37,38 +37,43 @@ class MyAutoCreator(AutoCreator): existing_instance = instance break - variant = "Main" - project_name = io.Session["AVALON_PROJECT"] - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] - host_name = io.Session["AVALON_APP"] - + context = self.create_context + project_name = context.get_current_project_name() + asset_name = context.get_current_asset_name() + task_name = context.get_current_task_name() + host_name = context.host_name if existing_instance is None: - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name ) data = { "asset": asset_name, "task": task_name, - "variant": variant + "variant": self.default_variant } data.update(self.get_dynamic_data( - variant, task_name, asset_doc, 
project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name, None )) new_instance = CreatedInstance( self.family, subset_name, data, self ) self._add_instance_to_context(new_instance) + api.stub().imprint(new_instance.get("instance_id"), + new_instance.data_to_store()) elif ( existing_instance["asset"] != asset_name or existing_instance["task"] != task_name ): - asset_doc = io.find_one({"type": "asset", "name": asset_name}) + asset_doc = get_asset_by_name(project_name, asset_name) subset_name = self.get_subset_name( - variant, task_name, asset_doc, project_name, host_name + self.default_variant, task_name, asset_doc, + project_name, host_name ) existing_instance["asset"] = asset_name existing_instance["task"] = task_name + existing_instance["subset"] = subset_name diff --git a/openpype/hosts/photoshop/plugins/load/load_image.py b/openpype/hosts/photoshop/plugins/load/load_image.py index 0a9421b8f2..91a9787781 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image.py +++ b/openpype/hosts/photoshop/plugins/load/load_image.py @@ -61,7 +61,7 @@ class ImageLoader(photoshop.PhotoshopLoader): ) stub.imprint( - layer, {"representation": str(representation["_id"])} + layer.id, {"representation": str(representation["_id"])} ) def remove(self, container): @@ -73,7 +73,7 @@ class ImageLoader(photoshop.PhotoshopLoader): stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_layer(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py index 5f39121ae1..c25c5a8f2c 100644 --- a/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py +++ b/openpype/hosts/photoshop/plugins/load/load_image_from_sequence.py @@ -2,7 +2,6 @@ import os import qargparse -from openpype.pipeline import get_representation_path_from_context from openpype.hosts.photoshop import api as photoshop from openpype.hosts.photoshop.api import get_unique_layer_name @@ -63,7 +62,7 @@ class ImageFromSequenceLoader(photoshop.PhotoshopLoader): """ files = [] for context in repre_contexts: - fname = get_representation_path_from_context(context) + fname = cls.filepath_from_context(context) _, file_extension = os.path.splitext(fname) for file_name in os.listdir(os.path.dirname(fname)): diff --git a/openpype/hosts/photoshop/plugins/load/load_reference.py b/openpype/hosts/photoshop/plugins/load/load_reference.py index f5f0545d39..1f32a5d23c 100644 --- a/openpype/hosts/photoshop/plugins/load/load_reference.py +++ b/openpype/hosts/photoshop/plugins/load/load_reference.py @@ -61,7 +61,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): ) stub.imprint( - layer, {"representation": str(representation["_id"])} + layer.id, {"representation": str(representation["_id"])} ) def remove(self, container): @@ -72,7 +72,7 @@ class ReferenceLoader(photoshop.PhotoshopLoader): """ stub = self.get_stub() layer = container.pop("layer") - stub.imprint(layer, {}) + stub.imprint(layer.id, {}) stub.delete_layer(layer.id) def switch(self, container, representation): diff --git a/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py new file mode 100644 index 0000000000..a5fea7ac7d --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/collect_batch_data.py @@ -0,0 +1,78 @@ +"""Parses batch context from json and continues in publish 
process.
+
+Provides:
+ context -> Loaded batch file.
+ - asset
+ - task (task name)
+ - taskType
+ - project_name
+ - variant
+
+The code is practically a copy of
+`openpype/hosts/webpublisher/collect_batch_data`, as webpublisher should
+eventually be ejected as an addon, i.e. the mentioned plugin shouldn't be
+pushed into general publish plugins.
+"""
+
+import os
+
+import pyblish.api
+
+from openpype.pipeline import legacy_io
+from openpype_modules.webpublisher.lib import (
+ get_batch_asset_task_info,
+ parse_json
+)
+from openpype.tests.lib import is_in_tests
+
+
+class CollectBatchData(pyblish.api.ContextPlugin):
+ """Collect batch data from json stored in 'OPENPYPE_PUBLISH_DATA' env dir.
+
+ The directory must contain 'manifest.json' file where batch data should be
+ stored.
+ """
+ # must be really early, context values are only in json file
+ order = pyblish.api.CollectorOrder - 0.495
+ label = "Collect batch data"
+ hosts = ["photoshop"]
+ targets = ["remotepublish"]
+
+ def process(self, context):
+ self.log.info("CollectBatchData")
+ batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA")
+ if is_in_tests():
+ self.log.debug("Automatic testing, no batch data, skipping")
+ return
+
+ assert batch_dir, (
+ "Missing `OPENPYPE_PUBLISH_DATA`")
+
+ assert os.path.exists(batch_dir), \
+ "Folder {} doesn't exist".format(batch_dir)
+
+ project_name = os.environ.get("AVALON_PROJECT")
+ if project_name is None:
+ raise AssertionError(
+ "Environment `AVALON_PROJECT` was not found. "
+ "Could not set project `root` which may cause issues."
+ )
+
+ batch_data = parse_json(os.path.join(batch_dir, "manifest.json"))
+
+ context.data["batchDir"] = batch_dir
+ context.data["batchData"] = batch_data
+
+ asset_name, task_name, task_type = get_batch_asset_task_info(
+ batch_data["context"]
+ )
+
+ os.environ["AVALON_ASSET"] = asset_name
+ os.environ["AVALON_TASK"] = task_name
+ legacy_io.Session["AVALON_ASSET"] = asset_name
+ legacy_io.Session["AVALON_TASK"] = task_name
+
+ context.data["asset"] = asset_name
+ context.data["task"] = task_name
+ context.data["taskType"] = task_type
+ context.data["project_name"] = project_name
+ context.data["variant"] = batch_data["variant"]
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
index 7d44d55a80..90fca8398f 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_color_coded_instances.py
@@ -4,19 +4,28 @@ import re
 import pyblish.api
 from openpype.lib import prepare_template_data
-from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info
 from openpype.hosts.photoshop import api as photoshop
+from openpype.settings import get_project_settings
+from openpype.tests.lib import is_in_tests
 class CollectColorCodedInstances(pyblish.api.ContextPlugin):
- """Creates instances for configured color code of a layer.
+ """Creates instances for layers marked by configurable color.
 Used in remote publishing when artists marks publishable layers by color-
- coding.
+ coding. Top level layers (groups) must be marked by a specific color to be
+ published as an instance of the 'image' family.
 Can add group for all publishable layers to allow creation of flattened
 image. (Cannot contain special background layer as it cannot be grouped!)
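+
+ Which color resolves to which family and subset template is driven by a
+ mapping in project Settings; a minimal sketch of one mapping item (the
+ field names are assumed from the plugin's mapping setting, the values
+ are purely illustrative):
+
+ [{"color_code": ["redColor"],
+ "layer_name_regex": [".*hero.*"],
+ "family": "image",
+ "subset_template_name": "image{Layer}"}]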
+ Based on value `create_flatten_image` from Settings: + - "yes": create flattened 'image' subset of all publishable layers + create + 'image' subset per publishable layer + - "only": create ONLY flattened 'image' subset of all publishable layers + - "no": do not create flattened 'image' subset at all, + only separate subsets per marked layer. + Identifier: id (str): "pyblish.avalon.instance" """ @@ -32,27 +41,36 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): # TODO check if could be set globally, probably doesn't make sense when # flattened template cannot subset_template_name = "" - create_flatten_image = False - # probably not possible to configure this globally + create_flatten_image = "no" flatten_subset_template = "" def process(self, context): self.log.info("CollectColorCodedInstances") batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") - if (os.environ.get("IS_TEST") and + if (is_in_tests() and (not batch_dir or not os.path.exists(batch_dir))): self.log.debug("Automatic testing, no batch data, skipping") return existing_subset_names = self._get_existing_subset_names(context) - asset_name, task_name, variant = self._parse_batch(batch_dir) + # from CollectBatchData + asset_name = context.data["asset"] + task_name = context.data["task"] + variant = context.data["variant"] + project_name = context.data["projectEntity"]["name"] + + naming_conventions = get_project_settings(project_name).get( + "photoshop", {}).get( + "publish", {}).get( + "ValidateNaming", {}) stub = photoshop.stub() layers = stub.get_layers() publishable_layers = [] created_instances = [] + family_from_settings = None for layer in layers: self.log.debug("Layer:: {}".format(layer)) if layer.parents: @@ -71,28 +89,37 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): self.log.debug("!!! 
Not found family or template, skip") continue + if not family_from_settings: + family_from_settings = resolved_family + fill_pairs = { "variant": variant, "family": resolved_family, "task": task_name, - "layer": layer.name + "layer": layer.clean_name } subset = resolved_subset_template.format( **prepare_template_data(fill_pairs)) + subset = self._clean_subset_name(stub, naming_conventions, + subset, layer) + if subset in existing_subset_names: self.log.info( "Subset {} already created, skipping.".format(subset)) continue - instance = self._create_instance(context, layer, resolved_family, - asset_name, subset, task_name) + if self.create_flatten_image != "flatten_only": + instance = self._create_instance(context, layer, + resolved_family, + asset_name, subset, task_name) + created_instances.append(instance) + existing_subset_names.append(subset) publishable_layers.append(layer) - created_instances.append(instance) - if self.create_flatten_image and publishable_layers: + if self.create_flatten_image != "no" and publishable_layers: self.log.debug("create_flatten_image") if not self.flatten_subset_template: self.log.warning("No template for flatten image") @@ -104,7 +131,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): first_layer = publishable_layers[0] # dummy layer first_layer.name = subset - family = created_instances[0].data["family"] # inherit family + family = family_from_settings # inherit family instance = self._create_instance(context, first_layer, family, asset_name, subset, task_name) @@ -130,25 +157,6 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): return existing_subset_names - def _parse_batch(self, batch_dir): - """Parses asset_name, task_name, variant from batch manifest.""" - task_data = None - if batch_dir and os.path.exists(batch_dir): - task_data = parse_json(os.path.join(batch_dir, - "manifest.json")) - if not task_data: - raise ValueError( - "Cannot parse batch meta in {} folder".format(batch_dir)) - variant = task_data["variant"] - - asset, task_name, task_type = get_batch_asset_task_info( - task_data["context"]) - - if not task_name: - task_name = task_type - - return asset, task_name, variant - def _create_instance(self, context, layer, family, asset, subset, task_name): instance = context.create_instance(layer.name) @@ -158,6 +166,7 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): instance.data["task"] = task_name instance.data["subset"] = subset instance.data["layer"] = layer + instance.data["families"] = [] return instance @@ -203,3 +212,21 @@ class CollectColorCodedInstances(pyblish.api.ContextPlugin): self.log.debug("resolved_subset_template {}".format( resolved_subset_template)) return family, resolved_subset_template + + def _clean_subset_name(self, stub, naming_conventions, subset, layer): + """Cleans invalid characters from subset name and layer name.""" + if re.search(naming_conventions["invalid_chars"], subset): + subset = re.sub( + naming_conventions["invalid_chars"], + naming_conventions["replace_char"], + subset + ) + layer_name = re.sub( + naming_conventions["invalid_chars"], + naming_conventions["replace_char"], + layer.clean_name + ) + layer.name = layer_name + stub.rename_layer(layer.id, layer_name) + + return subset diff --git a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py b/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py index 64c99b4fc1..dc0678c9af 100644 --- a/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py +++ 
b/openpype/hosts/photoshop/plugins/publish/collect_extension_version.py
@@ -43,7 +43,7 @@ class CollectExtensionVersion(pyblish.api.ContextPlugin):
 with open(manifest_url) as fp:
 content = fp.read()
- found = re.findall(r'(ExtensionBundleVersion=")([0-10\.]+)(")',
+ found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
 content)
 if found:
 expected_version = found[0][1]
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_instances.py b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
index c3e27e9646..5bf12379b1 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_instances.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_instances.py
@@ -1,61 +1,116 @@
+import pprint
+
 import pyblish.api
+from openpype.settings import get_project_settings
 from openpype.hosts.photoshop import api as photoshop
+from openpype.lib import prepare_template_data
+from openpype.pipeline import legacy_io
 class CollectInstances(pyblish.api.ContextPlugin):
 """Gather instances by LayerSet and file metadata
- This collector takes into account assets that are associated with
- an LayerSet and marked with a unique identifier;
+ Collects publishable instances from file metadata, or enhances instances
+ already collected by the creator (family == "image").
+
+ If no image instances are explicitly created, it checks whether there is
+ a value in `flatten_subset_template` (configurable in Settings); in that
+ case it produces a flattened image with all visible layers.
 Identifier:
 id (str): "pyblish.avalon.instance"
 """
- label = "Instances"
+ label = "Collect Instances"
 order = pyblish.api.CollectorOrder
 hosts = ["photoshop"]
 families_mapping = {
 "image": []
 }
+ # configurable in Settings
+ flatten_subset_template = ""
 def process(self, context):
+ instance_by_layer_id = {}
+ for instance in context:
+ if (
+ instance.data["family"] == "image" and
+ instance.data.get("members")):
+ layer_id = str(instance.data["members"][0])
+ instance_by_layer_id[layer_id] = instance
+
 stub = photoshop.stub()
- layers = stub.get_layers()
+ layer_items = stub.get_layers()
 layers_meta = stub.get_layers_metadata()
 instance_names = []
- for layer in layers:
- layer_data = stub.read(layer, layers_meta)
+
+ all_layer_ids = []
+ for layer_item in layer_items:
+ layer_meta_data = stub.read(layer_item, layers_meta)
+ all_layer_ids.append(layer_item.id)
 # Skip layers without metadata.
- if layer_data is None:
+ if layer_meta_data is None:
 continue
 # Skip containers.
- if "container" in layer_data["id"]:
+ if "container" in layer_meta_data["id"]:
 continue
- # child_layers = [*layer.Layers]
- # self.log.debug("child_layers {}".format(child_layers))
- # if not child_layers:
- # self.log.info("%s skipped, it was empty."
% layer.Name)
- # continue
+ # active might not be in legacy meta
+ if not layer_meta_data.get("active", True):
+ continue
- instance = context.create_instance(layer_data["subset"])
- instance.data["layer"] = layer
- instance.data.update(layer_data)
+ instance = instance_by_layer_id.get(str(layer_item.id))
+ if instance is None:
+ instance = context.create_instance(layer_meta_data["subset"])
+
+ instance.data["layer"] = layer_item
+ instance.data.update(layer_meta_data)
 instance.data["families"] = self.families_mapping[
- layer_data["family"]
+ layer_meta_data["family"]
 ]
- instance.data["publish"] = layer.visible
- instance_names.append(layer_data["subset"])
+ instance.data["publish"] = layer_item.visible
+ instance_names.append(layer_meta_data["subset"])
 # Produce diagnostic message for any graphical
 # user interface interested in visualising it.
 self.log.info("Found: \"%s\" " % instance.data["name"])
- self.log.info("instance: {} ".format(instance.data))
+ self.log.info("instance: {} ".format(
+ pprint.pformat(instance.data, indent=4)))
 if len(instance_names) != len(set(instance_names)):
 self.log.warning("Duplicate instances found. " +
- "Remove unwanted via SubsetManager")
+ "Remove unwanted via Publisher")
+
+ if len(instance_names) == 0 and self.flatten_subset_template:
+ project_name = context.data["projectEntity"]["name"]
+ variants = get_project_settings(project_name).get(
+ "photoshop", {}).get(
+ "create", {}).get(
+ "CreateImage", {}).get(
+ "defaults", [''])
+ family = "image"
+ task_name = legacy_io.Session["AVALON_TASK"]
+ asset_name = context.data["assetEntity"]["name"]
+
+ variant = context.data.get("variant") or variants[0]
+ fill_pairs = {
+ "variant": variant,
+ "family": family,
+ "task": task_name
+ }
+
+ subset = self.flatten_subset_template.format(
+ **prepare_template_data(fill_pairs))
+
+ instance = context.create_instance(subset)
+ instance.data["family"] = family
+ instance.data["asset"] = asset_name
+ instance.data["subset"] = subset
+ instance.data["ids"] = all_layer_ids
+ instance.data["families"] = self.families_mapping[family]
+ instance.data["publish"] = True
+
+ self.log.info("flatten instance: {} ".format(instance.data))
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_published_version.py b/openpype/hosts/photoshop/plugins/publish/collect_published_version.py
new file mode 100644
index 0000000000..2502689e4b
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/collect_published_version.py
@@ -0,0 +1,55 @@
+"""Collects published version of workfile and increments it.
+
+For synchronization of published image and workfile version it is required
+to store the workfile version from the workfile file name in
+context.data["version"]. In remote publishing this name is unreliable (artist
+might not follow naming convention etc.), so the last published workfile
+version for the particular workfile subset is used instead.
+
+This plugin runs only in remote publishing (e.g. Webpublisher).
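+
+The version math itself is a single lookup (a sketch; the project, subset
+and asset names below are illustrative):
+
+    >>> version_doc = get_last_version_by_subset_name(
+    ...     "demo_project", "workfileArt", asset_id)
+    >>> version = 1 + int(version_doc["name"]) if version_doc else 1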
+
+Requires:
+ context.data["assetEntity"]
+
+Provides:
+ context["version"] - incremented latest published workfile version
+"""
+
+import pyblish.api
+
+from openpype.client import get_last_version_by_subset_name
+
+
+class CollectPublishedVersion(pyblish.api.ContextPlugin):
+ """Collects published version of workfile and increments it."""
+
+ order = pyblish.api.CollectorOrder + 0.190
+ label = "Collect published version"
+ hosts = ["photoshop"]
+ targets = ["remotepublish"]
+
+ def process(self, context):
+ workfile_subset_name = None
+ for instance in context:
+ if instance.data["family"] == "workfile":
+ workfile_subset_name = instance.data["subset"]
+ break
+
+ if not workfile_subset_name:
+ self.log.warning("No workfile instance found, "
+ "synchronization of version will not work.")
+ return
+
+ project_name = context.data["projectName"]
+ asset_doc = context.data["assetEntity"]
+ asset_id = asset_doc["_id"]
+
+ version_doc = get_last_version_by_subset_name(project_name,
+ workfile_subset_name,
+ asset_id)
+ version_int = 1
+ if version_doc:
+ version_int += int(version_doc["name"])
+
+ self.log.debug(f"Setting {version_int} to context.")
+ context.data["version"] = version_int
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_review.py b/openpype/hosts/photoshop/plugins/publish/collect_review.py
index 5ab48b76da..7e598a8250 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_review.py
@@ -1,30 +1,52 @@
+"""
+Requires:
+ None
+
+Provides:
+ instance -> family ("review")
+"""
+
 import os
 import pyblish.api
+from openpype.pipeline.create import get_subset_name
+
 class CollectReview(pyblish.api.ContextPlugin):
- """Gather the active document as review instance."""
+ """Gather the active document as review instance.
+ Triggers once even if no 'image' is published, as by default it creates a
+ flattened image from the workfile.
+ """
+
+ label = "Collect Review"
- label = "Review"
- order = pyblish.api.CollectorOrder
 hosts = ["photoshop"]
+ order = pyblish.api.CollectorOrder + 0.1
+
+ publish = True
 def process(self, context):
 family = "review"
- task = os.getenv("AVALON_TASK", None)
- subset = family + task.capitalize()
-
- file_path = context.data["currentFile"]
- base_name = os.path.basename(file_path)
+ subset = get_subset_name(
+ family,
+ context.data.get("variant", ''),
+ context.data["anatomyData"]["task"]["name"],
+ context.data["assetEntity"],
+ context.data["anatomyData"]["project"]["name"],
+ host_name=context.data["hostName"],
+ project_settings=context.data["project_settings"]
+ )
 instance = context.create_instance(subset)
 instance.data.update({
 "subset": subset,
- "label": base_name,
- "name": base_name,
+ "label": subset,
+ "name": subset,
 "family": family,
- "families": ["ftrack"],
+ "families": [],
 "representations": [],
- "asset": os.environ["AVALON_ASSET"]
+ "asset": os.environ["AVALON_ASSET"],
+ "publish": self.publish
 })
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_version.py b/openpype/hosts/photoshop/plugins/publish/collect_version.py
new file mode 100644
index 0000000000..cda71d8643
--- /dev/null
+++ b/openpype/hosts/photoshop/plugins/publish/collect_version.py
@@ -0,0 +1,29 @@
+import pyblish.api
+
+
+class CollectVersion(pyblish.api.InstancePlugin):
+ """Collect version for publishable instances.
+
+ Used to synchronize version from workfile to all publishable instances:
+ - image (manually created or color coded)
+ - review
+ - workfile
+
+ Dev comment:
+ An explicit collector was created to control this from a single place
+ and not from 3 different ones.
+
+ Workfile version is set here explicitly, as the version might be forced
+ to latest + 1 because of Webpublisher.
+ (This plugin must run after CollectPublishedVersion!)
+ """
+ order = pyblish.api.CollectorOrder + 0.200
+ label = 'Collect Version'
+
+ hosts = ["photoshop"]
+ families = ["image", "review", "workfile"]
+
+ def process(self, instance):
+ workfile_version = instance.context.data["version"]
+ self.log.debug(f"Applying version {workfile_version}")
+ instance.data["version"] = workfile_version
diff --git a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
index db1ede14d5..9a5aad5569 100644
--- a/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/photoshop/plugins/publish/collect_workfile.py
@@ -1,6 +1,8 @@
 import os
 import pyblish.api
+from openpype.pipeline.create import get_subset_name
+
 class CollectWorkfile(pyblish.api.ContextPlugin):
 """Collect current script for publish."""
@@ -9,26 +11,47 @@
 label = "Collect Workfile"
 hosts = ["photoshop"]
+ default_variant = "Main"
+
 def process(self, context):
+ existing_instance = None
+ for instance in context:
+ if instance.data["family"] == "workfile":
+ self.log.debug("Workfile instance found, won't create new")
+ existing_instance = instance
+ break
+
 family = "workfile"
- task = os.getenv("AVALON_TASK", None)
- subset = family + task.capitalize()
+ # context.data["variant"] might come only from collect_batch_data
+ variant = context.data.get("variant") or self.default_variant
+ subset = get_subset_name(
+ family,
+ variant,
+ context.data["anatomyData"]["task"]["name"],
+ context.data["assetEntity"],
+ context.data["anatomyData"]["project"]["name"],
+ host_name=context.data["hostName"],
+ project_settings=context.data["project_settings"]
+ )
 file_path = context.data["currentFile"]
 staging_dir = os.path.dirname(file_path)
 base_name = os.path.basename(file_path)
 # Create instance
- instance = context.create_instance(subset)
- instance.data.update({
- "subset": subset,
- "label": base_name,
- "name": base_name,
- "family": family,
- "families": [],
- "representations": [],
- "asset": os.environ["AVALON_ASSET"]
- })
+ if existing_instance is None:
+ instance = context.create_instance(subset)
+ instance.data.update({
+ "subset": subset,
+ "label": base_name,
+ "name": base_name,
+ "family": family,
+ "families": [],
+ "representations": [],
+ "asset": os.environ["AVALON_ASSET"]
+ })
+ else:
+ instance = existing_instance
 # creating representation
 _, ext = os.path.splitext(file_path)
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_image.py b/openpype/hosts/photoshop/plugins/publish/extract_image.py
index 04ce77ee34..cdb28c742d 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_image.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_image.py
@@ -1,60 +1,99 @@
 import os
-import openpype.api
+import pyblish.api
+from openpype.pipeline import publish
 from openpype.hosts.photoshop import api as photoshop
-class ExtractImage(openpype.api.Extractor):
- """Produce a flattened image file from instance
+class ExtractImage(pyblish.api.ContextPlugin):
+ """Extract all layers (groups) marked for publish.
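+
+    Condensed strategy (a sketch; 'stub', 'all_layers', 'extract_ids' and
+    'full_filename' all come from the process() below):
+
+        stub.hide_all_others_layers_ids([], layers=all_layers)  # hide once
+        for extracted_id in extract_ids:        # per publishable instance
+            stub.set_visible(extracted_id, True)
+        for extension in ("png", "jpg"):        # save while visible
+            stub.saveAs(full_filename, extension, True)
+        for extracted_id in extract_ids:        # re-hide before next one
+            stub.set_visible(extracted_id, False)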
- This plug-in takes into account only the layers in the group.
+ Usually a publishable instance is created as a wrapper of layer(s). For
+ each publishable instance, as many images are created as there are
+ 'formats'.
+
+ The logic tries to hide/unhide layers a minimum number of times.
+
+ Called once for all publishable instances.
 """
+ order = publish.Extractor.order - 0.48
 label = "Extract Image"
 hosts = ["photoshop"]
+ families = ["image", "background"]
 formats = ["png", "jpg"]
- def process(self, instance):
-
- staging_dir = self.staging_dir(instance)
- self.log.info("Outputting image to {}".format(staging_dir))
-
- # Perform extraction
+ def process(self, context):
 stub = photoshop.stub()
- files = {}
+ hidden_layer_ids = set()
+
+ all_layers = stub.get_layers()
+ for layer in all_layers:
+ if not layer.visible:
+ hidden_layer_ids.add(layer.id)
+ stub.hide_all_others_layers_ids([], layers=all_layers)
+
 with photoshop.maintained_selection():
- self.log.info("Extracting %s" % str(list(instance)))
- with photoshop.maintained_visibility():
- layer = instance.data.get("layer")
- ids = set([layer.id])
- add_ids = instance.data.pop("ids", None)
- if add_ids:
- ids.update(set(add_ids))
- extract_ids = set([ll.id for ll in stub.
- get_layers_in_layers_ids(ids)])
- stub.hide_all_others_layers_ids(extract_ids)
+ with photoshop.maintained_visibility(layers=all_layers):
+ for instance in context:
+ if instance.data["family"] not in self.families:
+ continue
- file_basename = os.path.splitext(
- stub.get_active_document_name()
- )[0]
- for extension in self.formats:
- _filename = "{}.{}".format(file_basename, extension)
- files[extension] = _filename
+ staging_dir = self.staging_dir(instance)
+ self.log.info("Outputting image to {}".format(staging_dir))
- full_filename = os.path.join(staging_dir, _filename)
- stub.saveAs(full_filename, extension, True)
- self.log.info(f"Extracted: {extension}")
+ # Perform extraction
+ files = {}
+ ids = set()
+ layer = instance.data.get("layer")
+ if layer:
+ ids.add(layer.id)
+ add_ids = instance.data.pop("ids", None)
+ if add_ids:
+ ids.update(set(add_ids))
+ extract_ids = set([ll.id for ll in stub.
+ get_layers_in_layers_ids(ids, all_layers)
+ if ll.id not in hidden_layer_ids])
- representations = []
- for extension, filename in files.items():
- representations.append({
- "name": extension,
- "ext": extension,
- "files": filename,
- "stagingDir": staging_dir
- })
- instance.data["representations"] = representations
- instance.data["stagingDir"] = staging_dir
+ for extracted_id in extract_ids:
+ stub.set_visible(extracted_id, True)
- self.log.info(f"Extracted {instance} to {staging_dir}")
+ file_basename = os.path.splitext(
+ stub.get_active_document_name()
+ )[0]
+ for extension in self.formats:
+ _filename = "{}.{}".format(file_basename,
+ extension)
+ files[extension] = _filename
+
+ full_filename = os.path.join(staging_dir,
+ _filename)
+ stub.saveAs(full_filename, extension, True)
+ self.log.info(f"Extracted: {extension}")
+
+ representations = []
+ for extension, filename in files.items():
+ representations.append({
+ "name": extension,
+ "ext": extension,
+ "files": filename,
+ "stagingDir": staging_dir
+ })
+ instance.data["representations"] = representations
+ instance.data["stagingDir"] = staging_dir
+
+ self.log.info(f"Extracted {instance} to {staging_dir}")
+
+ for extracted_id in extract_ids:
+ stub.set_visible(extracted_id, False)
+
+ def staging_dir(self, instance):
+ """Provide a temporary directory in which to store extracted files
+
+ Upon calling this method the staging directory is stored inside
+ the instance.data['stagingDir']
+ """
+
+ from openpype.pipeline.publish import get_instance_staging_dir
+
+ return get_instance_staging_dir(instance)
diff --git a/openpype/hosts/photoshop/plugins/publish/extract_review.py b/openpype/hosts/photoshop/plugins/publish/extract_review.py
index b8f4470c7b..01022ce0b2 100644
--- a/openpype/hosts/photoshop/plugins/publish/extract_review.py
+++ b/openpype/hosts/photoshop/plugins/publish/extract_review.py
@@ -1,17 +1,28 @@
 import os
 import shutil
+from PIL import Image
-import openpype.api
-import openpype.lib
+from openpype.lib import (
+ run_subprocess,
+ get_ffmpeg_tool_path,
+)
+from openpype.pipeline import publish
 from openpype.hosts.photoshop import api as photoshop
-class ExtractReview(openpype.api.Extractor):
+class ExtractReview(publish.Extractor):
 """
- Produce a flattened or sequence image file from all 'image' instances.
+ Produce flattened or sequence image files from all 'image' instances.
 If no 'image' instance is created, it produces flattened image from all
 visible layers.
+
+ It creates review, thumbnail and mov representations.
+
+ The 'review' family could be used in other steps as a reference, as it
+ contains a flattened image by default. (E.g. an artist could load this
+ review as a single item and see the full image. In most cases the 'image'
+ family is separated by layers for better usage in animation or comp.)
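+
+    Output file names are driven by 'output_seq_filename' from Settings, a
+    printf-style pattern (a sketch, assuming a value of 'review_%03d.jpg'):
+
+    >>> "review_%03d.jpg" % 0
+    'review_000.jpg'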
""" label = "Extract Review" @@ -22,6 +33,7 @@ class ExtractReview(openpype.api.Extractor): jpg_options = None mov_options = None make_image_sequence = None + max_downscale_size = 8192 def process(self, instance): staging_dir = self.staging_dir(instance) @@ -37,7 +49,7 @@ class ExtractReview(openpype.api.Extractor): if self.make_image_sequence and len(layers) > 1: self.log.info("Extract layers to image sequence.") - img_list = self._saves_sequences_layers(staging_dir, layers) + img_list = self._save_sequence_images(staging_dir, layers) instance.data["representations"].append({ "name": "jpg", @@ -49,117 +61,163 @@ class ExtractReview(openpype.api.Extractor): "stagingDir": staging_dir, "tags": self.jpg_options['tags'], }) - + processed_img_names = img_list else: self.log.info("Extract layers to flatten image.") - img_list = self._saves_flattened_layers(staging_dir, layers) + img_list = self._save_flatten_image(staging_dir, layers) instance.data["representations"].append({ "name": "jpg", "ext": "jpg", - "files": img_list, + "files": img_list, # cannot be [] for single frame "stagingDir": staging_dir, "tags": self.jpg_options['tags'] }) + processed_img_names = [img_list] - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") instance.data["stagingDir"] = staging_dir - # Generate thumbnail. - thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") - self.log.info(f"Generate thumbnail {thumbnail_path}") - args = [ - ffmpeg_path, - "-y", - "-i", os.path.join(staging_dir, self.output_seq_filename), - "-vf", "scale=300:-1", - "-vframes", "1", - thumbnail_path - ] - output = openpype.lib.run_subprocess(args) + source_files_pattern = os.path.join(staging_dir, + self.output_seq_filename) + source_files_pattern = self._check_and_resize(processed_img_names, + source_files_pattern, + staging_dir) + self._generate_thumbnail(ffmpeg_path, instance, source_files_pattern, + staging_dir) - instance.data["representations"].append({ - "name": "thumbnail", - "ext": "jpg", - "files": os.path.basename(thumbnail_path), - "stagingDir": staging_dir, - "tags": ["thumbnail"] - }) + no_of_frames = len(processed_img_names) + if no_of_frames > 1: + self._generate_mov(ffmpeg_path, instance, fps, no_of_frames, + source_files_pattern, staging_dir) + self.log.info(f"Extracted {instance} to {staging_dir}") + + def _generate_mov(self, ffmpeg_path, instance, fps, no_of_frames, + source_files_pattern, staging_dir): + """Generates .mov to upload to Ftrack. + + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + fps (str) + no_of_frames (int): + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ # Generate mov. 
mov_path = os.path.join(staging_dir, "review.mov") self.log.info(f"Generate mov review: {mov_path}") - img_number = len(img_list) args = [ ffmpeg_path, "-y", - "-i", os.path.join(staging_dir, self.output_seq_filename), + "-i", source_files_pattern, "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", - "-vframes", str(img_number), + "-vframes", str(no_of_frames), mov_path ] - output = openpype.lib.run_subprocess(args) - self.log.debug(output) + self.log.debug("mov args:: {}".format(args)) + _output = run_subprocess(args) instance.data["representations"].append({ "name": "mov", "ext": "mov", "files": os.path.basename(mov_path), "stagingDir": staging_dir, "frameStart": 1, - "frameEnd": img_number, + "frameEnd": no_of_frames, "fps": fps, "preview": True, "tags": self.mov_options['tags'] }) - # Required for extract_review plugin (L222 onwards). - instance.data["frameStart"] = 1 - instance.data["frameEnd"] = img_number - instance.data["fps"] = 25 + def _generate_thumbnail(self, ffmpeg_path, instance, source_files_pattern, + staging_dir): + """Generates scaled down thumbnail and adds it as representation. - self.log.info(f"Extracted {instance} to {staging_dir}") + Args: + ffmpeg_path (str): path to ffmpeg + instance (Pyblish Instance) + source_files_pattern (str): name of source file + staging_dir (str): temporary location to store thumbnail + Updates: + instance - adds representation portion + """ + # Generate thumbnail + thumbnail_path = os.path.join(staging_dir, "thumbnail.jpg") + self.log.info(f"Generate thumbnail {thumbnail_path}") + args = [ + ffmpeg_path, + "-y", + "-i", source_files_pattern, + "-vf", "scale=300:-1", + "-vframes", "1", + thumbnail_path + ] + self.log.debug("thumbnail args:: {}".format(args)) + _output = run_subprocess(args) + instance.data["representations"].append({ + "name": "thumbnail", + "ext": "jpg", + "outputName": "thumb", + "files": os.path.basename(thumbnail_path), + "stagingDir": staging_dir, + "tags": ["thumbnail", "delete"] + }) - def _get_image_path_from_instances(self, instance): - img_list = [] + def _check_and_resize(self, processed_img_names, source_files_pattern, + staging_dir): + """Check if saved image could be used in ffmpeg. - for instance in sorted(instance.context): - if instance.data["family"] != "image": - continue + Ffmpeg has max size 16384x16384. Saved image(s) must be resized to be + used as a source for thumbnail or review mov. + """ + Image.MAX_IMAGE_PIXELS = None + first_url = os.path.join(staging_dir, processed_img_names[0]) + with Image.open(first_url) as im: + width, height = im.size - for rep in instance.data["representations"]: - img_path = os.path.join( - rep["stagingDir"], - rep["files"] - ) - img_list.append(img_path) + if width > self.max_downscale_size or height > self.max_downscale_size: + resized_dir = os.path.join(staging_dir, "resized") + os.mkdir(resized_dir) + source_files_pattern = os.path.join(resized_dir, + self.output_seq_filename) + for file_name in processed_img_names: + source_url = os.path.join(staging_dir, file_name) + with Image.open(source_url) as res_img: + # 'thumbnail' automatically keeps aspect ratio + res_img.thumbnail((self.max_downscale_size, + self.max_downscale_size), + Image.ANTIALIAS) + res_img.save(os.path.join(resized_dir, file_name)) - return img_list - - def _copy_image_to_staging_dir(self, staging_dir, img_list): - copy_files = [] - for i, img_src in enumerate(img_list): - img_filename = self.output_seq_filename % i - img_dst = os.path.join(staging_dir, img_filename) - - self.log.debug( - "Copying file .. 
{} -> {}".format(img_src, img_dst) - ) - shutil.copy(img_src, img_dst) - copy_files.append(img_filename) - - return copy_files + return source_files_pattern def _get_layers_from_image_instances(self, instance): + """Collect all layers from 'instance'. + + Returns: + (list) of PSItem + """ layers = [] for image_instance in instance.context: if image_instance.data["family"] != "image": continue + if not image_instance.data.get("layer"): + # dummy instance for flatten image + continue layers.append(image_instance.data.get("layer")) return sorted(layers) - def _saves_flattened_layers(self, staging_dir, layers): + def _save_flatten_image(self, staging_dir, layers): + """Creates flat image from 'layers' into 'staging_dir'. + + Returns: + (str): path to new image + """ img_filename = self.output_seq_filename % 0 output_image_path = os.path.join(staging_dir, img_filename) stub = photoshop.stub() @@ -173,7 +231,13 @@ class ExtractReview(openpype.api.Extractor): return img_filename - def _saves_sequences_layers(self, staging_dir, layers): + def _save_sequence_images(self, staging_dir, layers): + """Creates separate flat images from 'layers' into 'staging_dir'. + + Used as source for multi frames .mov to review at once. + Returns: + (list): paths to new images + """ stub = photoshop.stub() list_img_filename = [] diff --git a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py b/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py index 03086f389f..aa900fec9f 100644 --- a/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py +++ b/openpype/hosts/photoshop/plugins/publish/extract_save_scene.py @@ -1,11 +1,11 @@ -import openpype.api +from openpype.pipeline import publish from openpype.hosts.photoshop import api as photoshop -class ExtractSaveScene(openpype.api.Extractor): +class ExtractSaveScene(publish.Extractor): """Save scene before extraction.""" - order = openpype.api.Extractor.order - 0.49 + order = publish.Extractor.order - 0.49 label = "Extract Save Scene" hosts = ["photoshop"] families = ["workfile"] diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml new file mode 100644 index 0000000000..e05ac92182 --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_instance_asset.xml @@ -0,0 +1,20 @@ + + + +Asset does not match + +## Collected asset name is not same as in context + + {msg} +### How to repair? + {repair_msg} + Refresh Publish afterwards (circle arrow at the bottom right). + + If that's not correct value, close workfile and reopen via Workfiles to get + proper context asset name OR disable this validator and publish again + if you are publishing to different context deliberately. + + (Context means combination of project, asset name and task name.) + + + \ No newline at end of file diff --git a/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml new file mode 100644 index 0000000000..023bbf26fa --- /dev/null +++ b/openpype/hosts/photoshop/plugins/publish/help/validate_naming.xml @@ -0,0 +1,21 @@ + + + +Subset name + +## Invalid subset or layer name + +Subset or layer name cannot contain specific characters (spaces etc) which could cause issue when subset name is used in a published file name. + {msg} + +### How to repair? + +You can fix this with "repair" button on the right and press Refresh publishing button at the bottom right. 
+ + +### __Detailed Info__ (optional) + +Not all characters are allowed in file names on every OS. The set of disallowed characters can be configured in Settings. + + + \ No newline at end of file diff --git a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py b/openpype/hosts/photoshop/plugins/publish/increment_workfile.py index 92132c393b..665dd67fc5 100644 --- a/openpype/hosts/photoshop/plugins/publish/increment_workfile.py +++ b/openpype/hosts/photoshop/plugins/publish/increment_workfile.py @@ -1,6 +1,6 @@ import os import pyblish.api -from openpype.action import get_errored_plugins_from_data +from openpype.pipeline.publish import get_errored_plugins_from_context from openpype.lib import version_up from openpype.hosts.photoshop import api as photoshop @@ -19,7 +19,7 @@ class IncrementWorkfile(pyblish.api.InstancePlugin): optional = True def process(self, instance): - errored_plugins = get_errored_plugins_from_data(instance.context) + errored_plugins = get_errored_plugins_from_context(instance.context) if errored_plugins: raise RuntimeError( "Skipping incrementing current file because publishing failed." diff --git a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py index ebe9cc21ea..b9d721dbdb 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_instance_asset.py @@ -1,6 +1,11 @@ -from avalon import api import pyblish.api -import openpype.api + +from openpype.pipeline import legacy_io +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, + OptionalPyblishPluginMixin +) from openpype.hosts.photoshop import api as photoshop @@ -26,34 +31,42 @@ class ValidateInstanceAssetRepair(pyblish.api.Action): for instance in instances: data = stub.read(instance[0]) - data["asset"] = api.Session["AVALON_ASSET"] + data["asset"] = legacy_io.Session["AVALON_ASSET"] stub.imprint(instance[0], data) -class ValidateInstanceAsset(pyblish.api.InstancePlugin): +class ValidateInstanceAsset(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): """Validate the instance asset is the current selected context asset. - As it might happen that multiple worfiles are opened, switching - between them would mess with selected context. - In that case outputs might be output under wrong asset! + As it might happen that multiple workfiles are opened, switching + between them would mess with the selected context. + In that case outputs might be published under the wrong asset! - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. + The Repair action will use the Context asset value (from Workfiles or Launcher). + Closing and reopening via Workfiles will refresh the Context value. """ label = "Validate Instance Asset" hosts = ["photoshop"] + optional = True actions = [ValidateInstanceAssetRepair] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): instance_asset = instance.data["asset"] - current_asset = api.Session["AVALON_ASSET"] - msg = ( - f"Instance asset {instance_asset} is not the same " - f"as current context {current_asset}. PLEASE DO:\n" - f"Repair with 'A' action to use '{current_asset}'.\n" - f"If that's not correct value, close workfile and " - f"reopen via Workfiles!"
- ) - assert instance_asset == current_asset, msg + current_asset = legacy_io.Session["AVALON_ASSET"] + + if instance_asset != current_asset: + msg = ( + f"Instance asset {instance_asset} is not the same " + f"as current context {current_asset}." + + ) + repair_msg = ( + f"Repair with 'Repair' button to use '{current_asset}'.\n" + ) + formatting_data = {"msg": msg, + "repair_msg": repair_msg} + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/hosts/photoshop/plugins/publish/validate_naming.py b/openpype/hosts/photoshop/plugins/publish/validate_naming.py index b40e44d016..07810f505e 100644 --- a/openpype/hosts/photoshop/plugins/publish/validate_naming.py +++ b/openpype/hosts/photoshop/plugins/publish/validate_naming.py @@ -1,8 +1,13 @@ import re import pyblish.api -import openpype.api + from openpype.hosts.photoshop import api as photoshop +from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateNamingRepair(pyblish.api.Action): @@ -22,32 +27,42 @@ class ValidateNamingRepair(pyblish.api.Action): failed.append(result["instance"]) invalid_chars, replace_char = plugin.get_replace_chars() - self.log.info("{} --- {}".format(invalid_chars, replace_char)) + self.log.debug("{} --- {}".format(invalid_chars, replace_char)) # Apply pyblish.logic to get the instances for the plug-in instances = pyblish.api.instances_by_plugin(failed, plugin) stub = photoshop.stub() for instance in instances: - self.log.info("validate_naming instance {}".format(instance)) - metadata = stub.read(instance[0]) - self.log.info("metadata instance {}".format(metadata)) - layer_name = None - if metadata.get("uuid"): - layer_data = stub.get_layer(metadata["uuid"]) - self.log.info("layer_data {}".format(layer_data)) - if layer_data: - layer_name = re.sub(invalid_chars, - replace_char, - layer_data.name) + self.log.debug("validate_naming instance {}".format(instance)) + current_layer_state = stub.get_layer(instance.data["layer"].id) + self.log.debug("current_layer{}".format(current_layer_state)) - stub.rename_layer(instance.data["uuid"], layer_name) + layer_meta = stub.read(current_layer_state) + instance_id = (layer_meta.get("instance_id") or + layer_meta.get("uuid")) + if not instance_id: + self.log.warning("Unable to repair, cannot find layer") + continue + + layer_name = re.sub(invalid_chars, + replace_char, + current_layer_state.clean_name) + layer_name = stub.PUBLISH_ICON + layer_name + + stub.rename_layer(current_layer_state.id, layer_name) subset_name = re.sub(invalid_chars, replace_char, - instance.data["name"]) + instance.data["subset"]) - instance[0].Name = layer_name or subset_name - metadata["subset"] = subset_name - stub.imprint(instance[0], metadata) + # format from Tool Creator + subset_name = re.sub( + "[^{}]+".format(SUBSET_NAME_ALLOWED_SYMBOLS), + "", + subset_name + ) + + layer_meta["subset"] = subset_name + stub.imprint(instance_id, layer_meta) return True @@ -60,7 +75,7 @@ class ValidateNaming(pyblish.api.InstancePlugin): label = "Validate Naming" hosts = ["photoshop"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["image"] actions = [ValidateNamingRepair] @@ -69,14 +84,25 @@ class ValidateNaming(pyblish.api.InstancePlugin): replace_char = '' def process(self, instance): - help_msg = ' Use Repair action (A) in Pyblish to fix it.' 
- msg = "Name \"{}\" is not allowed.{}".format(instance.data["name"], - help_msg) - assert not re.search(self.invalid_chars, instance.data["name"]), msg + help_msg = ' Use Repair button to fix it and then refresh publish.' + + layer = instance.data.get("layer") + if layer: + msg = "Name \"{}\" is not allowed.{}".format(layer.clean_name, + help_msg) + + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, layer.clean_name): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data + ) msg = "Subset \"{}\" is not allowed.{}".format(instance.data["subset"], help_msg) - assert not re.search(self.invalid_chars, instance.data["subset"]), msg + formatting_data = {"msg": msg} + if re.search(self.invalid_chars, instance.data["subset"]): + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) @classmethod def get_replace_chars(cls): diff --git a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py b/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py deleted file mode 100644 index 40abfb1bbd..0000000000 --- a/openpype/hosts/photoshop/plugins/publish/validate_unique_subsets.py +++ /dev/null @@ -1,30 +0,0 @@ -import collections -import pyblish.api -import openpype.api - - -class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): - """ - Validate that all subset's names are unique. - """ - - label = "Validate Subset Uniqueness" - hosts = ["photoshop"] - order = openpype.api.ValidateContentsOrder - families = ["image"] - - def process(self, context): - subset_names = [] - - for instance in context: - self.log.info("instance:: {}".format(instance.data)) - if instance.data.get('publish'): - subset_names.append(instance.data.get('subset')) - - non_unique = \ - [item - for item, count in collections.Counter(subset_names).items() - if count > 1] - msg = ("Instance subset names {} are not unique. ".format(non_unique) + - "Remove duplicates via SubsetManager.") - assert not non_unique, msg diff --git a/openpype/hosts/resolve/README.markdown b/openpype/hosts/resolve/README.markdown index 8c9f72fb0c..a8bb071e7e 100644 --- a/openpype/hosts/resolve/README.markdown +++ b/openpype/hosts/resolve/README.markdown @@ -1,22 +1,24 @@ -#### Basic setup +## Basic setup -- Install [latest DaVinci Resolve](https://sw.blackmagicdesign.com/DaVinciResolve/v16.2.8/DaVinci_Resolve_Studio_16.2.8_Windows.zip?Key-Pair-Id=APKAJTKA3ZJMJRQITVEA&Signature=EcFuwQFKHZIBu2zDj5LTCQaQDXcKOjhZY7Fs07WGw24xdDqfwuALOyKu+EVzDX2Tik0cWDunYyV0r7hzp+mHmczp9XP4YaQXHdyhD/2BGWDgiMsiTQbNkBgbfy5MsAMFY8FHCl724Rxm8ke1foWeUVyt/Cdkil+ay+9sL72yFhaSV16sncko1jCIlCZeMkHhbzqPwyRuqLGmxmp8ey9KgBhI3wGFFPN201VMaV+RHrpX+KAfaR6p6dwo3FrPbRHK9TvMI1RA/1lJ3fVtrkDW69LImIKAWmIxgcStUxR9/taqLOD66FNiflHd1tufHv3FBa9iYQsjb3VLMPx7OCwLyg==&Expires=1608308139) -- add absolute path to ffmpeg into openpype settings - ![image](https://user-images.githubusercontent.com/40640033/102630786-43294f00-414d-11eb-98de-f0ae51f62077.png) -- install Python 3.6 into `%LOCALAPPDATA%/Programs/Python/Python36` (only respected path by Resolve) -- install OpenTimelineIO for 3.6 `%LOCALAPPDATA%\Programs\Python\Python36\python.exe -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move built files from `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `%LOCALAPPDATA%/Programs/Python/Python36/Lib/site-packages/opentimelineio/`. 
I was building it on Win10 machine with Visual Studio Community 2019 and +- Currently supported versions: up to v18 +- install Python 3.6.2 (latest tested with v17) or up to 3.9.13 (latest tested with v18) +- pip install PySide2: + - Python 3.9.*: open a terminal, go to the python.exe directory, then run `python -m pip install PySide2` +- pip install OpenTimelineIO: + - Python 3.9.*: open a terminal, go to the python.exe directory, then run `python -m pip install OpenTimelineIO` + - Python 3.6: open a terminal, go to the python.exe directory, then run `python -m pip install git+https://github.com/PixarAnimationStudios/OpenTimelineIO.git@5aa24fbe89d615448876948fe4b4900455c9a3e8` and move the built files from `./Lib/site-packages/opentimelineio/cxx-libs/bin and lib` to `./Lib/site-packages/opentimelineio/`. I was building it on a Win10 machine with Visual Studio Community 2019 and ![image](https://user-images.githubusercontent.com/40640033/102792588-ffcb1c80-43a8-11eb-9c6b-bf2114ed578e.png) with installed CMake in PATH. - make sure Resolve Fusion (Fusion Tab/menu/Fusion/Fusion Settings) is set to Python 3.6 ![image](https://user-images.githubusercontent.com/40640033/102631545-280b0f00-414e-11eb-89fc-98ac268d209d.png) +- Open OpenPype **Tray/Admin/Studio settings** > `applications/resolve/environment` and add the Python 3 path to `RESOLVE_PYTHON3_HOME` for each platform. -#### Editorial setup +## Editorial setup This is how it looks on my testing project timeline ![image](https://user-images.githubusercontent.com/40640033/102637638-96ec6600-4156-11eb-9656-6e8e3ce4baf8.png) Notice I had renamed tracks to `main` (holding metadata markers) and `review` used for generating review data with ffmpeg conversion to jpg sequence. -1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/**__OpenPype_Menu__** +1. you need to start OpenPype menu from Resolve/EditTab/Menu/Workspace/Scripts/Comp/**__OpenPype_Menu__** 2. then select any clips in `main` track and change their color to `Chocolate` 3. in OpenPype Menu select `Create` 4. in Creator select `Create Publishable Clip [New]` (temporary name) diff --git a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt b/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt deleted file mode 100644 index 139b66bc24..0000000000 --- a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_down.txt +++ /dev/null @@ -1,189 +0,0 @@ -Updated as of 08 March 2019 - --------------------------- -In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples. - -Overview -------- - -As with Blackmagic Design Fusion scripts, user scripts written in Lua and Python programming languages are supported. By default, scripts can be invoked from the Console window in the Fusion page, or via command line. This permission can be changed in Resolve Preferences, to be only from Console, or to be invoked from the local network. Please be aware of the security implications when allowing scripting access from outside of the Resolve application. - - -Using a script -------------- -DaVinci Resolve needs to be running for a script to be invoked. - -For a Resolve script to be executed from an external folder, the script needs to know of the API location.
-You may need to set the these environment variables to allow for your Python installation to pick up the appropriate dependencies as shown below: - - Mac OS X: - RESOLVE_SCRIPT_API="/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting/" - RESOLVE_SCRIPT_LIB="/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - - Windows: - RESOLVE_SCRIPT_API="%PROGRAMDATA%\\Blackmagic Design\\DaVinci Resolve\\Support\\Developer\\Scripting\\" - RESOLVE_SCRIPT_LIB="C:\\Program Files\\Blackmagic Design\\DaVinci Resolve\\fusionscript.dll" - PYTHONPATH="%PYTHONPATH%;%RESOLVE_SCRIPT_API%\\Modules\\" - - Linux: - RESOLVE_SCRIPT_API="/opt/resolve/Developer/Scripting/" - RESOLVE_SCRIPT_LIB="/opt/resolve/libs/Fusion/fusionscript.so" - PYTHONPATH="$PYTHONPATH:$RESOLVE_SCRIPT_API/Modules/" - (Note: For standard ISO Linux installations, the path above may need to be modified to refer to /home/resolve instead of /opt/resolve) - -As with Fusion scripts, Resolve scripts can also be invoked via the menu and the Console. - -On startup, DaVinci Resolve scans the Utility Scripts directory and enumerates the scripts found in the Script application menu. Placing your script in this folder and invoking it from this menu is the easiest way to use scripts. The Utility Scripts folder is located in: - Mac OS X: /Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp/ - Windows: %APPDATA%\Blackmagic Design\DaVinci Resolve\Fusion\Scripts\Comp\ - Linux: /opt/resolve/Fusion/Scripts/Comp/ (or /home/resolve/Fusion/Scripts/Comp/ depending on installation) - -The interactive Console window allows for an easy way to execute simple scripting commands, to query or modify properties, and to test scripts. The console accepts commands in Python 2.7, Python 3.6 and Lua and evaluates and executes them immediately. For more information on how to use the Console, please refer to the DaVinci Resolve User Manual. - -This example Python script creates a simple project: - #!/usr/bin/env python - import DaVinciResolveScript as dvr_script - resolve = dvr_script.scriptapp("Resolve") - fusion = resolve.Fusion() - projectManager = resolve.GetProjectManager() - projectManager.CreateProject("Hello World") - -The resolve object is the fundamental starting point for scripting via Resolve. As a native object, it can be inspected for further scriptable properties - using table iteration and `getmetatable` in Lua and dir, help etc in Python (among other methods). A notable scriptable object above is fusion - it allows access to all existing Fusion scripting functionality. - -Running DaVinci Resolve in headless mode ----------------------------------------- - -DaVinci Resolve can be launched in a headless mode without the user interface using the -nogui command line option. When DaVinci Resolve is launched using this option, the user interface is disabled. However, the various scripting APIs will continue to work as expected. - -Basic Resolve API ------------------ - -Some commonly used API functions are described below (*). As with the resolve object, each object is inspectable for properties and functions. - - -Resolve - Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts. - GetMediaStorage() --> MediaStorage # Returns media storage object to query and act on media locations. - GetProjectManager() --> ProjectManager # Returns project manager object for currently open database. 
- OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "edit", "fusion", "color", "fairlight", "deliver"). -ProjectManager - CreateProject(projectName) --> Project # Creates and returns a project if projectName (text) is unique, and None if it is not. - LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (text) if there is a match found, and None if there is no matching Project. - GetCurrentProject() --> Project # Returns the currently loaded Resolve project. - SaveProject() --> Bool # Saves the currently loaded project with its own name. Returns True if successful. - CreateFolder(folderName) --> Bool # Creates a folder if folderName (text) is unique. - GetProjectsInCurrentFolder() --> [project names...] # Returns an array of project names in current folder. - GetFoldersInCurrentFolder() --> [folder names...] # Returns an array of folder names in current folder. - GotoRootFolder() --> Bool # Opens root folder in database. - GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent. - OpenFolder(folderName) --> Bool # Opens folder under given name. - ImportProject(filePath) --> Bool # Imports a project under given file path. Returns true in case of success. - ExportProject(projectName, filePath) --> Bool # Exports a project based on given name into provided file path. Returns true in case of success. - RestoreProject(filePath) --> Bool # Restores a project under given backup file path. Returns true in case of success. -Project - GetMediaPool() --> MediaPool # Returns the Media Pool object. - GetTimelineCount() --> int # Returns the number of timelines currently present in the project. - GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount() - GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline. - SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful. - GetName() --> string # Returns project name. - SetName(projectName) --> Bool # Sets project name if given projectname (text) is unique. - GetPresets() --> [presets...] # Returns a table of presets and their information. - SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project. - GetRenderJobs() --> [render jobs...] # Returns a table of render jobs and their information. - GetRenderPresets() --> [presets...] # Returns a table of render presets and their information. - StartRendering(index1, index2, ...) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs. - StartRendering([idxs...]) --> Bool # Starts rendering for given render jobs based on their indices. If no parameter is given rendering would start for all render jobs. - StopRendering() --> None # Stops rendering for all render jobs. - IsRenderingInProgress() --> Bool # Returns true is rendering is in progress. - AddRenderJob() --> Bool # Adds render job to render queue. - DeleteRenderJobByIndex(idx) --> Bool # Deletes render job based on given job index (int). - DeleteAllRenderJobs() --> Bool # Deletes all render jobs. - LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (text) exists. - SaveAsNewRenderPreset(presetName) --> Bool # Creates a new render preset by given name if presetName(text) is unique. 
- SetRenderSettings([settings map]) --> Bool # Sets given settings for rendering. Settings map is a map, keys of map are: "SelectAllFrames", "MarkIn", "MarkOut", "TargetDir", "CustomName". - GetRenderJobStatus(idx) --> [status info] # Returns job status and completion rendering percentage of the job by given job index (int). - GetSetting(settingName) --> string # Returns setting value by given settingName (string) if the setting exist. With empty settingName the function returns a full list of settings. - SetSetting(settingName, settingValue) --> Bool # Sets project setting base on given name (string) and value (string). - GetRenderFormats() --> [render formats...]# Returns a list of available render formats. - GetRenderCodecs(renderFormat) --> [render codecs...] # Returns a list of available codecs for given render format (string). - GetCurrentRenderFormatAndCodec() --> [format, codec] # Returns currently selected render format and render codec. - SetCurrentRenderFormatAndCodec(format, codec) --> Bool # Sets given render format (string) and render codec (string) as options for rendering. -MediaStorage - GetMountedVolumes() --> [paths...] # Returns an array of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage. - GetSubFolders(folderPath) --> [paths...] # Returns an array of folder paths in the given absolute folder path. - GetFiles(folderPath) --> [paths...] # Returns an array of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries. - RevealInStorage(path) --> None # Expands and displays a given file/folder path in Resolve’s Media Storage. - AddItemsToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is one or more file/folder paths. - AddItemsToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Store into current Media Pool folder. Input is an array of file/folder paths. -MediaPool - GetRootFolder() --> Folder # Returns the root Folder of Media Pool - AddSubFolder(folder, name) --> Folder # Adds a new subfolder under specified Folder object with the given name. - CreateEmptyTimeline(name) --> Timeline # Adds a new timeline with given name. - AppendToTimeline(clip1, clip2...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - CreateTimelineFromClips(name, clip1, clip2, ...)--> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects. - CreateTimelineFromClips(name, [clips]) --> Timeline # Creates a new timeline with specified name, and appends the specified MediaPoolItem objects. - ImportTimelineFromFile(filePath) --> Timeline # Creates timeline based on parameters within given file. - GetCurrentFolder() --> Folder # Returns currently selected Folder. - SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder. -Folder - GetClips() --> [clips...] # Returns a list of clips (items) within the folder. - GetName() --> string # Returns user-defined name of the folder. - GetSubFolders() --> [folders...] # Returns a list of subfolders in the folder. -MediaPoolItem - GetMetadata(metadataType) --> [[types],[values]] # Returns a value of metadataType. If parameter is not specified returns all set metadata parameters. 
- SetMetadata(metadataType, metadataValue) --> Bool # Sets metadata by given type and value. Returns True if successful. - GetMediaId() --> string # Returns a unique ID name related to MediaPoolItem. - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. - AddFlag(color) --> Bool # Adds a flag with given color (text). - GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item. - GetClipColor() --> string # Returns an item color as a string. - GetClipProperty(propertyName) --> [[types],[values]] # Returns property value related to the item based on given propertyName (string). if propertyName is empty then it returns a full list of properties. - SetClipProperty(propertyName, propertyValue) --> Bool # Sets into given propertyName (string) propertyValue (string). -Timeline - GetName() --> string # Returns user-defined name of the timeline. - SetName(timelineName) --> Bool # Sets timeline name is timelineName (text) is unique. - GetStartFrame() --> int # Returns frame number at the start of timeline. - GetEndFrame() --> int # Returns frame number at the end of timeline. - GetTrackCount(trackType) --> int # Returns a number of track based on specified track type ("audio", "video" or "subtitle"). - GetItemsInTrack(trackType, index) --> [items...] # Returns an array of Timeline items on the video or audio track (based on trackType) at specified index. 1 <= index <= GetTrackCount(trackType). - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. - ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - GetCurrentTimecode() --> string # Returns a string representing a timecode for current position of the timeline, while on Cut, Edit, Color and Deliver page. - GetCurrentVideoItem() --> item # Returns current video timeline item. - GetCurrentClipThumbnailImage() --> [width, height, format, data] # Returns raw thumbnail image data (This image data is encoded in base 64 format and the image format is RGB 8 bit) for the current media in the Color Page in the format of dictionary (in Python) and table (in Lua). Information return are "width", "height", "format" and "data". Example is provided in 6_get_current_media_thumbnail.py in Example folder. -TimelineItem - GetName() --> string # Returns a name of the item. - GetDuration() --> int # Returns a duration of item. - GetEnd() --> int # Returns a position of end frame. - GetFusionCompCount() --> int # Returns the number of Fusion compositions associated with the timeline item. - GetFusionCompByIndex(compIndex) --> fusionComp # Returns Fusion composition object based on given index. 1 <= compIndex <= timelineItem.GetFusionCompCount() - GetFusionCompNames() --> [names...] # Returns a list of Fusion composition names associated with the timeline item. 
- GetFusionCompByName(compName) --> fusionComp # Returns Fusion composition object based on given name. - GetLeftOffset() --> int # Returns a maximum extension by frame for clip from left side. - GetRightOffset() --> int # Returns a maximum extension by frame for clip from right side. - GetStart() --> int # Returns a position of first frame. - AddMarker(frameId, color, name, note, duration) --> Bool # Creates a new marker at given frameId position and with given marker information. - GetMarkers() --> [markers...] # Returns a list of all markers and their information. - GetFlags() --> [colors...] # Returns a list of flag colors assigned to the item. - GetClipColor() --> string # Returns an item color as a string. - AddFusionComp() --> fusionComp # Adds a new Fusion composition associated with the timeline item. - ImportFusionComp(path) --> fusionComp # Imports Fusion composition from given file path by creating and adding a new composition for the item. - ExportFusionComp(path, compIndex) --> Bool # Exports Fusion composition based on given index into provided file name path. - DeleteFusionCompByName(compName) --> Bool # Deletes Fusion composition by provided name. - LoadFusionCompByName(compName) --> fusionComp # Loads Fusion composition by provided name and sets it as active composition. - RenameFusionCompByName(oldName, newName) --> Bool # Renames Fusion composition by provided name with new given name. - AddVersion(versionName, versionType) --> Bool # Adds a new Version associated with the timeline item. versionType: 0 - local, 1 - remote. - DeleteVersionByName(versionName, versionType) --> Bool # Deletes Version by provided name. versionType: 0 - local, 1 - remote. - LoadVersionByName(versionName, versionType) --> Bool # Loads Version by provided name and sets it as active Version. versionType: 0 - local, 1 - remote. - RenameVersionByName(oldName, newName, versionType)--> Bool # Renames Version by provided name with new given name. versionType: 0 - local, 1 - remote. - GetMediaPoolItem() --> MediaPoolItem # Returns a corresponding to the timeline item media pool item if it exists. - GetVersionNames(versionType) --> [strings...] # Returns a list of version names by provided versionType: 0 - local, 1 - remote. - GetStereoConvergenceValues() --> [offset, value] # Returns a table of keyframe offsets and respective convergence values - GetStereoLeftFloatingWindowParams() --> [offset, value] # For the LEFT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values - GetStereoRightFloatingWindowParams() --> [offset, value] # For the RIGHT eye -> returns a table of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values diff --git a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt b/openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt similarity index 70% rename from openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt rename to openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt index f1b8b81a71..98597a12cb 100644 --- a/openpype/hosts/resolve/RESOLVE_API_README_v16.2.0_up.txt +++ b/openpype/hosts/resolve/RESOLVE_API_v18.0.4.txt @@ -1,5 +1,5 @@ -Updated as of 20 October 2020 ------------------------------ +Updated as of 9 May 2022 +---------------------------- In this package, you will find a brief introduction to the Scripting API for DaVinci Resolve Studio. 
Apart from this README.txt file, this package contains folders containing the basic import modules for scripting access (DaVinciResolve.py) and some representative examples. @@ -89,12 +89,25 @@ Resolve Fusion() --> Fusion # Returns the Fusion object. Starting point for Fusion scripts. GetMediaStorage() --> MediaStorage # Returns the media storage object to query and act on media locations. GetProjectManager() --> ProjectManager # Returns the project manager object for currently open database. - OpenPage(pageName) --> None # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver"). + OpenPage(pageName) --> Bool # Switches to indicated page in DaVinci Resolve. Input can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver"). + GetCurrentPage() --> String # Returns the page currently displayed in the main window. Returned value can be one of ("media", "cut", "edit", "fusion", "color", "fairlight", "deliver", None). GetProductName() --> string # Returns product name. GetVersion() --> [version fields] # Returns list of product version fields in [major, minor, patch, build, suffix] format. GetVersionString() --> string # Returns product version in "major.minor.patch[suffix].build" format. + LoadLayoutPreset(presetName) --> Bool # Loads UI layout from saved preset named 'presetName'. + UpdateLayoutPreset(presetName) --> Bool # Overwrites preset named 'presetName' with current UI layout. + ExportLayoutPreset(presetName, presetFilePath) --> Bool # Exports preset named 'presetName' to path 'presetFilePath'. + DeleteLayoutPreset(presetName) --> Bool # Deletes preset named 'presetName'. + SaveLayoutPreset(presetName) --> Bool # Saves current UI layout as a preset named 'presetName'. + ImportLayoutPreset(presetFilePath, presetName) --> Bool # Imports preset from path 'presetFilePath'. The optional argument 'presetName' specifies how the preset shall be named. If not specified, the preset is named based on the filename. + Quit() --> None # Quits the Resolve App. ProjectManager + ArchiveProject(projectName, + filePath, + isArchiveSrcMedia=True, + isArchiveRenderCache=True, + isArchiveProxyMedia=False) --> Bool # Archives project to provided file path with the configuration as provided by the optional arguments CreateProject(projectName) --> Project # Creates and returns a project if projectName (string) is unique, and None if it is not. DeleteProject(projectName) --> Bool # Delete project in the current folder if not currently loaded LoadProject(projectName) --> Project # Loads and returns the project with name = projectName (string) if there is a match found, and None if there is no matching Project. @@ -109,9 +122,9 @@ ProjectManager GotoParentFolder() --> Bool # Opens parent folder of current folder in database if current folder has parent. GetCurrentFolder() --> string # Returns the current folder name. OpenFolder(folderName) --> Bool # Opens folder under given name. - ImportProject(filePath) --> Bool # Imports a project from the file path provided. Returns True if successful. + ImportProject(filePath, projectName=None) --> Bool # Imports a project from the file path provided with given project name, if any. Returns True if successful. ExportProject(projectName, filePath, withStillsAndLUTs=True) --> Bool # Exports project to provided file path, including stills and LUTs if withStillsAndLUTs is True (enabled by default). Returns True in case of success. 
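For illustration, a minimal Python sketch of the project manager calls listed above; the paths and project names are hypothetical placeholders, and the resolve object comes from dvr_script.scriptapp("Resolve") as in the example script earlier:
    projectManager = resolve.GetProjectManager()
    projectManager.ArchiveProject("MyProject", "/backups/MyProject.dra")
    projectManager.ImportProject("/backups/OtherShow.drp", "OtherShow")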
- RestoreProject(filePath) --> Bool # Restores a project from the file path provided. Returns True if successful. + RestoreProject(filePath, projectName=None) --> Bool # Restores a project from the file path provided with given project name, if any. Returns True if successful. GetCurrentDatabase() --> {dbInfo} # Returns a dictionary (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to the current database connection GetDatabaseList() --> [{dbInfo}] # Returns a list of dictionary items (with keys 'DbType', 'DbName' and optional 'IpAddress') corresponding to all the databases added to Resolve SetCurrentDatabase({dbInfo}) --> Bool # Switches current database connection to the database specified by the keys below, and closes any open project. @@ -125,8 +138,9 @@ Project GetTimelineByIndex(idx) --> Timeline # Returns timeline at the given index, 1 <= idx <= project.GetTimelineCount() GetCurrentTimeline() --> Timeline # Returns the currently loaded timeline. SetCurrentTimeline(timeline) --> Bool # Sets given timeline as current timeline for the project. Returns True if successful. + GetGallery() --> Gallery # Returns the Gallery object. GetName() --> string # Returns project name. - SetName(projectName) --> Bool # Sets project name if given projectname (string) is unique. + SetName(projectName) --> Bool # Sets project name if given projectName (string) is unique. GetPresetList() --> [presets...] # Returns a list of presets and their information. SetPreset(presetName) --> Bool # Sets preset by given presetName (string) into project. AddRenderJob() --> string # Adds a render job based on current render settings to the render queue. Returns a unique job id (string) for the new render job. @@ -144,27 +158,7 @@ Project LoadRenderPreset(presetName) --> Bool # Sets a preset as current preset for rendering if presetName (string) exists. SaveAsNewRenderPreset(presetName) --> Bool # Creates new render preset by given name if presetName(string) is unique. SetRenderSettings({settings}) --> Bool # Sets given settings for rendering. Settings is a dict, with support for the keys: - # "SelectAllFrames": Bool - # "MarkIn": int - # "MarkOut": int - # "TargetDir": string - # "CustomName": string - # "UniqueFilenameStyle": 0 - Prefix, 1 - Suffix. - # "ExportVideo": Bool - # "ExportAudio": Bool - # "FormatWidth": int - # "FormatHeight": int - # "FrameRate": float (examples: 23.976, 24) - # "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope") - # "VideoQuality" possible values for current codec (if applicable): - # 0 (int) - will set quality to automatic - # [1 -> MAX] (int) - will set input bit rate - # ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level - # "AudioCodec": string (example: "aac") - # "AudioBitDepth": int - # "AudioSampleRate": int - # "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign") - # "GammaTag" : string (example: "Same as Project", "ACEScct") + # Refer to "Looking up render settings" section for information for supported settings GetRenderJobStatus(jobId) --> {status info} # Returns a dict with job status and completion percentage of the job by given jobId (string). GetSetting(settingName) --> string # Returns value of project setting (indicated by settingName, string). Check the section below for more information. SetSetting(settingName, settingValue) --> Bool # Sets the project setting (indicated by settingName, string) to the value (settingValue, string). 
Check the section below for more information. @@ -176,12 +170,13 @@ Project SetCurrentRenderMode(renderMode) --> Bool # Sets the render mode. Specify renderMode = 0 for Individual clips, 1 for Single clip. GetRenderResolutions(format, codec) --> [{Resolution}] # Returns list of resolutions applicable for the given render format (string) and render codec (string). Returns full list of resolutions if no argument is provided. Each element in the list is a dictionary with 2 keys "Width" and "Height". RefreshLUTList() --> Bool # Refreshes LUT List + GetUniqueId() --> string # Returns a unique ID for the project item MediaStorage GetMountedVolumeList() --> [paths...] # Returns list of folder paths corresponding to mounted volumes displayed in Resolve’s Media Storage. GetSubFolderList(folderPath) --> [paths...] # Returns list of folder paths in the given absolute folder path. GetFileList(folderPath) --> [paths...] # Returns list of media and file listings in the given absolute folder path. Note that media listings may be logically consolidated entries. - RevealInStorage(path) --> None # Expands and displays given file/folder path in Resolve’s Media Storage. + RevealInStorage(path) --> Bool # Expands and displays given file/folder path in Resolve’s Media Storage. AddItemListToMediaPool(item1, item2, ...) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is one or more file/folder paths. Returns a list of the MediaPoolItems created. AddItemListToMediaPool([items...]) --> [clips...] # Adds specified file/folder paths from Media Storage into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created. AddClipMattesToMediaPool(MediaPoolItem, [paths], stereoEye) --> Bool # Adds specified media files as mattes for the specified MediaPoolItem. StereoEye is an optional argument for specifying which eye to add the matte to for stereo clips ("left" or "right"). Returns True if successful. @@ -190,10 +185,11 @@ MediaStorage MediaPool GetRootFolder() --> Folder # Returns root Folder of Media Pool AddSubFolder(folder, name) --> Folder # Adds new subfolder under specified Folder object with the given name. + RefreshFolders() --> Bool # Updates the folders in collaboration mode CreateEmptyTimeline(name) --> Timeline # Adds new timeline with given name. - AppendToTimeline(clip1, clip2, ...) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([clips]) --> Bool # Appends specified MediaPoolItem objects in the current timeline. Returns True if successful. - AppendToTimeline([{clipInfo}, ...]) --> Bool # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int). + AppendToTimeline(clip1, clip2, ...) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. + AppendToTimeline([clips]) --> [TimelineItem] # Appends specified MediaPoolItem objects in the current timeline. Returns the list of appended timelineItems. + AppendToTimeline([{clipInfo}, ...]) --> [TimelineItem] # Appends list of clipInfos specified as dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int), (optional) "mediaType" (int; 1 - Video only, 2 - Audio only). Returns the list of appended timelineItems. CreateTimelineFromClips(name, clip1, clip2,...) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. 
CreateTimelineFromClips(name, [clips]) --> Timeline # Creates new timeline with specified name, and appends the specified MediaPoolItem objects. CreateTimelineFromClips(name, [{clipInfo}]) --> Timeline # Creates new timeline with specified name, appending the list of clipInfos specified as a dict of "mediaPoolItem", "startFrame" (int), "endFrame" (int). @@ -202,6 +198,8 @@ MediaPool # "importSourceClips": Bool, specifies whether source clips should be imported, True by default # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "importSourceClips" is True # "sourceClipsFolders": List of Media Pool folder objects to search for source clips if the media is not present in current folder and if "importSourceClips" is False + # "interlaceProcessing": Bool, specifies whether to enable interlace processing on the imported timeline being created. valid only for AAF import + DeleteTimelines([timeline]) --> Bool # Deletes specified timelines in the media pool. GetCurrentFolder() --> Folder # Returns currently selected Folder. SetCurrentFolder(Folder) --> Bool # Sets current folder by given Folder. DeleteClips([clips]) --> Bool # Deletes specified clips or timeline mattes in the media pool @@ -214,19 +212,26 @@ MediaPool RelinkClips([MediaPoolItem], folderPath) --> Bool # Update the folder location of specified media pool clips with the specified folder path. UnlinkClips([MediaPoolItem]) --> Bool # Unlink specified media pool clips. ImportMedia([items...]) --> [MediaPoolItems] # Imports specified file/folder paths into current Media Pool folder. Input is an array of file/folder paths. Returns a list of the MediaPoolItems created. + ImportMedia([{clipInfo}]) --> [MediaPoolItems] # Imports file path(s) into current Media Pool folder as specified in list of clipInfo dict. Returns a list of the MediaPoolItems created. + # Each clipInfo gets imported as one MediaPoolItem unless 'Show Individual Frames' is turned on. + # Example: ImportMedia([{"FilePath":"file_%03d.dpx", "StartIndex":1, "EndIndex":100}]) would import clip "file_[001-100].dpx". ExportMetadata(fileName, [clips]) --> Bool # Exports metadata of specified clips to 'fileName' in CSV format. # If no clips are specified, all clips from media pool will be used. + GetUniqueId() --> string # Returns a unique ID for the media pool Folder GetClipList() --> [clips...] # Returns a list of clips (items) within the folder. GetName() --> string # Returns the media folder name. GetSubFolderList() --> [folders...] # Returns a list of subfolders in the folder. + GetIsFolderStale() --> bool # Returns true if folder is stale in collaboration mode, false otherwise + GetUniqueId() --> string # Returns a unique ID for the media pool folder MediaPoolItem GetName() --> string # Returns the clip name. GetMetadata(metadataType=None) --> string|dict # Returns the metadata value for the key 'metadataType'. # If no argument is specified, a dict of all set metadata properties is returned. SetMetadata(metadataType, metadataValue) --> Bool # Sets the given metadata to metadataValue (string). Returns True if successful. + SetMetadata({metadata}) --> Bool # Sets the item metadata with specified 'metadata' dict. Returns True if successful. GetMediaId() --> string # Returns the unique ID for the MediaPoolItem. AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 
'customData' is optional and helps to attach user specific data to the marker. customData) @@ -248,15 +253,18 @@ MediaPoolItem GetClipProperty(propertyName=None) --> string|dict # Returns the property value for the key 'propertyName'. # If no argument is specified, a dict of all clip properties is returned. Check the section below for more information. SetClipProperty(propertyName, propertyValue) --> Bool # Sets the given property to propertyValue (string). Check the section below for more information. - LinkProxyMedia(propertyName) --> Bool # Links proxy media (absolute path) with the current clip. + LinkProxyMedia(proxyMediaFilePath) --> Bool # Links proxy media located at path specified by arg 'proxyMediaFilePath' with the current clip. 'proxyMediaFilePath' should be absolute clip path. UnlinkProxyMedia() --> Bool # Unlinks any proxy media associated with clip. ReplaceClip(filePath) --> Bool # Replaces the underlying asset and metadata of MediaPoolItem with the specified absolute clip path. + GetUniqueId() --> string # Returns a unique ID for the media pool item Timeline GetName() --> string # Returns the timeline name. SetName(timelineName) --> Bool # Sets the timeline name if timelineName (string) is unique. Returns True if successful. GetStartFrame() --> int # Returns the frame number at the start of timeline. GetEndFrame() --> int # Returns the frame number at the end of timeline. + SetStartTimecode(timecode) --> Bool # Set the start timecode of the timeline to the string 'timecode'. Returns true when the change is successful, false otherwise. + GetStartTimecode() --> string # Returns the start timecode for the timeline. GetTrackCount(trackType) --> int # Returns the number of tracks for the given track type ("audio", "video" or "subtitle"). GetItemListInTrack(trackType, index) --> [items...] # Returns a list of timeline items on that track (based on trackType and index). 1 <= index <= GetTrackCount(trackType). AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. @@ -271,7 +279,8 @@ Timeline DeleteMarkerByCustomData(customData) --> Bool # Delete first matching marker with specified customData. ApplyGradeFromDRX(path, gradeMode, item1, item2, ...)--> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". ApplyGradeFromDRX(path, gradeMode, [items]) --> Bool # Loads a still from given file path (string) and applies grade to Timeline Items with gradeMode (int): 0 - "No keyframes", 1 - "Source Timecode aligned", 2 - "Start Frames aligned". - GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color and Deliver pages. + GetCurrentTimecode() --> string # Returns a string timecode representation for the current playhead position, while on Cut, Edit, Color, Fairlight and Deliver pages. + SetCurrentTimecode(timecode) --> Bool # Sets current playhead position from input timecode for Cut, Edit, Color, Fairlight and Deliver pages. GetCurrentVideoItem() --> item # Returns the current video timeline item. 
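A minimal illustrative sketch combining a few of the Timeline calls listed above (timecode and marker values are hypothetical; the marker arguments follow the AddMarker signature shown here):
    project = resolve.GetProjectManager().GetCurrentProject()
    timeline = project.GetCurrentTimeline()
    timeline.SetCurrentTimecode("01:00:10:00")
    timeline.AddMarker(50, "Blue", "review", "check grade", 1)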
GetCurrentClipThumbnailImage() --> {thumbnailData} # Returns a dict (keys "width", "height", "format" and "data") with data containing raw thumbnail image data (RGB 8-bit image data encoded in base64 format) for current media in the Color Page. # An example of how to retrieve and interpret thumbnails is provided in 6_get_current_media_thumbnail.py in the Examples folder. @@ -280,37 +289,30 @@ Timeline DuplicateTimeline(timelineName) --> timeline # Duplicates the timeline and returns the created timeline, with the (optional) timelineName, on success. CreateCompoundClip([timelineItems], {clipInfo}) --> timelineItem # Creates a compound clip of input timeline items with an optional clipInfo map: {"startTimecode" : "00:00:00:00", "name" : "Compound Clip 1"}. It returns the created timeline item. CreateFusionClip([timelineItems]) --> timelineItem # Creates a Fusion clip of input timeline items. It returns the created timeline item. + ImportIntoTimeline(filePath, {importOptions}) --> Bool # Imports timeline items from an AAF file and optional importOptions dict into the timeline, with support for the keys: + # "autoImportSourceClipsIntoMediaPool": Bool, specifies if source clips should be imported into media pool, True by default + # "ignoreFileExtensionsWhenMatching": Bool, specifies if file extensions should be ignored when matching, False by default + # "linkToSourceCameraFiles": Bool, specifies if link to source camera files should be enabled, False by default + # "useSizingInfo": Bool, specifies if sizing information should be used, False by default + # "importMultiChannelAudioTracksAsLinkedGroups": Bool, specifies if multi-channel audio tracks should be imported as linked groups, False by default + # "insertAdditionalTracks": Bool, specifies if additional tracks should be inserted, True by default + # "insertWithOffset": string, specifies insert with offset value in timecode format - defaults to "00:00:00:00", applicable if "insertAdditionalTracks" is False + # "sourceClipsPath": string, specifies a filesystem path to search for source clips if the media is inaccessible in their original path and if "ignoreFileExtensionsWhenMatching" is True + # "sourceClipsFolders": string, list of Media Pool folder objects to search for source clips if the media is not present in current folder + Export(fileName, exportType, exportSubtype) --> Bool # Exports timeline to 'fileName' as per input exportType & exportSubtype format. - # exportType can be one of the following constants: - # resolve.EXPORT_AAF - # resolve.EXPORT_DRT - # resolve.EXPORT_EDL - # resolve.EXPORT_FCP_7_XML - # resolve.EXPORT_FCPXML_1_3 - # resolve.EXPORT_FCPXML_1_4 - # resolve.EXPORT_FCPXML_1_5 - # resolve.EXPORT_FCPXML_1_6 - # resolve.EXPORT_FCPXML_1_7 - # resolve.EXPORT_FCPXML_1_8 - # resolve.EXPORT_HDR_10_PROFILE_A - # resolve.EXPORT_HDR_10_PROFILE_B - # resolve.EXPORT_TEXT_CSV - # resolve.EXPORT_TEXT_TAB - # resolve.EXPORT_DOLBY_VISION_VER_2_9 - # resolve.EXPORT_DOLBY_VISION_VER_4_0 - # exportSubtype can be one of the following enums: - # resolve.EXPORT_NONE - # resolve.EXPORT_AAF_NEW - # resolve.EXPORT_AAF_EXISTING - # resolve.EXPORT_CDL - # resolve.EXPORT_SDL - # resolve.EXPORT_MISSING_CLIPS - # Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For rest of the exportType, exportSubtype is ignored. - # When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING. 
- # When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE. - # Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used. + # Refer to section "Looking up timeline exports properties" for information on the parameters. GetSetting(settingName) --> string # Returns value of timeline setting (indicated by settingName : string). Check the section below for more information. SetSetting(settingName, settingValue) --> Bool # Sets timeline setting (indicated by settingName : string) to the value (settingValue : string). Check the section below for more information. + InsertGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a generator (indicated by generatorName : string) into the timeline. + InsertFusionGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts a Fusion generator (indicated by generatorName : string) into the timeline. + InsertFusionCompositionIntoTimeline() --> TimelineItem # Inserts a Fusion composition into the timeline. + InsertOFXGeneratorIntoTimeline(generatorName) --> TimelineItem # Inserts an OFX generator (indicated by generatorName : string) into the timeline. + InsertTitleIntoTimeline(titleName) --> TimelineItem # Inserts a title (indicated by titleName : string) into the timeline. + InsertFusionTitleIntoTimeline(titleName) --> TimelineItem # Inserts a Fusion title (indicated by titleName : string) into the timeline. + GrabStill() --> galleryStill # Grabs still from the current video clip. Returns a GalleryStill object. + GrabAllStills(stillFrameSource) --> [galleryStill] # Grabs stills from all the clips of the timeline at 'stillFrameSource' (1 - First frame, 2 - Middle frame). Returns the list of GalleryStill objects. + GetUniqueId() --> string # Returns a unique ID for the timeline TimelineItem GetName() --> string # Returns the item name. @@ -323,6 +325,10 @@ TimelineItem GetLeftOffset() --> int # Returns the maximum extension by frame for clip from left side. GetRightOffset() --> int # Returns the maximum extension by frame for clip from right side. GetStart() --> int # Returns the start frame position on the timeline. + SetProperty(propertyKey, propertyValue) --> Bool # Sets the value of property "propertyKey" to value "propertyValue" + # Refer to "Looking up Timeline item properties" for more information + GetProperty(propertyKey) --> int/[key:value] # returns the value of the specified key + # if no key is specified, the method returns a dictionary(python) or table(lua) for all supported keys AddMarker(frameId, color, name, note, duration, --> Bool # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker. customData) GetMarkers() --> {markers...} # Returns a dict (frameId -> {information}) of all markers and dicts with their information. @@ -345,7 +351,8 @@ TimelineItem DeleteFusionCompByName(compName) --> Bool # Deletes the named Fusion composition. LoadFusionCompByName(compName) --> fusionComp # Loads the named Fusion composition as the active composition. RenameFusionCompByName(oldName, newName) --> Bool # Renames the Fusion composition identified by oldName. - AddVersion(versionName, versionType) --> Bool # Adds a new color version for a video clipbased on versionType (0 - local, 1 - remote). 

TimelineItem
  GetName()                                        --> string          # Returns the item name.
@@ -323,6 +325,10 @@ TimelineItem
  GetLeftOffset()                                  --> int             # Returns the maximum extension by frame for clip from left side.
  GetRightOffset()                                 --> int             # Returns the maximum extension by frame for clip from right side.
  GetStart()                                       --> int             # Returns the start frame position on the timeline.
+ SetProperty(propertyKey, propertyValue)          --> Bool            # Sets the value of property "propertyKey" to value "propertyValue"
+                                                                      # Refer to "Looking up Timeline item properties" for more information
+ GetProperty(propertyKey)                         --> int/[key:value] # Returns the value of the specified key.
+                                                                      # If no key is specified, the method returns a dictionary (Python) or table (Lua) of all supported keys.
  AddMarker(frameId, color, name, note, duration,  --> Bool            # Creates a new marker at given frameId position and with given marker information. 'customData' is optional and helps to attach user specific data to the marker.
            customData)
  GetMarkers()                                     --> {markers...}    # Returns a dict (frameId -> {information}) of all markers and dicts with their information.
@@ -345,7 +351,8 @@ TimelineItem
  DeleteFusionCompByName(compName)                 --> Bool            # Deletes the named Fusion composition.
  LoadFusionCompByName(compName)                   --> fusionComp      # Loads the named Fusion composition as the active composition.
  RenameFusionCompByName(oldName, newName)         --> Bool            # Renames the Fusion composition identified by oldName.
- AddVersion(versionName, versionType)             --> Bool            # Adds a new color version for a video clipbased on versionType (0 - local, 1 - remote).
+ AddVersion(versionName, versionType)             --> Bool            # Adds a new color version for a video clip based on versionType (0 - local, 1 - remote).
+ GetCurrentVersion()                              --> {versionName...} # Returns the current version of the video clip. The returned value will have the keys versionName and versionType (0 - local, 1 - remote).
  DeleteVersionByName(versionName, versionType)    --> Bool            # Deletes a color version by name and versionType (0 - local, 1 - remote).
  LoadVersionByName(versionName, versionType)      --> Bool            # Loads a named color version as the active version. versionType: 0 - local, 1 - remote.
  RenameVersionByName(oldName, newName, versionType) --> Bool          # Renames the color version identified by oldName and versionType (0 - local, 1 - remote).
@@ -354,12 +361,14 @@ TimelineItem
  GetStereoConvergenceValues()                     --> {keyframes...}  # Returns a dict (offset -> value) of keyframe offsets and respective convergence values.
  GetStereoLeftFloatingWindowParams()              --> {keyframes...}  # For the LEFT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
  GetStereoRightFloatingWindowParams()             --> {keyframes...}  # For the RIGHT eye -> returns a dict (offset -> dict) of keyframe offsets and respective floating window params. Value at particular offset includes the left, right, top and bottom floating window values.
+ GetNumNodes()                                    --> int             # Returns the number of nodes in the current graph for the timeline item
  SetLUT(nodeIndex, lutPath)                       --> Bool            # Sets LUT on the node mapping the node index provided, 1 <= nodeIndex <= total number of nodes.
                                                                       # The lutPath can be an absolute path, or a relative path (based off custom LUT paths or the master LUT path).
                                                                       # The operation is successful for valid lut paths that Resolve has already discovered (see Project.RefreshLUTList).
+ GetLUT(nodeIndex)                                --> string          # Gets relative LUT path based on the node index provided, 1 <= nodeIndex <= total number of nodes.
  SetCDL([CDL map])                                --> Bool            # Keys of map are: "NodeIndex", "Slope", "Offset", "Power", "Saturation", where 1 <= NodeIndex <= total number of nodes.
                                                                       # Example python code - SetCDL({"NodeIndex" : "1", "Slope" : "0.5 0.4 0.2", "Offset" : "0.4 0.3 0.2", "Power" : "0.6 0.7 0.8", "Saturation" : "0.65"})
- AddTake(mediaPoolItem, startFrame=0, endFrame)=0 --> Bool            # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the whole clip is added. startFrame and endFrame can be specified as extents.
+ AddTake(mediaPoolItem, startFrame, endFrame)     --> Bool            # Adds mediaPoolItem as a new take. Initializes a take selector for the timeline item if needed. By default, the full clip extents are added. startFrame (int) and endFrame (int) are optional arguments used to specify the extents.
  GetSelectedTakeIndex()                           --> int             # Returns the index of the currently selected take, or 0 if the clip is not a take selector.
  GetTakesCount()                                  --> int             # Returns the number of takes in take selector, or 0 if the clip is not a take selector.
  GetTakeByIndex(idx)                              --> {takeInfo...}   # Returns a dict (keys "startFrame", "endFrame" and "mediaPoolItem") with take info for specified index.
@@ -367,7 +376,24 @@ TimelineItem
  SelectTakeByIndex(idx)                           --> Bool            # Selects a take by index, 1 <= idx <= number of takes.
  FinalizeTake()                                   --> Bool            # Finalizes take selection.
  CopyGrades([tgtTimelineItems])                   --> Bool            # Copies the current grade to all the items in tgtTimelineItems list. Returns True on success and False if any error occurred.
+ UpdateSidecar()                                  --> Bool            # Updates sidecar file for BRAW clips or RMD file for R3D clips.
+ GetUniqueId()                                    --> string          # Returns a unique ID for the timeline item
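A hedged sketch of the take-selector workflow documented above (`timeline_item` and `media_pool_item` are assumed to be objects obtained through the Timeline and MediaPool APIs; the frame numbers are arbitrary):

    # Sketch only: add a take, switch to it, inspect it, then finalize the selection.
    timeline_item.AddTake(media_pool_item, 1001, 1100)
    count = timeline_item.GetTakesCount()   # 0 when the clip is not a take selector
    if count:
        timeline_item.SelectTakeByIndex(count)
        info = timeline_item.GetTakeByIndex(count)
        print(info["startFrame"], info["endFrame"], info["mediaPoolItem"])
        timeline_item.FinalizeTake()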

+Gallery
+ GetAlbumName(galleryStillAlbum)                  --> string          # Returns the name of the GalleryStillAlbum object 'galleryStillAlbum'.
+ SetAlbumName(galleryStillAlbum, albumName)       --> Bool            # Sets the name of the GalleryStillAlbum object 'galleryStillAlbum' to 'albumName'.
+ GetCurrentStillAlbum()                           --> galleryStillAlbum # Returns current album as a GalleryStillAlbum object.
+ SetCurrentStillAlbum(galleryStillAlbum)          --> Bool            # Sets current album to GalleryStillAlbum object 'galleryStillAlbum'.
+ GetGalleryStillAlbums()                          --> [galleryStillAlbum] # Returns the gallery albums as a list of GalleryStillAlbum objects.
+
+GalleryStillAlbum
+ GetStills()                                      --> [galleryStill]  # Returns the list of GalleryStill objects in the album.
+ GetLabel(galleryStill)                           --> string          # Returns the label of the galleryStill.
+ SetLabel(galleryStill, label)                    --> Bool            # Sets the new 'label' to GalleryStill object 'galleryStill'.
+ ExportStills([galleryStill], folderPath, filePrefix, format) --> Bool # Exports list of GalleryStill objects '[galleryStill]' to directory 'folderPath', with filename prefix 'filePrefix', using file format 'format' (supported formats: dpx, cin, tif, jpg, png, ppm, bmp, xpm).
+ DeleteStills([galleryStill])                     --> Bool            # Deletes specified list of GalleryStill objects '[galleryStill]'.
+
+GalleryStill                                                          # This class does not provide any API functions but the object type is used by functions in other classes.
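The Gallery classes pair naturally with Timeline.GrabAllStills(); a speculative sketch (the Project.GetGallery() accessor and the output folder are assumptions made to keep the snippet self-contained):

    # Sketch only: grab a still per clip and export the current album as PNG files.
    gallery = project.GetGallery()
    stills = timeline.GrabAllStills(1)        # 1 - first frame of each clip
    album = gallery.GetCurrentStillAlbum()
    gallery.SetAlbumName(album, "FirstFrames")
    album.ExportStills(stills, "/tmp/stills", "shot_", "png")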
+ - "ExportVideo": Bool + - "ExportAudio": Bool + - "FormatWidth": int + - "FormatHeight": int + - "FrameRate": float (examples: 23.976, 24) + - "PixelAspectRatio": string (for SD resolution: "16_9" or "4_3") (other resolutions: "square" or "cinemascope") + - "VideoQuality" possible values for current codec (if applicable): + - 0 (int) - will set quality to automatic + - [1 -> MAX] (int) - will set input bit rate + - ["Least", "Low", "Medium", "High", "Best"] (String) - will set input quality level + - "AudioCodec": string (example: "aac") + - "AudioBitDepth": int + - "AudioSampleRate": int + - "ColorSpaceTag" : string (example: "Same as Project", "AstroDesign") + - "GammaTag" : string (example: "Same as Project", "ACEScct") + - "ExportAlpha": Bool + - "EncodingProfile": string (example: "Main10"). Can only be set for H.264 and H.265. + - "MultiPassEncode": Bool. Can only be set for H.264. + - "AlphaMode": 0 - Premultiplied, 1 - Straight. Can only be set if "ExportAlpha" is true. + - "NetworkOptimization": Bool. Only supported by QuickTime and MP4 formats. + +Looking up timeline export properties +------------------------------------- +This section covers the parameters for the argument Export(fileName, exportType, exportSubtype). + +exportType can be one of the following constants: + - resolve.EXPORT_AAF + - resolve.EXPORT_DRT + - resolve.EXPORT_EDL + - resolve.EXPORT_FCP_7_XML + - resolve.EXPORT_FCPXML_1_3 + - resolve.EXPORT_FCPXML_1_4 + - resolve.EXPORT_FCPXML_1_5 + - resolve.EXPORT_FCPXML_1_6 + - resolve.EXPORT_FCPXML_1_7 + - resolve.EXPORT_FCPXML_1_8 + - resolve.EXPORT_FCPXML_1_9 + - resolve.EXPORT_FCPXML_1_10 + - resolve.EXPORT_HDR_10_PROFILE_A + - resolve.EXPORT_HDR_10_PROFILE_B + - resolve.EXPORT_TEXT_CSV + - resolve.EXPORT_TEXT_TAB + - resolve.EXPORT_DOLBY_VISION_VER_2_9 + - resolve.EXPORT_DOLBY_VISION_VER_4_0 +exportSubtype can be one of the following enums: + - resolve.EXPORT_NONE + - resolve.EXPORT_AAF_NEW + - resolve.EXPORT_AAF_EXISTING + - resolve.EXPORT_CDL + - resolve.EXPORT_SDL + - resolve.EXPORT_MISSING_CLIPS +Please note that exportSubType is a required parameter for resolve.EXPORT_AAF and resolve.EXPORT_EDL. For rest of the exportType, exportSubtype is ignored. +When exportType is resolve.EXPORT_AAF, valid exportSubtype values are resolve.EXPORT_AAF_NEW and resolve.EXPORT_AAF_EXISTING. +When exportType is resolve.EXPORT_EDL, valid exportSubtype values are resolve.EXPORT_CDL, resolve.EXPORT_SDL, resolve.EXPORT_MISSING_CLIPS and resolve.EXPORT_NONE. +Note: Replace 'resolve.' when using the constants above, if a different Resolve class instance name is used. + +Looking up Timeline item properties +----------------------------------- +This section covers additional notes for the function "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set properties mentioned. 
+
+Looking up Timeline item properties
+-----------------------------------
+This section covers additional notes for the functions "TimelineItem:SetProperty" and "TimelineItem:GetProperty". These functions are used to get and set the properties listed below.
+
+The supported keys with their accepted values are:
+    "Pan"             : floating point values from -4.0*width to 4.0*width
+    "Tilt"            : floating point values from -4.0*height to 4.0*height
+    "ZoomX"           : floating point values from 0.0 to 100.0
+    "ZoomY"           : floating point values from 0.0 to 100.0
+    "ZoomGang"        : a boolean value
+    "RotationAngle"   : floating point values from -360.0 to 360.0
+    "AnchorPointX"    : floating point values from -4.0*width to 4.0*width
+    "AnchorPointY"    : floating point values from -4.0*height to 4.0*height
+    "Pitch"           : floating point values from -1.5 to 1.5
+    "Yaw"             : floating point values from -1.5 to 1.5
+    "FlipX"           : boolean value for flipping horizontally
+    "FlipY"           : boolean value for flipping vertically
+    "CropLeft"        : floating point values from 0.0 to width
+    "CropRight"       : floating point values from 0.0 to width
+    "CropTop"         : floating point values from 0.0 to height
+    "CropBottom"      : floating point values from 0.0 to height
+    "CropSoftness"    : floating point values from -100.0 to 100.0
+    "CropRetain"      : boolean value for "Retain Image Position" checkbox
+    "DynamicZoomEase" : A value from the following constants
+        - DYNAMIC_ZOOM_EASE_LINEAR = 0
+        - DYNAMIC_ZOOM_EASE_IN
+        - DYNAMIC_ZOOM_EASE_OUT
+        - DYNAMIC_ZOOM_EASE_IN_AND_OUT
+    "CompositeMode"   : A value from the following constants
+        - COMPOSITE_NORMAL = 0
+        - COMPOSITE_ADD
+        - COMPOSITE_SUBTRACT
+        - COMPOSITE_DIFF
+        - COMPOSITE_MULTIPLY
+        - COMPOSITE_SCREEN
+        - COMPOSITE_OVERLAY
+        - COMPOSITE_HARDLIGHT
+        - COMPOSITE_SOFTLIGHT
+        - COMPOSITE_DARKEN
+        - COMPOSITE_LIGHTEN
+        - COMPOSITE_COLOR_DODGE
+        - COMPOSITE_COLOR_BURN
+        - COMPOSITE_EXCLUSION
+        - COMPOSITE_HUE
+        - COMPOSITE_SATURATE
+        - COMPOSITE_COLORIZE
+        - COMPOSITE_LUMA_MASK
+        - COMPOSITE_DIVIDE
+        - COMPOSITE_LINEAR_DODGE
+        - COMPOSITE_LINEAR_BURN
+        - COMPOSITE_LINEAR_LIGHT
+        - COMPOSITE_VIVID_LIGHT
+        - COMPOSITE_PIN_LIGHT
+        - COMPOSITE_HARD_MIX
+        - COMPOSITE_LIGHTER_COLOR
+        - COMPOSITE_DARKER_COLOR
+        - COMPOSITE_FOREGROUND
+        - COMPOSITE_ALPHA
+        - COMPOSITE_INVERTED_ALPHA
+        - COMPOSITE_LUM
+        - COMPOSITE_INVERTED_LUM
+    "Opacity"         : floating point value from 0.0 to 100.0
+    "Distortion"      : floating point value from -1.0 to 1.0
+    "RetimeProcess"   : A value from the following constants
+        - RETIME_USE_PROJECT = 0
+        - RETIME_NEAREST
+        - RETIME_FRAME_BLEND
+        - RETIME_OPTICAL_FLOW
+    "MotionEstimation" : A value from the following constants
+        - MOTION_EST_USE_PROJECT = 0
+        - MOTION_EST_STANDARD_FASTER
+        - MOTION_EST_STANDARD_BETTER
+        - MOTION_EST_ENHANCED_FASTER
+        - MOTION_EST_ENHANCED_BETTER
+        - MOTION_EST_SPEED_WRAP
+    "Scaling"         : A value from the following constants
+        - SCALE_USE_PROJECT = 0
+        - SCALE_CROP
+        - SCALE_FIT
+        - SCALE_FILL
+        - SCALE_STRETCH
+    "ResizeFilter"    : A value from the following constants
+        - RESIZE_FILTER_USE_PROJECT = 0
+        - RESIZE_FILTER_SHARPER
+        - RESIZE_FILTER_SMOOTHER
+        - RESIZE_FILTER_BICUBIC
+        - RESIZE_FILTER_BILINEAR
+        - RESIZE_FILTER_BESSEL
+        - RESIZE_FILTER_BOX
+        - RESIZE_FILTER_CATMULL_ROM
+        - RESIZE_FILTER_CUBIC
+        - RESIZE_FILTER_GAUSSIAN
+        - RESIZE_FILTER_LANCZOS
+        - RESIZE_FILTER_MITCHELL
+        - RESIZE_FILTER_NEAREST_NEIGHBOR
+        - RESIZE_FILTER_QUADRATIC
+        - RESIZE_FILTER_SINC
+        - RESIZE_FILTER_LINEAR
+Values beyond the range will be clipped.
+width and height are the same as the UI max limits.
+
+The arguments can be passed as a key and value pair or they can be grouped together into a dictionary (for Python) or table (for Lua) and passed
+as a single argument.
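A brief sketch of both calling styles (values are arbitrary but stay inside the documented ranges; `timeline_item` is assumed to come from the current timeline):

    # Sketch only: a single key/value pair ...
    timeline_item.SetProperty("ZoomX", 2.0)
    # ... or several properties grouped into one dict (table in Lua).
    timeline_item.SetProperty({"Pan": 10.0, "Tilt": -5.0, "CompositeMode": 0})

    print(timeline_item.GetProperty("CompositeMode"))  # constant-backed keys return the number
    all_properties = timeline_item.GetProperty()       # no key: dict of all supported keys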
+
+Getting the values for keys that use constants will return the number that the constant stands for.
+
 Deprecated Resolve API Functions
 --------------------------------
 The following API functions are deprecated.
@@ -450,12 +648,12 @@ TimelineItem
 Unsupported Resolve API Functions
 ---------------------------------
-The following API (functions and paraameters) are no longer supported.
+The following API (functions and parameters) are no longer supported. Use job IDs instead of indices.
 Project
   StartRendering(index1, index2, ...)              --> Bool            # Please use unique job ids (string) instead of indices.
   StartRendering([idxs...])                        --> Bool            # Please use unique job ids (string) instead of indices.
   DeleteRenderJobByIndex(idx)                      --> Bool            # Please use unique job ids (string) instead of indices.
   GetRenderJobStatus(idx)                          --> {status info}   # Please use unique job ids (string) instead of indices.
-  GetSetting and SetSetting                        --> {}              # settingName "videoMonitorUseRec601For422SDI" is no longer supported.
-                                                                       # Please use "videoMonitorUseMatrixOverrideFor422SDI" and "videoMonitorMatrixOverrideFor422SDI" instead.
+  GetSetting and SetSetting                        --> {}              # settingName videoMonitorUseRec601For422SDI is now replaced with videoMonitorUseMatrixOverrideFor422SDI and videoMonitorMatrixOverrideFor422SDI.
+                                                                       # settingName perfProxyMediaOn is now replaced with perfProxyMediaMode which takes values 0 - disabled, 1 - when available, 2 - when source not available.
diff --git a/openpype/hosts/resolve/__init__.py b/openpype/hosts/resolve/__init__.py
index 3e49ce3b9b..b4a994bbaa 100644
--- a/openpype/hosts/resolve/__init__.py
+++ b/openpype/hosts/resolve/__init__.py
@@ -1,129 +1,6 @@
-from .api.utils import (
-    setup,
-    get_resolve_module
+from .addon import ResolveAddon
+
+
+__all__ = (
+    "ResolveAddon",
 )
-
-from .api.pipeline import (
-    install,
-    uninstall,
-    ls,
-    containerise,
-    update_container,
-    publish,
-    launch_workfiles_app,
-    maintained_selection,
-    remove_instance,
-    list_instances
-)
-
-from .api.lib import (
-    maintain_current_timeline,
-    publish_clip_color,
-    get_project_manager,
-    get_current_project,
-    get_current_timeline,
-    create_bin,
-    get_media_pool_item,
-    create_media_pool_item,
-    create_timeline_item,
-    get_timeline_item,
-    get_video_track_names,
-    get_current_timeline_items,
-    get_pype_timeline_item_by_name,
-    get_timeline_item_pype_tag,
-    set_timeline_item_pype_tag,
-    imprint,
-    set_publish_attribute,
-    get_publish_attribute,
-    create_compound_clip,
-    swap_clips,
-    get_pype_clip_metadata,
-    set_project_manager_to_folder_name,
-    get_otio_clip_instance_data,
-    get_reformated_path
-)
-
-from .api.menu import launch_pype_menu
-
-from .api.plugin import (
-    ClipLoader,
-    TimelineItemLoader,
-    Creator,
-    PublishClip
-)
-
-from .api.workio import (
-    open_file,
-    save_file,
-    current_file,
-    has_unsaved_changes,
-    file_extensions,
-    work_root
-)
-
-from .api.testing_utils import TestGUI
-
-
-__all__ = [
-    # pipeline
-    "install",
-    "uninstall",
-    "ls",
-    "containerise",
-    "update_container",
-    "reload_pipeline",
-    "publish",
-    "launch_workfiles_app",
-    "maintained_selection",
-    "remove_instance",
-    "list_instances",
-
-    # utils
-    "setup",
-    "get_resolve_module",
-
-    # lib
-    "maintain_current_timeline",
-    "publish_clip_color",
-    "get_project_manager",
-    "get_current_project",
-    "get_current_timeline",
-    "create_bin",
-    "get_media_pool_item",
-    "create_media_pool_item",
-    "create_timeline_item",
-    "get_timeline_item",
-    "get_video_track_names",
-    "get_current_timeline_items",
-    "get_pype_timeline_item_by_name",
"get_timeline_item_pype_tag", - "set_timeline_item_pype_tag", - "imprint", - "set_publish_attribute", - "get_publish_attribute", - "create_compound_clip", - "swap_clips", - "get_pype_clip_metadata", - "set_project_manager_to_folder_name", - "get_otio_clip_instance_data", - "get_reformated_path", - - # menu - "launch_pype_menu", - - # plugin - "ClipLoader", - "TimelineItemLoader", - "Creator", - "PublishClip", - - # workio - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root", - - "TestGUI" -] diff --git a/openpype/hosts/resolve/addon.py b/openpype/hosts/resolve/addon.py new file mode 100644 index 0000000000..02c1d7957f --- /dev/null +++ b/openpype/hosts/resolve/addon.py @@ -0,0 +1,23 @@ +import os + +from openpype.modules import OpenPypeModule, IHostAddon + +from .utils import RESOLVE_ROOT_DIR + + +class ResolveAddon(OpenPypeModule, IHostAddon): + name = "resolve" + host_name = "resolve" + + def initialize(self, module_settings): + self.enabled = True + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(RESOLVE_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".drp"] diff --git a/openpype/hosts/resolve/api/__init__.py b/openpype/hosts/resolve/api/__init__.py index 48bd938e57..00a598548e 100644 --- a/openpype/hosts/resolve/api/__init__.py +++ b/openpype/hosts/resolve/api/__init__.py @@ -1,11 +1,136 @@ """ resolve api """ -import os +from .utils import ( + get_resolve_module +) + +from .pipeline import ( + install, + uninstall, + ls, + containerise, + update_container, + publish, + launch_workfiles_app, + maintained_selection, + remove_instance, + list_instances +) + +from .lib import ( + maintain_current_timeline, + publish_clip_color, + get_project_manager, + get_current_project, + get_current_timeline, + create_bin, + get_media_pool_item, + create_media_pool_item, + create_timeline_item, + get_timeline_item, + get_video_track_names, + get_current_timeline_items, + get_pype_timeline_item_by_name, + get_timeline_item_pype_tag, + set_timeline_item_pype_tag, + imprint, + set_publish_attribute, + get_publish_attribute, + create_compound_clip, + swap_clips, + get_pype_clip_metadata, + set_project_manager_to_folder_name, + get_otio_clip_instance_data, + get_reformated_path +) + +from .menu import launch_pype_menu + +from .plugin import ( + ClipLoader, + TimelineItemLoader, + Creator, + PublishClip +) + +from .workio import ( + open_file, + save_file, + current_file, + has_unsaved_changes, + file_extensions, + work_root +) + +from .testing_utils import TestGUI + bmdvr = None bmdvf = None -API_DIR = os.path.dirname(os.path.abspath(__file__)) -HOST_DIR = os.path.dirname(API_DIR) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") +__all__ = [ + "bmdvr", + "bmdvf", + + # pipeline + "install", + "uninstall", + "ls", + "containerise", + "update_container", + "reload_pipeline", + "publish", + "launch_workfiles_app", + "maintained_selection", + "remove_instance", + "list_instances", + + # utils + "get_resolve_module", + + # lib + "maintain_current_timeline", + "publish_clip_color", + "get_project_manager", + "get_current_project", + "get_current_timeline", + "create_bin", + "get_media_pool_item", + "create_media_pool_item", + "create_timeline_item", + "get_timeline_item", + "get_video_track_names", + "get_current_timeline_items", + "get_pype_timeline_item_by_name", + "get_timeline_item_pype_tag", + "set_timeline_item_pype_tag", + "imprint", + "set_publish_attribute", 
+ "get_publish_attribute", + "create_compound_clip", + "swap_clips", + "get_pype_clip_metadata", + "set_project_manager_to_folder_name", + "get_otio_clip_instance_data", + "get_reformated_path", + + # menu + "launch_pype_menu", + + # plugin + "ClipLoader", + "TimelineItemLoader", + "Creator", + "PublishClip", + + # workio + "open_file", + "save_file", + "current_file", + "has_unsaved_changes", + "file_extensions", + "work_root", + + "TestGUI" +] diff --git a/openpype/hosts/resolve/api/action.py b/openpype/hosts/resolve/api/action.py index f8f338a850..ceedc2cc54 100644 --- a/openpype/hosts/resolve/api/action.py +++ b/openpype/hosts/resolve/api/action.py @@ -4,7 +4,7 @@ from __future__ import absolute_import import pyblish.api -from ...action import get_errored_instances_from_context +from openpype.pipeline.publish import get_errored_instances_from_context class SelectInvalidAction(pyblish.api.Action): diff --git a/openpype/hosts/resolve/api/lib.py b/openpype/hosts/resolve/api/lib.py index 22f83c6eed..f41eb36caf 100644 --- a/openpype/hosts/resolve/api/lib.py +++ b/openpype/hosts/resolve/api/lib.py @@ -4,13 +4,13 @@ import re import os import contextlib from opentimelineio import opentime -import openpype + +from openpype.lib import Logger +from openpype.pipeline.editorial import is_overlapping_otio_ranges from ..otio import davinci_export as otio_export -from openpype.api import Logger - -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) self = sys.modules[__name__] self.project_manager = None @@ -319,14 +319,13 @@ def get_current_timeline_items( selected_track_count = timeline.GetTrackCount(track_type) # loop all tracks and get items - _clips = dict() + _clips = {} for track_index in range(1, (int(selected_track_count) + 1)): _track_name = timeline.GetTrackName(track_type, track_index) # filter out all unmathed track names - if track_name: - if _track_name not in track_name: - continue + if track_name and _track_name not in track_name: + continue timeline_items = timeline.GetItemListInTrack( track_type, track_index) @@ -348,12 +347,8 @@ def get_current_timeline_items( "index": clip_index } ti_color = ti.GetClipColor() - if filter is True: - if selecting_color in ti_color: - selected_clips.append(data) - else: + if filter and selecting_color in ti_color or not filter: selected_clips.append(data) - return selected_clips @@ -824,7 +819,7 @@ def get_otio_clip_instance_data(otio_timeline, timeline_item_data): continue if otio_clip.name not in timeline_item.GetName(): continue - if openpype.lib.is_overlapping_otio_ranges( + if is_overlapping_otio_ranges( parent_range, timeline_range, strict=True): # add pypedata marker to otio_clip metadata diff --git a/openpype/hosts/resolve/api/menu.py b/openpype/hosts/resolve/api/menu.py index 9e0dd12376..eeb9e65dec 100644 --- a/openpype/hosts/resolve/api/menu.py +++ b/openpype/hosts/resolve/api/menu.py @@ -1,15 +1,15 @@ import os import sys -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore + +from openpype.tools.utils import host_tools from .pipeline import ( publish, launch_workfiles_app ) -from openpype.tools.utils import host_tools - def load_stylesheet(): path = os.path.join(os.path.dirname(__file__), "menu_style.qss") @@ -54,15 +54,15 @@ class OpenPypeMenu(QtWidgets.QWidget): ) self.setWindowTitle("OpenPype") - workfiles_btn = QtWidgets.QPushButton("Workfiles...", self) - create_btn = QtWidgets.QPushButton("Create...", self) - publish_btn = QtWidgets.QPushButton("Publish...", self) - load_btn = 
QtWidgets.QPushButton("Load...", self) - inventory_btn = QtWidgets.QPushButton("Inventory...", self) - subsetm_btn = QtWidgets.QPushButton("Subset Manager...", self) - libload_btn = QtWidgets.QPushButton("Library...", self) + workfiles_btn = QtWidgets.QPushButton("Workfiles ...", self) + create_btn = QtWidgets.QPushButton("Create ...", self) + publish_btn = QtWidgets.QPushButton("Publish ...", self) + load_btn = QtWidgets.QPushButton("Load ...", self) + inventory_btn = QtWidgets.QPushButton("Manager ...", self) + subsetm_btn = QtWidgets.QPushButton("Subset Manager ...", self) + libload_btn = QtWidgets.QPushButton("Library ...", self) experimental_btn = QtWidgets.QPushButton( - "Experimental tools...", self + "Experimental tools ...", self ) # rename_btn = QtWidgets.QPushButton("Rename", self) # set_colorspace_btn = QtWidgets.QPushButton( diff --git a/openpype/hosts/resolve/api/pipeline.py b/openpype/hosts/resolve/api/pipeline.py index e8b017ead5..899cb825bb 100644 --- a/openpype/hosts/resolve/api/pipeline.py +++ b/openpype/hosts/resolve/api/pipeline.py @@ -4,21 +4,27 @@ Basic avalon integration import os import contextlib from collections import OrderedDict -from avalon import api as avalon -from avalon import schema + from pyblish import api as pyblish -from openpype.api import Logger + +from openpype.lib import Logger from openpype.pipeline import ( - LegacyCreator, + schema, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) -from . import lib -from . import PLUGINS_DIR from openpype.tools.utils import host_tools -log = Logger().get_logger(__name__) +from . import lib +from .utils import get_resolve_module + +log = Logger.get_logger(__name__) + +HOST_DIR = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) +PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") LOAD_PATH = os.path.join(PLUGINS_DIR, "load") CREATE_PATH = os.path.join(PLUGINS_DIR, "create") @@ -37,7 +43,6 @@ def install(): See the Maya equivalent for inspiration on how to implement this. """ - from .. 
import get_resolve_module log.info("openpype.hosts.resolve installed") @@ -46,7 +51,7 @@ def install(): log.info("Registering DaVinci Resovle plug-ins..") register_loader_plugin_path(LOAD_PATH) - avalon.register_plugin_path(LegacyCreator, CREATE_PATH) + register_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) @@ -70,7 +75,7 @@ def uninstall(): log.info("Deregistering DaVinci Resovle plug-ins..") deregister_loader_plugin_path(LOAD_PATH) - avalon.deregister_plugin_path(LegacyCreator, CREATE_PATH) + deregister_creator_plugin_path(CREATE_PATH) # register callback for switching publishable pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled) @@ -239,7 +244,7 @@ def on_pyblish_instance_toggled(instance, old_value, new_value): log.info("instance toggle: {}, old_value: {}, new_value:{} ".format( instance, old_value, new_value)) - from openpype.hosts.resolve import ( + from openpype.hosts.resolve.api import ( set_publish_attribute ) diff --git a/openpype/hosts/resolve/api/plugin.py b/openpype/hosts/resolve/api/plugin.py index 8e1436021c..77e30149fd 100644 --- a/openpype/hosts/resolve/api/plugin.py +++ b/openpype/hosts/resolve/api/plugin.py @@ -2,15 +2,17 @@ import re import uuid import qargparse -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore -import openpype.api as pype +from openpype.settings import get_current_project_settings +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( LegacyCreator, LoaderPlugin, ) -from openpype.hosts import resolve + from . import lib +from .menu import load_stylesheet class CreatorWidget(QtWidgets.QDialog): @@ -86,7 +88,7 @@ class CreatorWidget(QtWidgets.QDialog): ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) - stylesheet = resolve.api.menu.load_stylesheet() + stylesheet = load_stylesheet() self.setStyleSheet(stylesheet) def _on_ok_clicked(self): @@ -375,7 +377,7 @@ class ClipLoader: """ asset_name = self.context["representation"]["context"]["asset"] - self.data["assetData"] = pype.get_asset(asset_name)["data"] + self.data["assetData"] = get_current_project_asset(asset_name)["data"] def load(self): # create project bin for the media to be imported into @@ -438,7 +440,7 @@ class ClipLoader: source_in = int(_clip_property("Start")) source_out = int(_clip_property("End")) - resolve.swap_clips( + lib.swap_clips( timeline_item, media_pool_item, source_in, @@ -504,21 +506,21 @@ class Creator(LegacyCreator): def __init__(self, *args, **kwargs): super(Creator, self).__init__(*args, **kwargs) - from openpype.api import get_current_project_settings + resolve_p_settings = get_current_project_settings().get("resolve") - self.presets = dict() + self.presets = {} if resolve_p_settings: self.presets = resolve_p_settings["create"].get( self.__class__.__name__, {}) # adding basic current context resolve objects - self.project = resolve.get_current_project() - self.timeline = resolve.get_current_timeline() + self.project = lib.get_current_project() + self.timeline = lib.get_current_timeline() if (self.options or {}).get("useSelection"): - self.selected = resolve.get_current_timeline_items(filter=True) + self.selected = lib.get_current_timeline_items(filter=True) else: - self.selected = resolve.get_current_timeline_items(filter=False) + self.selected = lib.get_current_timeline_items(filter=False) self.widget = CreatorWidget diff --git 
a/openpype/hosts/resolve/api/preload_console.py b/openpype/hosts/resolve/api/preload_console.py deleted file mode 100644 index 1e3a56b4dd..0000000000 --- a/openpype/hosts/resolve/api/preload_console.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python -import time -from openpype.hosts.resolve.utils import get_resolve_module -from openpype.api import Logger - -log = Logger().get_logger(__name__) - -wait_delay = 2.5 -wait = 0.00 -ready = None -while True: - try: - # Create project and set parameters: - resolve = get_resolve_module() - pm = resolve.GetProjectManager() - if pm: - ready = None - else: - ready = True - except AttributeError: - pass - - if ready is None: - time.sleep(wait_delay) - log.info(f"Waiting {wait}s for Resolve to have opened Project Manager") - wait += wait_delay - else: - print(f"Preloaded variables: \n\n\tResolve module: " - f"`resolve` > {type(resolve)} \n\tProject manager: " - f"`pm` > {type(pm)}") - break diff --git a/openpype/hosts/resolve/api/utils.py b/openpype/hosts/resolve/api/utils.py index 9b3762f328..871b3af38d 100644 --- a/openpype/hosts/resolve/api/utils.py +++ b/openpype/hosts/resolve/api/utils.py @@ -4,21 +4,21 @@ Resolve's tools for setting environment """ -import sys import os -import shutil -from . import HOST_DIR -from openpype.api import Logger -log = Logger().get_logger(__name__) +import sys + +from openpype.lib import Logger + +log = Logger.get_logger(__name__) def get_resolve_module(): - from openpype.hosts import resolve + from openpype.hosts.resolve import api # dont run if already loaded - if resolve.api.bmdvr: + if api.bmdvr: log.info(("resolve module is assigned to " - f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}")) - return resolve.api.bmdvr + f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) + return api.bmdvr try: """ The PYTHONPATH needs to be set correctly for this import @@ -71,79 +71,9 @@ def get_resolve_module(): # assign global var and return bmdvr = bmd.scriptapp("Resolve") bmdvf = bmd.scriptapp("Fusion") - resolve.api.bmdvr = bmdvr - resolve.api.bmdvf = bmdvf + api.bmdvr = bmdvr + api.bmdvf = bmdvf log.info(("Assigning resolve module to " - f"`pype.hosts.resolve.api.bmdvr`: {resolve.api.bmdvr}")) + f"`pype.hosts.resolve.api.bmdvr`: {api.bmdvr}")) log.info(("Assigning resolve module to " - f"`pype.hosts.resolve.api.bmdvf`: {resolve.api.bmdvf}")) - - -def _sync_utility_scripts(env=None): - """ Synchronizing basic utlility scripts for resolve. - - To be able to run scripts from inside `Resolve/Workspace/Scripts` menu - all scripts has to be accessible from defined folder. 
- """ - if not env: - env = os.environ - - # initiate inputs - scripts = {} - us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") - us_dir = env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "") - us_paths = [os.path.join( - HOST_DIR, - "utility_scripts" - )] - - # collect script dirs - if us_env: - log.info(f"Utility Scripts Env: `{us_env}`") - us_paths = us_env.split( - os.pathsep) + us_paths - - # collect scripts from dirs - for path in us_paths: - scripts.update({path: os.listdir(path)}) - - log.info(f"Utility Scripts Dir: `{us_paths}`") - log.info(f"Utility Scripts: `{scripts}`") - - # make sure no script file is in folder - if next((s for s in os.listdir(us_dir)), None): - for s in os.listdir(us_dir): - path = os.path.join(us_dir, s) - log.info(f"Removing `{path}`...") - if os.path.isdir(path): - shutil.rmtree(path, onerror=None) - else: - os.remove(path) - - # copy scripts into Resolve's utility scripts dir - for d, sl in scripts.items(): - # directory and scripts list - for s in sl: - # script in script list - src = os.path.join(d, s) - dst = os.path.join(us_dir, s) - log.info(f"Copying `{src}` to `{dst}`...") - if os.path.isdir(src): - shutil.copytree( - src, dst, symlinks=False, - ignore=None, ignore_dangling_symlinks=False - ) - else: - shutil.copy2(src, dst) - - -def setup(env=None): - """ Wrapper installer started from pype.hooks.resolve.ResolvePrelaunch() - """ - if not env: - env = os.environ - - # synchronize resolve utility scripts - _sync_utility_scripts(env) - - log.info("Resolve OpenPype wrapper has been installed") + f"`pype.hosts.resolve.api.bmdvf`: {api.bmdvf}")) diff --git a/openpype/hosts/resolve/api/workio.py b/openpype/hosts/resolve/api/workio.py index f175769387..5ce73eea53 100644 --- a/openpype/hosts/resolve/api/workio.py +++ b/openpype/hosts/resolve/api/workio.py @@ -1,15 +1,15 @@ """Host API required Work Files tool""" import os -from openpype.api import Logger -from .. import ( +from openpype.lib import Logger +from .lib import ( get_project_manager, get_current_project, set_project_manager_to_folder_name ) -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) exported_projet_ext = ".drp" @@ -60,7 +60,7 @@ def open_file(filepath): # load project from input path project = pm.LoadProject(fname) log.info(f"Project {project.GetName()} opened...") - return True + except AttributeError: log.warning((f"Project with name `{fname}` does not exist! 
It will " f"be imported from {filepath} and then loaded...")) @@ -69,9 +69,8 @@ def open_file(filepath): project = pm.LoadProject(fname) log.info(f"Project imported/loaded {project.GetName()}...") return True - else: - return False - + return False + return True def current_file(): pm = get_project_manager() @@ -80,13 +79,9 @@ def current_file(): name = project.GetName() fname = name + exported_projet_ext current_file = os.path.join(current_dir, fname) - normalised = os.path.normpath(current_file) - - # Unsaved current file - if normalised == "": + if not current_file: return None - - return normalised + return os.path.normpath(current_file) def work_root(session): diff --git a/openpype/hosts/resolve/hooks/pre_resolve_setup.py b/openpype/hosts/resolve/hooks/pre_resolve_setup.py index 978e3760fd..8574b3ad01 100644 --- a/openpype/hosts/resolve/hooks/pre_resolve_setup.py +++ b/openpype/hosts/resolve/hooks/pre_resolve_setup.py @@ -1,7 +1,7 @@ import os -import importlib +import platform from openpype.lib import PreLaunchHook -from openpype.hosts.resolve.api import utils +from openpype.hosts.resolve.utils import setup class ResolvePrelaunch(PreLaunchHook): @@ -14,47 +14,91 @@ class ResolvePrelaunch(PreLaunchHook): app_groups = ["resolve"] def execute(self): + current_platform = platform.system().lower() + + PROGRAMDATA = self.launch_context.env.get("PROGRAMDATA", "") + RESOLVE_SCRIPT_API_ = { + "windows": ( + f"{PROGRAMDATA}/Blackmagic Design/" + "DaVinci Resolve/Support/Developer/Scripting" + ), + "darwin": ( + "/Library/Application Support/Blackmagic Design" + "/DaVinci Resolve/Developer/Scripting" + ), + "linux": "/opt/resolve/Developer/Scripting" + } + RESOLVE_SCRIPT_API = os.path.normpath( + RESOLVE_SCRIPT_API_[current_platform]) + self.launch_context.env["RESOLVE_SCRIPT_API"] = RESOLVE_SCRIPT_API + + RESOLVE_SCRIPT_LIB_ = { + "windows": ( + "C:/Program Files/Blackmagic Design" + "/DaVinci Resolve/fusionscript.dll" + ), + "darwin": ( + "/Applications/DaVinci Resolve/DaVinci Resolve.app" + "/Contents/Libraries/Fusion/fusionscript.so" + ), + "linux": "/opt/resolve/libs/Fusion/fusionscript.so" + } + RESOLVE_SCRIPT_LIB = os.path.normpath( + RESOLVE_SCRIPT_LIB_[current_platform]) + self.launch_context.env["RESOLVE_SCRIPT_LIB"] = RESOLVE_SCRIPT_LIB + # TODO: add OTIO installation from `openpype/requirements.py` - # making sure python 3.6 is installed at provided path - py36_dir = os.path.normpath( - self.launch_context.env.get("PYTHON36_RESOLVE", "")) - assert os.path.isdir(py36_dir), ( - "Python 3.6 is not installed at the provided folder path. Either " + # making sure python <3.9.* is installed at provided path + python3_home = os.path.normpath( + self.launch_context.env.get("RESOLVE_PYTHON3_HOME", "")) + + assert os.path.isdir(python3_home), ( + "Python 3 is not installed at the provided folder path. Either " "make sure the `environments\resolve.json` is having correctly " - "set `PYTHON36_RESOLVE` or make sure Python 3.6 is installed " - f"in given path. \nPYTHON36_RESOLVE: `{py36_dir}`" + "set `RESOLVE_PYTHON3_HOME` or make sure Python 3 is installed " + f"in given path. 
\nRESOLVE_PYTHON3_HOME: `{python3_home}`" ) - self.log.info(f"Path to Resolve Python folder: `{py36_dir}`...") + self.launch_context.env["PYTHONHOME"] = python3_home + self.log.info(f"Path to Resolve Python folder: `{python3_home}`...") + # add to the python path to path + env_path = self.launch_context.env["PATH"] + self.launch_context.env["PATH"] = os.pathsep.join([ + python3_home, + os.path.join(python3_home, "Scripts") + ] + env_path.split(os.pathsep)) + + self.log.debug(f"PATH: {self.launch_context.env['PATH']}") + + # add to the PYTHONPATH + env_pythonpath = self.launch_context.env["PYTHONPATH"] + self.launch_context.env["PYTHONPATH"] = os.pathsep.join([ + os.path.join(python3_home, "Lib", "site-packages"), + os.path.join(RESOLVE_SCRIPT_API, "Modules"), + ] + env_pythonpath.split(os.pathsep)) + + self.log.debug(f"PYTHONPATH: {self.launch_context.env['PYTHONPATH']}") + + RESOLVE_UTILITY_SCRIPTS_DIR_ = { + "windows": ( + f"{PROGRAMDATA}/Blackmagic Design" + "/DaVinci Resolve/Fusion/Scripts/Comp" + ), + "darwin": ( + "/Library/Application Support/Blackmagic Design" + "/DaVinci Resolve/Fusion/Scripts/Comp" + ), + "linux": "/opt/resolve/Fusion/Scripts/Comp" + } + RESOLVE_UTILITY_SCRIPTS_DIR = os.path.normpath( + RESOLVE_UTILITY_SCRIPTS_DIR_[current_platform] + ) # setting utility scripts dir for scripts syncing - us_dir = os.path.normpath( - self.launch_context.env.get("RESOLVE_UTILITY_SCRIPTS_DIR", "") - ) - assert os.path.isdir(us_dir), ( - "Resolve utility script dir does not exists. Either make sure " - "the `environments\resolve.json` is having correctly set " - "`RESOLVE_UTILITY_SCRIPTS_DIR` or reinstall DaVinci Resolve. \n" - f"RESOLVE_UTILITY_SCRIPTS_DIR: `{us_dir}`" - ) - self.log.debug(f"-- us_dir: `{us_dir}`") + self.launch_context.env["RESOLVE_UTILITY_SCRIPTS_DIR"] = ( + RESOLVE_UTILITY_SCRIPTS_DIR) - # correctly format path for pre python script - pre_py_sc = os.path.normpath( - self.launch_context.env.get("PRE_PYTHON_SCRIPT", "")) - self.launch_context.env["PRE_PYTHON_SCRIPT"] = pre_py_sc - self.log.debug(f"-- pre_py_sc: `{pre_py_sc}`...") - try: - __import__("openpype.hosts.resolve") - __import__("pyblish") + # remove terminal coloring tags + self.launch_context.env["OPENPYPE_LOG_NO_COLORS"] = "True" - except ImportError: - self.log.warning( - "pyblish: Could not load Resolve integration.", - exc_info=True - ) - - else: - # Resolve Setup integration - importlib.reload(utils) - self.log.debug(f"-- utils.__file__: `{utils.__file__}`") - utils.setup(self.launch_context.env) + # Resolve Setup integration + setup(self.launch_context.env) diff --git a/openpype/hosts/resolve/plugins/create/create_shot_clip.py b/openpype/hosts/resolve/plugins/create/create_shot_clip.py index 62d5557a50..4b14f2493f 100644 --- a/openpype/hosts/resolve/plugins/create/create_shot_clip.py +++ b/openpype/hosts/resolve/plugins/create/create_shot_clip.py @@ -1,9 +1,12 @@ # from pprint import pformat -from openpype.hosts import resolve -from openpype.hosts.resolve.api import lib +from openpype.hosts.resolve.api import plugin, lib +from openpype.hosts.resolve.api.lib import ( + get_video_track_names, + create_bin, +) -class CreateShotClip(resolve.Creator): +class CreateShotClip(plugin.Creator): """Publishable clip""" label = "Create Publishable Clip" @@ -11,7 +14,7 @@ class CreateShotClip(resolve.Creator): icon = "film" defaults = ["Main"] - gui_tracks = resolve.get_video_track_names() + gui_tracks = get_video_track_names() gui_name = "OpenPype publish attributes creator" gui_info = "Define sequential rename 
and fill hierarchy data." gui_inputs = { @@ -116,12 +119,13 @@ class CreateShotClip(resolve.Creator): "order": 0}, "vSyncTrack": { "value": gui_tracks, # noqa - "type": "QComboBox", - "label": "Hero track", - "target": "ui", - "toolTip": "Select driving track name which should be mastering all others", # noqa - "order": 1} + "type": "QComboBox", + "label": "Hero track", + "target": "ui", + "toolTip": "Select driving track name which should be mastering all others", # noqa + "order": 1 } + } }, "publishSettings": { "type": "section", @@ -172,28 +176,31 @@ class CreateShotClip(resolve.Creator): "target": "ui", "order": 4, "value": { - "workfileFrameStart": { - "value": 1001, - "type": "QSpinBox", - "label": "Workfiles Start Frame", - "target": "tag", - "toolTip": "Set workfile starting frame number", # noqa - "order": 0}, - "handleStart": { - "value": 0, - "type": "QSpinBox", - "label": "Handle start (head)", - "target": "tag", - "toolTip": "Handle at start of clip", # noqa - "order": 1}, - "handleEnd": { - "value": 0, - "type": "QSpinBox", - "label": "Handle end (tail)", - "target": "tag", - "toolTip": "Handle at end of clip", # noqa - "order": 2}, - } + "workfileFrameStart": { + "value": 1001, + "type": "QSpinBox", + "label": "Workfiles Start Frame", + "target": "tag", + "toolTip": "Set workfile starting frame number", # noqa + "order": 0 + }, + "handleStart": { + "value": 0, + "type": "QSpinBox", + "label": "Handle start (head)", + "target": "tag", + "toolTip": "Handle at start of clip", # noqa + "order": 1 + }, + "handleEnd": { + "value": 0, + "type": "QSpinBox", + "label": "Handle end (tail)", + "target": "tag", + "toolTip": "Handle at end of clip", # noqa + "order": 2 + } + } } } @@ -229,8 +236,10 @@ class CreateShotClip(resolve.Creator): v_sync_track = widget.result["vSyncTrack"]["value"] # sort selected trackItems by - sorted_selected_track_items = list() - unsorted_selected_track_items = list() + sorted_selected_track_items = [] + unsorted_selected_track_items = [] + print("_____ selected ______") + print(self.selected) for track_item_data in self.selected: if track_item_data["track"]["name"] in v_sync_track: sorted_selected_track_items.append(track_item_data) @@ -244,7 +253,7 @@ class CreateShotClip(resolve.Creator): sq_markers = self.timeline.GetMarkers() # create media bin for compound clips (trackItems) - mp_folder = resolve.create_bin(self.timeline.GetName()) + mp_folder = create_bin(self.timeline.GetName()) kwargs = { "ui_inputs": widget.result, @@ -253,11 +262,11 @@ class CreateShotClip(resolve.Creator): "sq_frame_start": sq_frame_start, "sq_markers": sq_markers } - + print(kwargs) for i, track_item_data in enumerate(sorted_selected_track_items): self.rename_index = i - + self.log.info(track_item_data) # convert track item to timeline media pool item - track_item = resolve.PublishClip( + track_item = plugin.PublishClip( self, track_item_data, **kwargs).convert() track_item.SetClipColor(lib.publish_clip_color) diff --git a/openpype/hosts/resolve/plugins/load/load_clip.py b/openpype/hosts/resolve/plugins/load/load_clip.py index 71850d95f6..a0c78c182f 100644 --- a/openpype/hosts/resolve/plugins/load/load_clip.py +++ b/openpype/hosts/resolve/plugins/load/load_clip.py @@ -1,15 +1,22 @@ from copy import deepcopy -from importlib import reload -from avalon import io -from openpype.hosts import resolve -from openpype.pipeline import get_representation_path +from openpype.client import ( + get_version_by_id, + get_last_version_by_subset_id, +) +# from openpype.hosts import resolve 
+from openpype.pipeline import ( + get_representation_path, + legacy_io, +) from openpype.hosts.resolve.api import lib, plugin -reload(plugin) -reload(lib) +from openpype.hosts.resolve.api.pipeline import ( + containerise, + update_container, +) -class LoadClip(resolve.TimelineItemLoader): +class LoadClip(plugin.TimelineItemLoader): """Load a subset to timeline as clip Place clip to timeline on its asset origin timings collected @@ -17,7 +24,7 @@ class LoadClip(resolve.TimelineItemLoader): """ families = ["render2d", "source", "plate", "render", "review"] - representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", ".mov"] + representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264", "mov"] label = "Load as clip" order = -10 @@ -40,7 +47,7 @@ class LoadClip(resolve.TimelineItemLoader): }) # load clip to timeline and get main variables - timeline_item = resolve.ClipLoader( + timeline_item = plugin.ClipLoader( self, context, **options).load() namespace = namespace or timeline_item.GetName() version = context['version'] @@ -74,7 +81,7 @@ class LoadClip(resolve.TimelineItemLoader): self.log.info("Loader done: `{}`".format(name)) - return resolve.containerise( + return containerise( timeline_item, name, namespace, context, self.__class__.__name__, @@ -92,12 +99,10 @@ class LoadClip(resolve.TimelineItemLoader): context.update({"representation": representation}) name = container['name'] namespace = container['namespace'] - timeline_item_data = resolve.get_pype_timeline_item_by_name(namespace) + timeline_item_data = lib.get_pype_timeline_item_by_name(namespace) timeline_item = timeline_item_data["clip"]["item"] - version = io.find_one({ - "type": "version", - "_id": representation["parent"] - }) + project_name = legacy_io.active_project() + version = get_version_by_id(project_name, representation["parent"]) version_data = version.get("data", {}) version_name = version.get("name", None) colorspace = version_data.get("colorspace", None) @@ -105,7 +110,7 @@ class LoadClip(resolve.TimelineItemLoader): self.fname = get_representation_path(representation) context["version"] = {"data": version_data} - loader = resolve.ClipLoader(self, context) + loader = plugin.ClipLoader(self, context) timeline_item = loader.update(timeline_item) # add additional metadata from the version to imprint Avalon knob @@ -132,23 +137,26 @@ class LoadClip(resolve.TimelineItemLoader): # update color of clip regarding the version order self.set_item_color(timeline_item, version) - return resolve.update_container(timeline_item, data_imprint) + return update_container(timeline_item, data_imprint) @classmethod def set_item_color(cls, timeline_item, version): - # define version name version_name = version.get("name", None) # get all versions in list - versions = io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) + project_name = legacy_io.active_project() + last_version_doc = get_last_version_by_subset_id( + project_name, + version["parent"], + fields=["name"] + ) + if last_version_doc: + last_version = last_version_doc["name"] + else: + last_version = None # set clip colour - if version_name == max_version: + if version_name == last_version: timeline_item.SetClipColor(cls.clip_color_last) else: timeline_item.SetClipColor(cls.clip_color) diff --git a/openpype/hosts/resolve/plugins/publish/extract_workfile.py b/openpype/hosts/resolve/plugins/publish/extract_workfile.py index e3d60465a2..535f879b58 100644 --- 
a/openpype/hosts/resolve/plugins/publish/extract_workfile.py +++ b/openpype/hosts/resolve/plugins/publish/extract_workfile.py @@ -1,10 +1,11 @@ import os import pyblish.api -import openpype.api -from openpype.hosts import resolve + +from openpype.pipeline import publish +from openpype.hosts.resolve.api.lib import get_project_manager -class ExtractWorkfile(openpype.api.Extractor): +class ExtractWorkfile(publish.Extractor): """ Extractor export DRP workfile file representation """ @@ -29,7 +30,7 @@ class ExtractWorkfile(openpype.api.Extractor): os.path.join(staging_dir, drp_file_name)) # write out the drp workfile - resolve.get_project_manager().ExportProject( + get_project_manager().ExportProject( project.GetName(), drp_file_path) # create drp workfile representation diff --git a/openpype/hosts/resolve/plugins/publish/precollect_instances.py b/openpype/hosts/resolve/plugins/publish/precollect_instances.py index 8f1a13a4e5..8ec169ad65 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_instances.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_instances.py @@ -1,9 +1,15 @@ -import pyblish -from openpype.hosts import resolve - -# # developer reload modules from pprint import pformat +import pyblish + +from openpype.hosts.resolve.api.lib import ( + get_current_timeline_items, + get_timeline_item_pype_tag, + publish_clip_color, + get_publish_attribute, + get_otio_clip_instance_data, +) + class PrecollectInstances(pyblish.api.ContextPlugin): """Collect all Track items selection.""" @@ -14,8 +20,8 @@ class PrecollectInstances(pyblish.api.ContextPlugin): def process(self, context): otio_timeline = context.data["otioTimeline"] - selected_timeline_items = resolve.get_current_timeline_items( - filter=True, selecting_color=resolve.publish_clip_color) + selected_timeline_items = get_current_timeline_items( + filter=True, selecting_color=publish_clip_color) self.log.info( "Processing enabled track items: {}".format( @@ -27,7 +33,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): timeline_item = timeline_item_data["clip"]["item"] # get pype tag data - tag_data = resolve.get_timeline_item_pype_tag(timeline_item) + tag_data = get_timeline_item_pype_tag(timeline_item) self.log.debug(f"__ tag_data: {pformat(tag_data)}") if not tag_data: @@ -67,14 +73,15 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "asset": asset, "item": timeline_item, "families": families, - "publish": resolve.get_publish_attribute(timeline_item), + "publish": get_publish_attribute(timeline_item), "fps": context.data["fps"], "handleStart": handle_start, - "handleEnd": handle_end + "handleEnd": handle_end, + "newAssetPublishing": True }) # otio clip data - otio_data = resolve.get_otio_clip_instance_data( + otio_data = get_otio_clip_instance_data( otio_timeline, timeline_item_data) or {} data.update(otio_data) @@ -133,7 +140,7 @@ class PrecollectInstances(pyblish.api.ContextPlugin): "asset": asset, "family": family, "families": [], - "publish": resolve.get_publish_attribute(timeline_item) + "publish": get_publish_attribute(timeline_item) }) context.create_instance(**data) diff --git a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py index 1333516177..0f94216556 100644 --- a/openpype/hosts/resolve/plugins/publish/precollect_workfile.py +++ b/openpype/hosts/resolve/plugins/publish/precollect_workfile.py @@ -1,12 +1,9 @@ import pyblish.api -from openpype.hosts import resolve -from avalon import api as avalon from pprint import 
pformat -# dev -from importlib import reload +from openpype.hosts.resolve import api as rapi +from openpype.pipeline import legacy_io from openpype.hosts.resolve.otio import davinci_export -reload(davinci_export) class PrecollectWorkfile(pyblish.api.ContextPlugin): @@ -17,11 +14,11 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): def process(self, context): - asset = avalon.Session["AVALON_ASSET"] + asset = legacy_io.Session["AVALON_ASSET"] subset = "workfile" - project = resolve.get_current_project() + project = rapi.get_current_project() fps = project.GetSetting("timelineFrameRate") - video_tracks = resolve.get_video_track_names() + video_tracks = rapi.get_video_track_names() # adding otio timeline to context otio_timeline = davinci_export.create_otio_timeline(project) @@ -31,7 +28,8 @@ class PrecollectWorkfile(pyblish.api.ContextPlugin): "asset": asset, "subset": "{}{}".format(asset, subset.capitalize()), "item": project, - "family": "workfile" + "family": "workfile", + "families": [] } # create instance with workfile diff --git a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py index ac66916b91..8f3917bece 100644 --- a/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py +++ b/openpype/hosts/resolve/utility_scripts/OpenPype_sync_util_scripts.py @@ -1,14 +1,16 @@ #!/usr/bin/env python import os import sys -import openpype + +from openpype.pipeline import install_host def main(env): - import openpype.hosts.resolve as bmdvr + from openpype.hosts.resolve.utils import setup + import openpype.hosts.resolve.api as bmdvr # Registers openpype's Global pyblish plugins - openpype.install() - bmdvr.setup(env) + install_host(bmdvr) + setup(env) if __name__ == "__main__": diff --git a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py index b0cef1838a..1087a7b7a0 100644 --- a/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py +++ b/openpype/hosts/resolve/utility_scripts/__OpenPype__Menu__.py @@ -1,22 +1,17 @@ import os import sys -import avalon.api as avalon -import openpype -from openpype.api import Logger +from openpype.pipeline import install_host +from openpype.lib import Logger -log = Logger().get_logger(__name__) +log = Logger.get_logger(__name__) def main(env): - import openpype.hosts.resolve as bmdvr - # Registers openpype's Global pyblish plugins - openpype.install() + import openpype.hosts.resolve.api as bmdvr # activate resolve from openpype - avalon.install(bmdvr) - - log.info(f"Avalon registered hosts: {avalon.registered_host()}") + install_host(bmdvr) bmdvr.launch_pype_menu() diff --git a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py index 5430ad32df..92f2e43a72 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py +++ b/openpype/hosts/resolve/utility_scripts/tests/test_otio_as_edl.py @@ -1,11 +1,13 @@ #! 
python3 import os import sys -import avalon.api as avalon -import openpype + import opentimelineio as otio -from openpype.hosts.resolve import TestGUI -import openpype.hosts.resolve as bmdvr + +from openpype.pipeline import install_host + +import openpype.hosts.resolve.api as bmdvr +from openpype.hosts.resolve.api.testing_utils import TestGUI from openpype.hosts.resolve.otio import davinci_export as otio_export @@ -14,10 +16,8 @@ class ThisTestGUI(TestGUI): def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py index afa311e0b8..91a361ec08 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_create_timeline_item_from_path.py @@ -1,22 +1,25 @@ #! python3 import os import sys -import avalon.api as avalon -import openpype -from openpype.hosts.resolve import TestGUI -import openpype.hosts.resolve as bmdvr + import clique +from openpype.pipeline import install_host +from openpype.hosts.resolve.api.testing_utils import TestGUI +import openpype.hosts.resolve.api as bmdvr +from openpype.hosts.resolve.api.lib import ( + create_media_pool_item, + create_timeline_item, +) + class ThisTestGUI(TestGUI): extensions = [".exr", ".jpg", ".mov", ".png", ".mp4", ".ari", ".arx"] def __init__(self): super(ThisTestGUI, self).__init__() - # Registers openpype's Global pyblish plugins - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) def _open_dir_button_pressed(self, event): # selected_path = self.fu.RequestFile(os.path.expanduser("~")) @@ -57,10 +60,10 @@ class ThisTestGUI(TestGUI): # skip if unwanted extension if ext not in self.extensions: return - media_pool_item = bmdvr.create_media_pool_item(fpath) + media_pool_item = create_media_pool_item(fpath) print(media_pool_item) - track_item = bmdvr.create_timeline_item(media_pool_item) + track_item = create_timeline_item(media_pool_item) print(track_item) diff --git a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py index cfdbe890e5..2e83188bde 100644 --- a/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py +++ b/openpype/hosts/resolve/utility_scripts/tests/testing_load_media_pool_item.py @@ -1,22 +1,24 @@ #! 
python3 -import avalon.api as avalon -import openpype -import openpype.hosts.resolve as bmdvr +from openpype.pipeline import install_host +from openpype.hosts.resolve import api as bmdvr +from openpype.hosts.resolve.api.lib import ( + create_media_pool_item, + create_timeline_item, +) def file_processing(fpath): - media_pool_item = bmdvr.create_media_pool_item(fpath) + media_pool_item = create_media_pool_item(fpath) print(media_pool_item) - track_item = bmdvr.create_timeline_item(media_pool_item) + track_item = create_timeline_item(media_pool_item) print(track_item) if __name__ == "__main__": path = "C:/CODE/__openpype_projects/jtest03dev/shots/sq01/mainsq01sh030/publish/plate/plateMain/v006/jt3d_mainsq01sh030_plateMain_v006.0996.exr" - openpype.install() # activate resolve from openpype - avalon.install(bmdvr) + install_host(bmdvr) - file_processing(path) \ No newline at end of file + file_processing(path) diff --git a/openpype/hosts/resolve/utils.py b/openpype/hosts/resolve/utils.py new file mode 100644 index 0000000000..5881f153ae --- /dev/null +++ b/openpype/hosts/resolve/utils.py @@ -0,0 +1,55 @@ +import os +import shutil +from openpype.lib import Logger + +RESOLVE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def setup(env): + log = Logger.get_logger("ResolveSetup") + scripts = {} + us_env = env.get("RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR") + us_dir = env["RESOLVE_UTILITY_SCRIPTS_DIR"] + + us_paths = [os.path.join( + RESOLVE_ROOT_DIR, + "utility_scripts" + )] + + # collect script dirs + if us_env: + log.info("Utility Scripts Env: `{}`".format(us_env)) + us_paths = us_env.split( + os.pathsep) + us_paths + + # collect scripts from dirs + for path in us_paths: + scripts.update({path: os.listdir(path)}) + + log.info("Utility Scripts Dir: `{}`".format(us_paths)) + log.info("Utility Scripts: `{}`".format(scripts)) + + # make sure no script file is in folder + for s in os.listdir(us_dir): + path = os.path.join(us_dir, s) + log.info("Removing `{}`...".format(path)) + if os.path.isdir(path): + shutil.rmtree(path, onerror=None) + else: + os.remove(path) + + # copy scripts into Resolve's utility scripts dir + for d, sl in scripts.items(): + # directory and scripts list + for s in sl: + # script in script list + src = os.path.join(d, s) + dst = os.path.join(us_dir, s) + log.info("Copying `{}` to `{}`...".format(src, dst)) + if os.path.isdir(src): + shutil.copytree( + src, dst, symlinks=False, + ignore=None, ignore_dangling_symlinks=False + ) + else: + shutil.copy2(src, dst) diff --git a/openpype/hosts/standalonepublisher/__init__.py b/openpype/hosts/standalonepublisher/__init__.py index e69de29bb2..f47fa6b573 100644 --- a/openpype/hosts/standalonepublisher/__init__.py +++ b/openpype/hosts/standalonepublisher/__init__.py @@ -0,0 +1,6 @@ +from .addon import StandAlonePublishAddon + + +__all__ = ( + "StandAlonePublishAddon", +) diff --git a/openpype/hosts/standalonepublisher/addon.py b/openpype/hosts/standalonepublisher/addon.py new file mode 100644 index 0000000000..67204b581b --- /dev/null +++ b/openpype/hosts/standalonepublisher/addon.py @@ -0,0 +1,56 @@ +import os + +import click + +from openpype.lib import get_openpype_execute_args +from openpype.lib.execute import run_detached_process +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon + +STANDALONEPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class StandAlonePublishAddon(OpenPypeModule, ITrayAction, IHostAddon): + label = "Publisher (legacy)" + name = "standalonepublisher" + host_name = 
"standalonepublisher" + + def initialize(self, modules_settings): + self.enabled = modules_settings["standalonepublish_tool"]["enabled"] + self.publish_paths = [ + os.path.join(STANDALONEPUBLISH_ROOT_DIR, "plugins", "publish") + ] + + def tray_init(self): + return + + def on_action_trigger(self): + self.run_standalone_publisher() + + def connect_with_modules(self, enabled_modules): + """Collect publish paths from other modules.""" + + publish_paths = self.manager.collect_plugin_paths()["publish"] + self.publish_paths.extend(publish_paths) + + def run_standalone_publisher(self): + args = get_openpype_execute_args("module", self.name, "launch") + run_detached_process(args) + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group( + StandAlonePublishAddon.name, + help="StandalonePublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +def launch(): + """Launch StandalonePublisher tool UI.""" + + from openpype.tools import standalonepublish + + standalonepublish.main() diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..857f3dca20 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectSAAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["standalonepublisher"] + + def process(self, context): + context.data["appName"] = "standalone publisher" + context.data["appLabel"] = "Standalone publisher" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py deleted file mode 100644 index 4ca1f72cc4..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_batch_instances.py +++ /dev/null @@ -1,70 +0,0 @@ -import copy -import pyblish.api -from pprint import pformat - - -class CollectBatchInstances(pyblish.api.InstancePlugin): - """Collect all available instances for batch publish.""" - - label = "Collect Batch Instances" - order = pyblish.api.CollectorOrder + 0.489 - hosts = ["standalonepublisher"] - families = ["background_batch"] - - # presets - default_subset_task = { - "background_batch": "background" - } - subsets = { - "background_batch": { - "backgroundLayout": { - "task": "background", - "family": "backgroundLayout" - }, - "backgroundComp": { - "task": "background", - "family": "backgroundComp" - }, - "workfileBackground": { - "task": "background", - "family": "workfile" - } - } - } - unchecked_by_default = [] - - def process(self, instance): - context = instance.context - asset_name = instance.data["asset"] - family = instance.data["family"] - - default_task_name = self.default_subset_task.get(family) - for subset_name, subset_data in self.subsets[family].items(): - instance_name = f"{asset_name}_{subset_name}" - task_name = subset_data.get("task") or default_task_name - - # create new instance - new_instance = context.create_instance(instance_name) - - # add original instance data except name key - for key, value in instance.data.items(): - if key not in ["name"]: - # Make sure value is copy since value may be object which - # can be shared across all new created objects - new_instance.data[key] = copy.deepcopy(value) - - # add subset data from preset - 
new_instance.data.update(subset_data) - - new_instance.data["label"] = instance_name - new_instance.data["subset"] = subset_name - new_instance.data["task"] = task_name - - if subset_name in self.unchecked_by_default: - new_instance.data["publish"] = False - - self.log.info(f"Created new instance: {instance_name}") - self.log.debug(f"_ inst_data: {pformat(new_instance.data)}") - - # delete original instance - context.remove(instance) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py index 9f075d66cf..7925b0ecf3 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py @@ -2,8 +2,8 @@ import copy import json import pyblish.api -from avalon import io -from openpype.lib import get_subset_name_with_asset_doc +from openpype.client import get_asset_by_name +from openpype.pipeline.create import get_subset_name class CollectBulkMovInstances(pyblish.api.InstancePlugin): @@ -24,12 +24,9 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): def process(self, instance): context = instance.context + project_name = context.data["projectEntity"]["name"] asset_name = instance.data["asset"] - - asset_doc = io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) if not asset_doc: raise AssertionError(( "Couldn't find Asset document with name \"{}\"" @@ -47,12 +44,14 @@ class CollectBulkMovInstances(pyblish.api.InstancePlugin): task_name = available_task_names[_task_name_low] break - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.new_instance_family, self.subset_name_variant, task_name, asset_doc, - io.Session["AVALON_PROJECT"] + project_name, + host_name=context.data["hostName"], + project_settings=context.data["project_settings"] ) instance_name = f"{asset_name}_{subset_name}" diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py index 6913e0836d..2bf3917e2f 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_context.py @@ -19,7 +19,8 @@ import copy from pprint import pformat import clique import pyblish.api -from avalon import io + +from openpype.pipeline import legacy_io class CollectContextDataSAPublish(pyblish.api.ContextPlugin): @@ -37,7 +38,7 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): def process(self, context): # get json paths from os and load them - io.install() + legacy_io.install() # get json file context input_json_path = os.environ.get("SAPUBLISH_INPATH") @@ -247,7 +248,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) - instance.data["fps"] = int(component["fps"]) + if component.get("fps"): + instance.data["fps"] = int(component["fps"]) ext = component["ext"] if ext.startswith("."): diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py index 0a1d29ccdc..8633d4bf9d 100644 --- 
a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial.py @@ -19,6 +19,7 @@ import os import opentimelineio as otio import pyblish.api from openpype import lib as plib +from openpype.pipeline.context_tools import get_current_project_asset class OTIO_View(pyblish.api.Action): @@ -116,7 +117,7 @@ class CollectEditorial(pyblish.api.InstancePlugin): if extension == ".edl": # EDL has no frame rate embedded so needs explicit # frame rate else 24 is asssumed. - kwargs["rate"] = plib.get_asset()["data"]["fps"] + kwargs["rate"] = get_current_project_asset()["data"]["fps"] instance.data["otio_timeline"] = otio.adapters.read_from_file( file_path, **kwargs) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py index d0d36bb717..75c260bad7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_editorial_instances.py @@ -1,8 +1,12 @@ import os +from copy import deepcopy + import opentimelineio as otio import pyblish.api + from openpype import lib as plib -from copy import deepcopy +from openpype.pipeline.context_tools import get_current_project_asset + class CollectInstances(pyblish.api.InstancePlugin): """Collect instances from editorial's OTIO sequence""" @@ -48,7 +52,7 @@ class CollectInstances(pyblish.api.InstancePlugin): # get timeline otio data timeline = instance.data["otio_timeline"] - fps = plib.get_asset()["data"]["fps"] + fps = get_current_project_asset()["data"]["fps"] tracks = timeline.each_child( descended_from_type=otio.schema.Track @@ -166,7 +170,8 @@ class CollectInstances(pyblish.api.InstancePlugin): "frameStart": frame_start, "frameEnd": frame_end, "frameStartH": frame_start - handle_start, - "frameEndH": frame_end + handle_end + "frameEndH": frame_end + handle_end, + "newAssetPublishing": True } for data_key in instance_data_filter: diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py index b2735f3428..9109bf6726 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py @@ -1,8 +1,11 @@ -import pyblish.api -import re import os -from avalon import io +from pprint import pformat +import re from copy import deepcopy +import pyblish.api + +from openpype.client import get_asset_by_id + class CollectHierarchyInstance(pyblish.api.ContextPlugin): """Collecting hierarchy context from `parents` and `hierarchy` data @@ -19,6 +22,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): families = ["shot"] # presets + shot_rename = True shot_rename_template = None shot_rename_search_patterns = None shot_add_hierarchy = None @@ -44,7 +48,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): parent_name = instance.context.data["assetEntity"]["name"] clip = instance.data["item"] clip_name = os.path.splitext(clip.name)[0].lower() - if self.shot_rename_search_patterns: + if self.shot_rename_search_patterns and self.shot_rename: search_text += parent_name + clip_name instance.data["anatomyData"].update({"clip_name": clip_name}) for type, pattern in self.shot_rename_search_patterns.items(): @@ -54,33 +58,38 @@ class 
CollectHierarchyInstance(pyblish.api.ContextPlugin): continue instance.data["anatomyData"][type] = match[-1] - # format to new shot name - instance.data["asset"] = self.shot_rename_template.format( - **instance.data["anatomyData"]) + # format to new shot name + instance.data["asset"] = self.shot_rename_template.format( + **instance.data["anatomyData"]) def create_hierarchy(self, instance): - parents = list() - hierarchy = list() - visual_hierarchy = [instance.context.data["assetEntity"]] + asset_doc = instance.context.data["assetEntity"] + project_doc = instance.context.data["projectEntity"] + project_name = project_doc["name"] + visual_hierarchy = [asset_doc] + current_doc = asset_doc while True: - visual_parent = io.find_one( - {"_id": visual_hierarchy[-1]["data"]["visualParent"]} - ) - if visual_parent: - visual_hierarchy.append(visual_parent) - else: - visual_hierarchy.append( - instance.context.data["projectEntity"]) + visual_parent_id = current_doc["data"]["visualParent"] + visual_parent = None + if visual_parent_id: + visual_parent = get_asset_by_id(project_name, visual_parent_id) + + if not visual_parent: + visual_hierarchy.append(project_doc) break + visual_hierarchy.append(visual_parent) + current_doc = visual_parent # add current selection context hierarchy from standalonepublisher + parents = list() for entity in reversed(visual_hierarchy): parents.append({ "entity_type": entity["data"]["entityType"], "entity_name": entity["name"] }) - if self.shot_add_hierarchy: + hierarchy = list() + if self.shot_add_hierarchy.get("enabled"): parent_template_patern = re.compile(r"\{([a-z]*?)\}") # fill the parents parts from presets shot_add_hierarchy = self.shot_add_hierarchy.copy() @@ -124,12 +133,12 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): instance.data["parents"] = parents # print - self.log.debug(f"Hierarchy: {hierarchy}") - self.log.debug(f"parents: {parents}") + self.log.warning(f"Hierarchy: {hierarchy}") + self.log.info(f"parents: {parents}") + tasks_to_add = dict() if self.shot_add_tasks: - tasks_to_add = dict() - project_tasks = io.find_one({"type": "project"})["config"]["tasks"] + project_tasks = project_doc["config"]["tasks"] for task_name, task_data in self.shot_add_tasks.items(): _task_data = deepcopy(task_data) @@ -147,9 +156,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): task_name, list(project_tasks.keys()))) - instance.data["tasks"] = tasks_to_add - else: - instance.data["tasks"] = dict() + instance.data["tasks"] = tasks_to_add # updating hierarchy data instance.data["anatomyData"].update({ @@ -158,6 +165,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): }) def process(self, context): + self.log.info("self.shot_add_hierarchy: {}".format( + pformat(self.shot_add_hierarchy) + )) for instance in context: if instance.data["family"] in self.families: self.processing_instance(instance) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py index 0d629b1b44..82d7247b2b 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py @@ -2,9 +2,10 @@ import os import re import collections import pyblish.api -from avalon import io from pprint import pformat +from openpype.client import get_assets + class CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): """ @@ -118,8 +119,9 @@ class 
CollectMatchingAssetToInstance(pyblish.api.InstancePlugin): def _asset_docs_by_parent_id(self, instance): # Query all assets for project and store them by parent's id to list + project_name = instance.context.data["projectEntity"]["name"] asset_docs_by_parent_id = collections.defaultdict(list) - for asset_doc in io.find({"type": "asset"}): + for asset_doc in get_assets(project_name): parent_id = asset_doc["data"]["visualParent"] asset_docs_by_parent_id[parent_id].append(asset_doc) return asset_docs_by_parent_id diff --git a/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py new file mode 100644 index 0000000000..b83a924d33 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/collect_original_basename.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +"""Collect original base name for use in templates.""" +from pathlib import Path + +import pyblish.api + + +class CollectOriginalBasename(pyblish.api.InstancePlugin): + """Collect original file base name.""" + + order = pyblish.api.CollectorOrder + 0.498 + label = "Collect Base Name" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + + def process(self, instance): + file_name = Path(instance.data["representations"][0]["files"]) + instance.data["originalBasename"] = file_name.stem diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py deleted file mode 100644 index f07499c15d..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_for_compositing.py +++ /dev/null @@ -1,242 +0,0 @@ -import os -import json -import copy -import openpype.api -from avalon import io - -PSDImage = None - - -class ExtractBGForComp(openpype.api.Extractor): - label = "Extract Background for Compositing" - families = ["backgroundComp"] - hosts = ["standalonepublisher"] - - new_instance_family = "background" - - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "SB", "UL", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" 
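For context: the removed extractor above imports `psd_tools` lazily inside `process()` so that Pyblish plugin discovery still works in environments where the library is missing. A minimal sketch of that guard pattern (the helper name `_require_psd_tools` is illustrative, not from the codebase):

```python
def _require_psd_tools():
    # Import psd-tools only when the plugin actually runs, so a missing
    # optional dependency fails the publish step, not plugin discovery.
    try:
        from psd_tools import PSDImage
    except ImportError as exc:
        raise AssertionError(
            "BUG: Python module `psd-tools` is not installed!"
        ) from exc
    return PSDImage
```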
- ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - - self.redo_global_plugins(instance) - - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - if not instance.data.get("transfers"): - instance.data["transfers"] = [] - - # Prepare staging dir - staging_dir = self.staging_dir(instance) - if not os.path.exists(staging_dir): - os.makedirs(staging_dir) - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # Prepare publish dir for transfers - publish_dir = instance.data["publishDir"] - - # Prepare json filepath where extracted metadata are stored - json_filename = "{}.json".format(instance.name) - json_full_path = os.path.join(staging_dir, json_filename) - - self.log.debug(f"`staging_dir` is \"{staging_dir}\"") - - # Prepare new repre data - new_repre = { - "name": "json", - "ext": "json", - "files": json_filename, - "stagingDir": staging_dir - } - - # TODO add check of list - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - json_data, transfers = self.export_compositing_images( - psd_object, staging_dir, publish_dir - ) - self.log.info("Json file path: {}".format(json_full_path)) - with open(json_full_path, "w") as json_filestream: - json.dump(json_data, json_filestream, indent=4) - - instance.data["transfers"].extend(transfers) - instance.data["representations"].remove(repre) - instance.data["representations"].append(new_repre) - - def export_compositing_images(self, psd_object, output_dir, publish_dir): - json_data = { - "__schema_version__": 1, - "children": [] - } - transfers = [] - for main_idx, main_layer in enumerate(psd_object): - if ( - not main_layer.is_visible() - or main_layer.name.lower() not in self.allowed_group_names - or not main_layer.is_group - ): - continue - - export_layers = [] - layers_idx = 0 - for layer in main_layer: - # TODO this way may be added also layers next to "ADJ" - if layer.name.lower() == "adj": - for _layer in layer: - export_layers.append((layers_idx, _layer)) - layers_idx += 1 - - else: - export_layers.append((layers_idx, layer)) - layers_idx += 1 - - if not export_layers: - continue - - main_layer_data = { - "index": main_idx, - "name": main_layer.name, - "children": [] - } - - for layer_idx, layer in export_layers: - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does " - "not have any content." 
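The per-layer export these removed PSD extractors perform right after this check composites each layer against the document's viewbox and saves it as PNG. A minimal sketch, assuming `psd_object` is a `psd_tools.PSDImage` and `layer` is one of its layers (the helper name is illustrative):

```python
import os


def export_layer_png(psd_object, layer, output_dir, filename):
    # Composite against the full document canvas so the exported PNG
    # keeps the layer's original placement and document dimensions.
    pil_object = layer.composite(viewport=psd_object.viewbox)
    output_filepath = os.path.join(output_dir, filename)
    pil_object.save(output_filepath, "PNG")
    return output_filepath
```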
- ).format(layer.name)) - continue - - main_layer_name = main_layer.name.replace(" ", "_") - layer_name = layer.name.replace(" ", "_") - - filename = "{:0>2}_{}_{:0>2}_{}.png".format( - main_idx + 1, main_layer_name, layer_idx + 1, layer_name - ) - layer_data = { - "index": layer_idx, - "name": layer.name, - "filename": filename - } - output_filepath = os.path.join(output_dir, filename) - dst_filepath = os.path.join(publish_dir, filename) - transfers.append((output_filepath, dst_filepath)) - - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - main_layer_data["children"].append(layer_data) - - if main_layer_data["children"]: - json_data["children"].append(main_layer_data) - - return json_data, transfers - - def redo_global_plugins(self, instance): - # TODO do this in collection phase - # Copy `families` and check if `family` is not in current families - families = instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - - self.log.debug( - "Setting new instance families {}".format(str(families)) - ) - instance.data["families"] = families - - # Override instance data with new information - instance.data["family"] = self.new_instance_family - - subset_name = instance.data["anatomyData"]["subset"] - asset_doc = instance.data["assetEntity"] - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - instance.data["latestVersion"] = latest_version - instance.data["version"] = version_number - - # Same data apply to anatomy data - instance.data["anatomyData"].update({ - "family": self.new_instance_family, - "version": version_number - }) - - # Redo publish and resources dir - anatomy = instance.context.data["anatomy"] - template_data = copy.deepcopy(instance.data["anatomyData"]) - template_data.update({ - "frame": "FRAME_TEMP", - "representation": "TEMP" - }) - anatomy_filled = anatomy.format(template_data) - if "folder" in anatomy.templates["publish"]: - publish_folder = anatomy_filled["publish"]["folder"] - else: - publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) - - publish_folder = os.path.normpath(publish_folder) - resources_folder = os.path.join(publish_folder, "resources") - - instance.data["publishDir"] = publish_folder - instance.data["resourcesDir"] = resources_folder - - self.log.debug("publishDir: \"{}\"".format(publish_folder)) - self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py deleted file mode 100644 index 2c92366ae9..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_bg_main_groups.py +++ /dev/null @@ -1,246 +0,0 @@ -import os -import copy -import json -import openpype.api -import pyblish.api -from avalon import io - -PSDImage = None - - -class 
ExtractBGMainGroups(openpype.api.Extractor): - label = "Extract Background Layout" - order = pyblish.api.ExtractorOrder + 0.02 - families = ["backgroundLayout"] - hosts = ["standalonepublisher"] - - new_instance_family = "background" - - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "UL", "SB", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" - ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - self.redo_global_plugins(instance) - - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - if not instance.data.get("transfers"): - instance.data["transfers"] = [] - - # Prepare staging dir - staging_dir = self.staging_dir(instance) - if not os.path.exists(staging_dir): - os.makedirs(staging_dir) - - # Prepare publish dir for transfers - publish_dir = instance.data["publishDir"] - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # Prepare json filepath where extracted metadata are stored - json_filename = "{}.json".format(instance.name) - json_full_path = os.path.join(staging_dir, json_filename) - - self.log.debug(f"`staging_dir` is \"{staging_dir}\"") - - # Prepare new repre data - new_repre = { - "name": "json", - "ext": "json", - "files": json_filename, - "stagingDir": staging_dir - } - - # TODO add check of list - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - json_data, transfers = self.export_compositing_images( - psd_object, staging_dir, publish_dir - ) - self.log.info("Json file path: {}".format(json_full_path)) - with open(json_full_path, "w") as json_filestream: - json.dump(json_data, json_filestream, indent=4) - - instance.data["transfers"].extend(transfers) - instance.data["representations"].remove(repre) - instance.data["representations"].append(new_repre) - - def export_compositing_images(self, psd_object, output_dir, publish_dir): - json_data = { - "__schema_version__": 1, - "children": [] - } - output_ext = ".png" - - to_export = [] - for layer_idx, layer in enumerate(psd_object): - layer_name = layer.name.replace(" ", "_") - if ( - not layer.is_visible() - or layer_name.lower() not in self.allowed_group_names - ): - continue - - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does not have any content." 
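Both removed extractors duplicate the same `avalon.io`-based `find_last_version()` helper. In the `openpype.client` style this PR migrates to elsewhere, the equivalent lookup could plausibly be written as below; `get_subset_by_name` and `get_last_version_by_subset_id` are assumed to exist in `openpype.client` alongside the `get_asset*` helpers this diff already uses:

```python
from openpype.client import (
    get_subset_by_name,
    get_last_version_by_subset_id,
)


def find_last_version(project_name, subset_name, asset_doc):
    # Find the subset under the asset, then its highest version number.
    subset_doc = get_subset_by_name(
        project_name, subset_name, asset_doc["_id"], fields=["_id"]
    )
    if not subset_doc:
        return None
    version_doc = get_last_version_by_subset_id(
        project_name, subset_doc["_id"], fields=["name"]
    )
    if version_doc:
        return int(version_doc["name"])
    return None
```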
- ).format(layer.name)) - continue - - filebase = "{:0>2}_{}".format(layer_idx, layer_name) - if layer_name.lower() == "anim": - if not layer.is_group: - self.log.warning("ANIM layer is not a group layer.") - continue - - children = [] - for anim_idx, anim_layer in enumerate(layer): - anim_layer_name = anim_layer.name.replace(" ", "_") - filename = "{}_{:0>2}_{}{}".format( - filebase, anim_idx, anim_layer_name, output_ext - ) - children.append({ - "index": anim_idx, - "name": anim_layer.name, - "filename": filename - }) - to_export.append((anim_layer, filename)) - - json_data["children"].append({ - "index": layer_idx, - "name": layer.name, - "children": children - }) - continue - - filename = filebase + output_ext - json_data["children"].append({ - "index": layer_idx, - "name": layer.name, - "filename": filename - }) - to_export.append((layer, filename)) - - transfers = [] - for layer, filename in to_export: - output_filepath = os.path.join(output_dir, filename) - dst_filepath = os.path.join(publish_dir, filename) - transfers.append((output_filepath, dst_filepath)) - - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - return json_data, transfers - - def redo_global_plugins(self, instance): - # TODO do this in collection phase - # Copy `families` and check if `family` is not in current families - families = instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - - self.log.debug( - "Setting new instance families {}".format(str(families)) - ) - instance.data["families"] = families - - # Override instance data with new information - instance.data["family"] = self.new_instance_family - - subset_name = instance.data["anatomyData"]["subset"] - asset_doc = instance.data["assetEntity"] - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - instance.data["latestVersion"] = latest_version - instance.data["version"] = version_number - - # Same data apply to anatomy data - instance.data["anatomyData"].update({ - "family": self.new_instance_family, - "version": version_number - }) - - # Redo publish and resources dir - anatomy = instance.context.data["anatomy"] - template_data = copy.deepcopy(instance.data["anatomyData"]) - template_data.update({ - "frame": "FRAME_TEMP", - "representation": "TEMP" - }) - anatomy_filled = anatomy.format(template_data) - if "folder" in anatomy.templates["publish"]: - publish_folder = anatomy_filled["publish"]["folder"] - else: - publish_folder = os.path.dirname(anatomy_filled["publish"]["path"]) - - publish_folder = os.path.normpath(publish_folder) - resources_folder = os.path.join(publish_folder, "resources") - - instance.data["publishDir"] = publish_folder - instance.data["resourcesDir"] = resources_folder - - self.log.debug("publishDir: \"{}\"".format(publish_folder)) - self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git 
a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py deleted file mode 100644 index e3094b2e3f..0000000000 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_images_from_psd.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import copy -import openpype.api -import pyblish.api -from avalon import io - -PSDImage = None - - -class ExtractImagesFromPSD(openpype.api.Extractor): - # PLUGIN is not currently enabled because was decided to use different - # approach - enabled = False - active = False - label = "Extract Images from PSD" - order = pyblish.api.ExtractorOrder + 0.02 - families = ["backgroundLayout"] - hosts = ["standalonepublisher"] - - new_instance_family = "image" - ignored_instance_data_keys = ("name", "label", "stagingDir", "version") - # Presetable - allowed_group_names = [ - "OL", "BG", "MG", "FG", "UL", "SKY", "Field Guide", "Field_Guide", - "ANIM" - ] - - def process(self, instance): - # Check if python module `psd_tools` is installed - try: - global PSDImage - from psd_tools import PSDImage - except Exception: - raise AssertionError( - "BUG: Python module `psd-tools` is not installed!" - ) - - self.allowed_group_names = [ - name.lower() - for name in self.allowed_group_names - ] - repres = instance.data.get("representations") - if not repres: - self.log.info("There are no representations on instance.") - return - - for repre in tuple(repres): - # Skip all files without .psd extension - repre_ext = repre["ext"].lower() - if repre_ext.startswith("."): - repre_ext = repre_ext[1:] - - if repre_ext != "psd": - continue - - # TODO add check of list of "files" value - psd_filename = repre["files"] - psd_folder_path = repre["stagingDir"] - psd_filepath = os.path.join(psd_folder_path, psd_filename) - self.log.debug(f"psd_filepath: \"{psd_filepath}\"") - psd_object = PSDImage.open(psd_filepath) - - self.create_new_instances(instance, psd_object) - - # Remove the instance from context - instance.context.remove(instance) - - def create_new_instances(self, instance, psd_object): - asset_doc = instance.data["assetEntity"] - for layer in psd_object: - if ( - not layer.is_visible() - or layer.name.lower() not in self.allowed_group_names - ): - continue - - has_size = layer.width > 0 and layer.height > 0 - if not has_size: - self.log.debug(( - "Skipping layer \"{}\" because does " - "not have any content." 
- ).format(layer.name)) - continue - - layer_name = layer.name.replace(" ", "_") - instance_name = subset_name = f"image{layer_name}" - self.log.info( - f"Creating new instance with name \"{instance_name}\"" - ) - new_instance = instance.context.create_instance(instance_name) - for key, value in instance.data.items(): - if key not in self.ignored_instance_data_keys: - new_instance.data[key] = copy.deepcopy(value) - - new_instance.data["label"] = " ".join( - (new_instance.data["asset"], instance_name) - ) - - # Find latest version - latest_version = self.find_last_version(subset_name, asset_doc) - version_number = 1 - if latest_version is not None: - version_number += latest_version - - self.log.info( - "Next version of instance \"{}\" will be {}".format( - instance_name, version_number - ) - ) - - # Set family and subset - new_instance.data["family"] = self.new_instance_family - new_instance.data["subset"] = subset_name - new_instance.data["version"] = version_number - new_instance.data["latestVersion"] = latest_version - - new_instance.data["anatomyData"].update({ - "subset": subset_name, - "family": self.new_instance_family, - "version": version_number - }) - - # Copy `families` and check if `family` is not in current families - families = new_instance.data.get("families") or list() - if families: - families = list(set(families)) - - if self.new_instance_family in families: - families.remove(self.new_instance_family) - new_instance.data["families"] = families - - # Prepare staging dir for new instance - staging_dir = self.staging_dir(new_instance) - - output_filename = "{}.png".format(layer_name) - output_filepath = os.path.join(staging_dir, output_filename) - pil_object = layer.composite(viewport=psd_object.viewbox) - pil_object.save(output_filepath, "PNG") - - new_repre = { - "name": "png", - "ext": "png", - "files": output_filename, - "stagingDir": staging_dir - } - self.log.debug( - "Creating new representation: {}".format(new_repre) - ) - new_instance.data["representations"] = [new_repre] - - def find_last_version(self, subset_name, asset_doc): - subset_doc = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_doc["_id"] - }) - - if subset_doc is None: - self.log.debug("Subset entity does not exist yet.") - else: - version_doc = io.find_one( - { - "type": "version", - "parent": subset_doc["_id"] - }, - sort=[("name", -1)] - ) - if version_doc: - return int(version_doc["name"]) - return None diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py index 23f0b104c8..9f02d65d00 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/extract_thumbnail.py @@ -1,8 +1,12 @@ import os import tempfile import pyblish.api -import openpype.api -import openpype.lib +from openpype.lib import ( + get_ffmpeg_tool_path, + get_ffprobe_streams, + path_to_subprocess_arg, + run_subprocess, +) class ExtractThumbnailSP(pyblish.api.InstancePlugin): @@ -34,85 +38,78 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): if not thumbnail_repre: return + thumbnail_repre.pop("thumbnail") files = thumbnail_repre.get("files") if not files: return if isinstance(files, list): - files_len = len(files) - file = str(files[0]) + first_filename = str(files[0]) else: - files_len = 1 - file = files + first_filename = files staging_dir = None - is_jpeg = False - if file.endswith(".jpeg") or file.endswith(".jpg"): 
- is_jpeg = True - if is_jpeg and files_len == 1: - # skip if already is single jpeg file - return + # Convert to jpeg if not yet + full_input_path = os.path.join( + thumbnail_repre["stagingDir"], first_filename + ) + self.log.info("input {}".format(full_input_path)) + with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp: + full_thumbnail_path = tmp.name - elif is_jpeg: - # use first frame as thumbnail if is sequence of jpegs - full_thumbnail_path = os.path.join( - thumbnail_repre["stagingDir"], file - ) - self.log.info( - "For thumbnail is used file: {}".format(full_thumbnail_path) - ) + self.log.info("output {}".format(full_thumbnail_path)) - else: - # Convert to jpeg if not yet - full_input_path = os.path.join(thumbnail_repre["stagingDir"], file) - self.log.info("input {}".format(full_input_path)) + instance.context.data["cleanupFullPaths"].append(full_thumbnail_path) - full_thumbnail_path = tempfile.mkstemp(suffix=".jpg")[1] - self.log.info("output {}".format(full_thumbnail_path)) + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} - ffmpeg_args = self.ffmpeg_args or {} + jpeg_items = [ + path_to_subprocess_arg(ffmpeg_path), + # override file if already exists + "-y" + ] - jpeg_items = [ - "\"{}\"".format(ffmpeg_path), - # override file if already exists - "-y" - ] - - # add input filters from peresets - jpeg_items.extend(ffmpeg_args.get("input") or []) - # input file - jpeg_items.append("-i \"{}\"".format(full_input_path)) + # add input filters from presets + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.extend([ + "-i", path_to_subprocess_arg(full_input_path), # extract only single file - jpeg_items.append("-frames:v 1") + "-frames:v", "1", # Add black background for transparent images - jpeg_items.append(( - "-filter_complex" - " \"color=black,format=rgb24[c]" + "-filter_complex", ( + "\"color=black,format=rgb24[c]" ";[c][0]scale2ref[c][i]" ";[c][i]overlay=format=auto:shortest=1,setsar=1\"" - )) + ), + ]) - jpeg_items.extend(ffmpeg_args.get("output") or []) + jpeg_items.extend(ffmpeg_args.get("output") or []) - # output file - jpeg_items.append("\"{}\"".format(full_thumbnail_path)) + # output file + jpeg_items.append(path_to_subprocess_arg(full_thumbnail_path)) - subprocess_jpeg = " ".join(jpeg_items) + subprocess_jpeg = " ".join(jpeg_items) - # run subprocess - self.log.debug("Executing: {}".format(subprocess_jpeg)) - openpype.api.run_subprocess( - subprocess_jpeg, shell=True, logger=self.log - ) + # run subprocess + self.log.debug("Executing: {}".format(subprocess_jpeg)) + run_subprocess( + subprocess_jpeg, shell=True, logger=self.log + ) # remove thumbnail key from origin repre - thumbnail_repre.pop("thumbnail") + streams = get_ffprobe_streams(full_thumbnail_path) + width = height = None + for stream in streams: + if "width" in stream and "height" in stream: + width = stream["width"] + height = stream["height"] + break - filename = os.path.basename(full_thumbnail_path) - staging_dir = staging_dir or os.path.dirname(full_thumbnail_path) + staging_dir, filename = os.path.split(full_thumbnail_path) # create new thumbnail representation representation = { @@ -120,12 +117,12 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): 'ext': 'jpg', 'files': filename, "stagingDir": staging_dir, - "tags": ["thumbnail"], + "tags": ["thumbnail", "delete"], + "thumbnail": True } - - # # add Delete tag when temp file was rendered - if not is_jpeg: - 
representation["tags"].append("delete") + if width and height: + representation["width"] = width + representation["height"] = height self.log.info(f"New representation {representation}") instance.data["representations"].append(representation) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml new file mode 100644 index 0000000000..b65d274fe5 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/help/validate_simple_texture_naming.xml @@ -0,0 +1,17 @@ + + + +Invalid texture name + +## Invalid file name + +Submitted file has invalid name: +'{invalid_file}' + +### How to repair? + + Texture file must adhere to naming conventions for Unreal: + T_{asset}_*.ext + + + \ No newline at end of file diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py index afb828474d..3d2b6d04ad 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_editorial_resources.py @@ -1,6 +1,8 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateEditorialResources(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateEditorialResources(pyblish.api.InstancePlugin): # make sure it is enabled only if at least both families are available match = pyblish.api.Subset - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, instance): self.log.debug( diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py index 005157af62..074c62ea0e 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_frame_ranges.py @@ -2,9 +2,11 @@ import re import pyblish.api -import openpype.api -from openpype import lib -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.context_tools import get_current_project_asset +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateFrameRange(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): label = "Validate Frame Range" hosts = ["standalonepublisher"] families = ["render"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder optional = True # published data might be sequence (.mov, .mp4) in that counting files @@ -27,7 +29,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin): for pattern in self.skip_timelines_check): self.log.info("Skipping for {} task".format(instance.data["task"])) - asset_data = lib.get_asset(instance.data["asset"])["data"] + # TODO replace query with 'instance.data["assetEntity"]' + asset_data = get_current_project_asset(instance.data["asset"])["data"] frame_start = asset_data["frameStart"] frame_end = asset_data["frameEnd"] handle_start = asset_data["handleStart"] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py 
b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py index fe655f6b74..df04ae3b66 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_shot_duplicates.py @@ -1,14 +1,17 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) + class ValidateShotDuplicates(pyblish.api.ContextPlugin): """Validating no duplicate names are in context.""" label = "Validate Shot Duplicates" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder def process(self, context): shot_names = [] diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py new file mode 100644 index 0000000000..c123bef4f8 --- /dev/null +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_simple_unreal_texture_naming.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +"""Validator for correct file naming.""" +import re +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) + + +class ValidateSimpleUnrealTextureNaming(pyblish.api.InstancePlugin): + label = "Validate Unreal Texture Names" + hosts = ["standalonepublisher"] + families = ["simpleUnrealTexture"] + order = ValidateContentsOrder + regex = "^T_{asset}.*" + + def process(self, instance): + file_name = instance.data.get("originalBasename") + self.log.info(file_name) + pattern = self.regex.format(asset=instance.data.get("asset")) + if not re.match(pattern, file_name): + msg = f"Invalid file name {file_name}" + raise PublishXmlValidationError( + self, msg, formatting_data={ + "invalid_file": file_name, + "asset": instance.data.get("asset") + }) diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py index 316f58988f..1782f53de2 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_sources.py @@ -2,8 +2,10 @@ import os import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateSources(pyblish.api.InstancePlugin): @@ -13,7 +15,7 @@ class ValidateSources(pyblish.api.InstancePlugin): got deleted between starting of SP and now. 
""" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Check source files" optional = True # only for unforeseeable cases diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py index 825092c81b..19ea1a4778 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py @@ -1,6 +1,6 @@ import pyblish.api -from avalon import io +from openpype.client import get_assets from openpype.pipeline import PublishXmlValidationError @@ -18,15 +18,11 @@ class ValidateTaskExistence(pyblish.api.ContextPlugin): for instance in context: asset_names.add(instance.data["asset"]) - asset_docs = io.find( - { - "type": "asset", - "name": {"$in": list(asset_names)} - }, - { - "name": 1, - "data.tasks": 1 - } + project_name = context.data["projectEntity"]["name"] + asset_docs = get_assets( + project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] ) tasks_by_asset_names = {} for asset_doc in asset_docs: diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py index d66fb257bb..44f69e48f7 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_batch.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatch(pyblish.api.InstancePlugin): @@ -9,7 +11,7 @@ class ValidateTextureBatch(pyblish.api.InstancePlugin): label = "Validate Texture Presence" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py index 0e67464f59..f489d37f59 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_has_workfile.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): @@ -12,7 +14,7 @@ class ValidateTextureHasWorkfile(pyblish.api.InstancePlugin): """ label = "Validate Texture Has Workfile" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["textures"] optional = True diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py index 751ad917ca..22f4a0eafc 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_name.py @@ -1,14 +1,16 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + 
ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchNaming(pyblish.api.InstancePlugin): """Validates that all instances had properly formatted name.""" label = "Validate Texture Batch Naming" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile", "textures"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py index 84d9def895..dab160d537 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_versions.py @@ -1,7 +1,9 @@ import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): @@ -14,7 +16,7 @@ class ValidateTextureBatchVersions(pyblish.api.InstancePlugin): """ label = "Validate Texture Batch Versions" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["textures"] optional = False diff --git a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py index fa492a80d8..a7ae02a2eb 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py +++ b/openpype/hosts/standalonepublisher/plugins/publish/validate_texture_workfiles.py @@ -1,7 +1,11 @@ +import os import pyblish.api -import openpype.api -from openpype.pipeline import PublishXmlValidationError +from openpype.settings import get_project_settings +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishXmlValidationError, +) class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): @@ -12,27 +16,42 @@ class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin): label = "Validate Texture Workfile Has Resources" hosts = ["standalonepublisher"] - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["texture_batch_workfile"] optional = True - # from presets - main_workfile_extensions = ['mra'] - def process(self, instance): if instance.data["family"] == "workfile": ext = instance.data["representations"][0]["ext"] - if ext not in self.main_workfile_extensions: + main_workfile_extensions = self.get_main_workfile_extensions() + if ext not in main_workfile_extensions: self.log.warning("Only secondary workfile present!") return if not instance.data.get("resources"): msg = "No secondary workfile present for workfile '{}'". \ format(instance.data["name"]) - ext = self.main_workfile_extensions[0] + ext = main_workfile_extensions[0] formatting_data = {"file_name": instance.data["name"], "extension": ext} raise PublishXmlValidationError(self, msg, formatting_data=formatting_data ) + + @staticmethod + def get_main_workfile_extensions(): + project_settings = get_project_settings(os.environ["AVALON_PROJECT"]) + + try: + extensions = (project_settings["standalonepublisher"] + ["publish"] + ["CollectTextures"] + ["main_workfile_extensions"]) + except KeyError: + raise Exception("Setting 'Main workfile extensions' not found." 
+ " The setting must be set for the" + " 'Collect Texture' publish plugin of the" + " 'Standalone Publish' tool.") + + return extensions diff --git a/openpype/hosts/testhost/README.md b/openpype/hosts/testhost/README.md deleted file mode 100644 index f69e02a3b3..0000000000 --- a/openpype/hosts/testhost/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# What is `testhost` -Host `testhost` was created to fake running host for testing of publisher. - -Does not have any proper launch mechanism at the moment. There is python script `./run_publish.py` which will show publisher window. The script requires to set few variables to run. Execution will register host `testhost`, register global publish plugins and register creator and publish plugins from `./plugins`. - -## Data -Created instances and context data are stored into json files inside `./api` folder. Can be easily modified to save them to a different place. - -## Plugins -Test host has few plugins to be able test publishing. - -### Creators -They are just example plugins using functions from `api` to create/remove/update data. One of them is auto creator which means that is triggered on each reset of create context. Others are manual creators both creating the same family. - -### Publishers -Collectors are example plugin to use `get_attribute_defs` to define attributes for specific families or for context. Validators are to test `PublishValidationError`. diff --git a/openpype/hosts/testhost/api/__init__.py b/openpype/hosts/testhost/api/__init__.py deleted file mode 100644 index 7840b25892..0000000000 --- a/openpype/hosts/testhost/api/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -import logging -import pyblish.api -import avalon.api -from openpype.pipeline import BaseCreator - -from .pipeline import ( - ls, - list_instances, - update_instances, - remove_instances, - get_context_data, - update_context_data, - get_context_title -) - - -HOST_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") - -log = logging.getLogger(__name__) - - -def install(): - log.info("OpenPype - Installing TestHost integration") - pyblish.api.register_host("testhost") - pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) - - -__all__ = ( - "ls", - "list_instances", - "update_instances", - "remove_instances", - "get_context_data", - "update_context_data", - "get_context_title", - - "install" -) diff --git a/openpype/hosts/testhost/api/context.json b/openpype/hosts/testhost/api/context.json deleted file mode 100644 index 0967ef424b..0000000000 --- a/openpype/hosts/testhost/api/context.json +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/openpype/hosts/testhost/api/instances.json b/openpype/hosts/testhost/api/instances.json deleted file mode 100644 index d955012514..0000000000 --- a/openpype/hosts/testhost/api/instances.json +++ /dev/null @@ -1,108 +0,0 @@ -[ - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMyVariant", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "myVariant", - "instance_id": "a485f148-9121-46a5-8157-aa64df0fb449", - "creator_attributes": { - "number_key": 10, - "ha": 10 - }, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": false - } - }, - "creator_identifier": "test_one" - }, - { - "id": "pyblish.avalon.instance", - "active": true, 
- "family": "test", - "subset": "testMyVariant2", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "myVariant2", - "creator_attributes": {}, - "instance_id": "a485f148-9121-46a5-8157-aa64df0fb444", - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_one" - }, - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMain", - "version": 1, - "asset": "sq01_sh0010", - "task": "Compositing", - "variant": "Main", - "creator_attributes": {}, - "instance_id": "3607bc95-75f6-4648-a58d-e699f413d09f", - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_two" - }, - { - "id": "pyblish.avalon.instance", - "active": true, - "family": "test", - "subset": "testMain2", - "version": 1, - "asset": "sq01_sh0020", - "task": "Compositing", - "variant": "Main2", - "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8eb", - "creator_attributes": {}, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - }, - "creator_identifier": "test_two" - }, - { - "id": "pyblish.avalon.instance", - "family": "test_three", - "subset": "test_threeMain2", - "active": true, - "version": 1, - "asset": "sq01_sh0020", - "task": "Compositing", - "variant": "Main2", - "instance_id": "4ccf56f6-9982-4837-967c-a49695dbe8ec", - "creator_attributes": {}, - "publish_attributes": { - "CollectFtrackApi": { - "add_ftrack_family": true - } - } - }, - { - "id": "pyblish.avalon.instance", - "family": "workfile", - "subset": "workfileMain", - "active": true, - "creator_identifier": "workfile", - "version": 1, - "asset": "Alpaca_01", - "task": "modeling", - "variant": "Main", - "instance_id": "7c9ddfc7-9f9c-4c1c-b233-38c966735fb6", - "creator_attributes": {}, - "publish_attributes": {} - } -] \ No newline at end of file diff --git a/openpype/hosts/testhost/api/pipeline.py b/openpype/hosts/testhost/api/pipeline.py deleted file mode 100644 index 1f5d680705..0000000000 --- a/openpype/hosts/testhost/api/pipeline.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -import json - - -class HostContext: - instances_json_path = None - context_json_path = None - - @classmethod - def get_context_title(cls): - project_name = os.environ.get("AVALON_PROJECT") - if not project_name: - return "TestHost" - - asset_name = os.environ.get("AVALON_ASSET") - if not asset_name: - return project_name - - from avalon import io - - asset_doc = io.find_one( - {"type": "asset", "name": asset_name}, - {"data.parents": 1} - ) - parents = asset_doc.get("data", {}).get("parents") or [] - - hierarchy = [project_name] - hierarchy.extend(parents) - hierarchy.append("{}".format(asset_name)) - task_name = os.environ.get("AVALON_TASK") - if task_name: - hierarchy.append(task_name) - - return "/".join(hierarchy) - - @classmethod - def get_current_dir_filepath(cls, filename): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - filename - ) - - @classmethod - def get_instances_json_path(cls): - if cls.instances_json_path is None: - cls.instances_json_path = cls.get_current_dir_filepath( - "instances.json" - ) - return cls.instances_json_path - - @classmethod - def get_context_json_path(cls): - if cls.context_json_path is None: - cls.context_json_path = cls.get_current_dir_filepath( - "context.json" - ) - return cls.context_json_path - - @classmethod - def add_instance(cls, instance): - instances = cls.get_instances() - instances.append(instance) - 
cls.save_instances(instances) - - @classmethod - def save_instances(cls, instances): - json_path = cls.get_instances_json_path() - with open(json_path, "w") as json_stream: - json.dump(instances, json_stream, indent=4) - - @classmethod - def get_instances(cls): - json_path = cls.get_instances_json_path() - if not os.path.exists(json_path): - instances = [] - with open(json_path, "w") as json_stream: - json.dump(json_stream, instances) - else: - with open(json_path, "r") as json_stream: - instances = json.load(json_stream) - return instances - - @classmethod - def get_context_data(cls): - json_path = cls.get_context_json_path() - if not os.path.exists(json_path): - data = {} - with open(json_path, "w") as json_stream: - json.dump(data, json_stream) - else: - with open(json_path, "r") as json_stream: - data = json.load(json_stream) - return data - - @classmethod - def save_context_data(cls, data): - json_path = cls.get_context_json_path() - with open(json_path, "w") as json_stream: - json.dump(data, json_stream, indent=4) - - -def ls(): - return [] - - -def list_instances(): - return HostContext.get_instances() - - -def update_instances(update_list): - updated_instances = {} - for instance, _changes in update_list: - updated_instances[instance.id] = instance.data_to_store() - - instances = HostContext.get_instances() - for instance_data in instances: - instance_id = instance_data["instance_id"] - if instance_id in updated_instances: - new_instance_data = updated_instances[instance_id] - old_keys = set(instance_data.keys()) - new_keys = set(new_instance_data.keys()) - instance_data.update(new_instance_data) - for key in (old_keys - new_keys): - instance_data.pop(key) - - HostContext.save_instances(instances) - - -def remove_instances(instances): - if not isinstance(instances, (tuple, list)): - instances = [instances] - - current_instances = HostContext.get_instances() - for instance in instances: - instance_id = instance.data["instance_id"] - found_idx = None - for idx, _instance in enumerate(current_instances): - if instance_id == _instance["instance_id"]: - found_idx = idx - break - - if found_idx is not None: - current_instances.pop(found_idx) - HostContext.save_instances(current_instances) - - -def get_context_data(): - return HostContext.get_context_data() - - -def update_context_data(data, changes): - HostContext.save_context_data(data) - - -def get_context_title(): - return HostContext.get_context_title() diff --git a/openpype/hosts/testhost/plugins/create/test_creator_1.py b/openpype/hosts/testhost/plugins/create/test_creator_1.py deleted file mode 100644 index 7664276fa2..0000000000 --- a/openpype/hosts/testhost/plugins/create/test_creator_1.py +++ /dev/null @@ -1,94 +0,0 @@ -import json -from openpype import resources -from openpype.hosts.testhost.api import pipeline -from openpype.lib import ( - UISeparatorDef, - UILabelDef, - BoolDef, - NumberDef, - FileDef, -) -from openpype.pipeline import ( - Creator, - CreatedInstance, -) - - -class TestCreatorOne(Creator): - identifier = "test_one" - label = "test" - family = "test" - description = "Testing creator of testhost" - - create_allow_context_change = False - - def get_icon(self): - return resources.get_openpype_splash_filepath() - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def 
update_instances(self, update_list): - pipeline.update_instances(update_list) - - def remove_instances(self, instances): - pipeline.remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def create(self, subset_name, data, pre_create_data): - print("Data that can be used in create:\n{}".format( - json.dumps(pre_create_data, indent=4) - )) - new_instance = CreatedInstance(self.family, subset_name, data, self) - pipeline.HostContext.add_instance(new_instance.data_to_store()) - self.log.info(new_instance.data) - self._add_instance_to_context(new_instance) - - def get_default_variants(self): - return [ - "myVariant", - "variantTwo", - "different_variant" - ] - - def get_instance_attr_defs(self): - output = [ - NumberDef("number_key", label="Number"), - ] - return output - - def get_pre_create_attr_defs(self): - output = [ - BoolDef("use_selection", label="Use selection"), - UISeparatorDef(), - UILabelDef("Testing label"), - FileDef("filepath", folders=True, label="Filepath"), - FileDef( - "filepath_2", multipath=True, folders=True, label="Filepath 2" - ) - ] - return output - - def get_detail_description(self): - return """# Relictus funes est Nyseides currusque nunc oblita - -## Causa sed - -Lorem markdownum posito consumptis, *plebe Amorque*, abstitimus rogatus fictaque -gladium Circe, nos? Bos aeternum quae. Utque me, si aliquem cladis, et vestigia -arbor, sic mea ferre lacrimae agantur prospiciens hactenus. Amanti dentes pete, -vos quid laudemque rastrorumque terras in gratantibus **radix** erat cedemus? - -Pudor tu ponderibus verbaque illa; ire ergo iam Venus patris certe longae -cruentum lecta, et quaeque. Sit doce nox. Anteit ad tempora magni plenaque et -videres mersit sibique auctor in tendunt mittit cunctos ventisque gravitate -volucris quemquam Aeneaden. Pectore Mensis somnus; pectora -[ferunt](http://www.mox.org/oculosbracchia)? Fertilitatis bella dulce et suum? 
- """ diff --git a/openpype/hosts/testhost/plugins/create/test_creator_2.py b/openpype/hosts/testhost/plugins/create/test_creator_2.py deleted file mode 100644 index f54adee8a2..0000000000 --- a/openpype/hosts/testhost/plugins/create/test_creator_2.py +++ /dev/null @@ -1,74 +0,0 @@ -from openpype.lib import NumberDef, TextDef -from openpype.hosts.testhost.api import pipeline -from openpype.pipeline import ( - Creator, - CreatedInstance, -) - - -class TestCreatorTwo(Creator): - identifier = "test_two" - label = "test" - family = "test" - description = "A second testing creator" - - def get_icon(self): - return "cube" - - def create(self, subset_name, data, pre_create_data): - new_instance = CreatedInstance(self.family, subset_name, data, self) - pipeline.HostContext.add_instance(new_instance.data_to_store()) - self.log.info(new_instance.data) - self._add_instance_to_context(new_instance) - - def collect_instances(self): - for instance_data in pipeline.list_instances(): - creator_id = instance_data.get("creator_identifier") - if creator_id == self.identifier: - instance = CreatedInstance.from_existing( - instance_data, self - ) - self._add_instance_to_context(instance) - - def update_instances(self, update_list): - pipeline.update_instances(update_list) - - def remove_instances(self, instances): - pipeline.remove_instances(instances) - for instance in instances: - self._remove_instance_from_context(instance) - - def get_instance_attr_defs(self): - output = [ - NumberDef("number_key"), - TextDef("text_key") - ] - return output - - def get_detail_description(self): - return """# Lorem ipsum, dolor sit amet. [![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome) - -> A curated list of awesome lorem ipsum generators. - -Inspired by the [awesome](https://github.com/sindresorhus/awesome) list thing. - - -## Table of Contents - -- [Legend](#legend) -- [Practical](#briefcase-practical) -- [Whimsical](#roller_coaster-whimsical) - - [Animals](#rabbit-animals) - - [Eras](#tophat-eras) - - [Famous Individuals](#sunglasses-famous-individuals) - - [Music](#microphone-music) - - [Food and Drink](#pizza-food-and-drink) - - [Geographic and Dialects](#earth_africa-geographic-and-dialects) - - [Literature](#books-literature) - - [Miscellaneous](#cyclone-miscellaneous) - - [Sports and Fitness](#bicyclist-sports-and-fitness) - - [TV and Film](#movie_camera-tv-and-film) -- [Tools, Apps, and Extensions](#wrench-tools-apps-and-extensions) -- [Contribute](#contribute) -- [TODO](#todo) -""" diff --git a/openpype/hosts/testhost/plugins/publish/collect_context.py b/openpype/hosts/testhost/plugins/publish/collect_context.py deleted file mode 100644 index 0ab98fb84b..0000000000 --- a/openpype/hosts/testhost/plugins/publish/collect_context.py +++ /dev/null @@ -1,34 +0,0 @@ -import pyblish.api - -from openpype.pipeline import ( - OpenPypePyblishPluginMixin, - attribute_definitions -) - - -class CollectContextDataTestHost( - pyblish.api.ContextPlugin, OpenPypePyblishPluginMixin -): - """ - Collecting temp json data sent from a host context - and path for returning json data back to hostself. 
- """ - - label = "Collect Source - Test Host" - order = pyblish.api.CollectorOrder - 0.4 - hosts = ["testhost"] - - @classmethod - def get_attribute_defs(cls): - return [ - attribute_definitions.BoolDef( - "test_bool", - True, - label="Bool input" - ) - ] - - def process(self, context): - # get json paths from os and load them - for instance in context: - instance.data["source"] = "testhost" diff --git a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py b/openpype/hosts/testhost/plugins/publish/collect_instance_1.py deleted file mode 100644 index c7241a15a8..0000000000 --- a/openpype/hosts/testhost/plugins/publish/collect_instance_1.py +++ /dev/null @@ -1,52 +0,0 @@ -import json -import pyblish.api - -from openpype.lib import attribute_definitions -from openpype.pipeline import OpenPypePyblishPluginMixin - - -class CollectInstanceOneTestHost( - pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin -): - """ - Collecting temp json data sent from a host context - and path for returning json data back to hostself. - """ - - label = "Collect Instance 1 - Test Host" - order = pyblish.api.CollectorOrder - 0.3 - hosts = ["testhost"] - - @classmethod - def get_attribute_defs(cls): - return [ - attribute_definitions.NumberDef( - "version", - default=1, - minimum=1, - maximum=999, - decimals=0, - label="Version" - ) - ] - - def process(self, instance): - self._debug_log(instance) - - publish_attributes = instance.data.get("publish_attributes") - if not publish_attributes: - return - - values = publish_attributes.get(self.__class__.__name__) - if not values: - return - - instance.data["version"] = values["version"] - - def _debug_log(self, instance): - def _default_json(value): - return str(value) - - self.log.info( - json.dumps(instance.data, indent=4, default=_default_json) - ) diff --git a/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py b/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py deleted file mode 100644 index 46e996a569..0000000000 --- a/openpype/hosts/testhost/plugins/publish/validate_context_with_error.py +++ /dev/null @@ -1,57 +0,0 @@ -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateInstanceAssetRepair(pyblish.api.Action): - """Repair the instance asset.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - pass - - -description = """ -## Publish plugins - -### Validate Scene Settings - -#### Skip Resolution Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip resolution check against values from DB. - -#### Skip Timeline Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB. - -### AfterEffects Submit to Deadline - -* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. -* `Priority` - priority of job on farm -* `Primary Pool` - here is list of pool fetched from server you can select from. -* `Secondary Pool` -* `Frames Per Task` - number of sequence division between individual tasks (chunks) -making one job on farm. -""" - - -class ValidateContextWithError(pyblish.api.ContextPlugin): - """Validate the instance asset is the current selected context asset. - - As it might happen that multiple worfiles are opened, switching - between them would mess with selected context. - In that case outputs might be output under wrong asset! 
- - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. - """ - - label = "Validate Context With Error" - hosts = ["testhost"] - actions = [ValidateInstanceAssetRepair] - order = pyblish.api.ValidatorOrder - - def process(self, context): - raise PublishValidationError("Crashing", "Context error", description) diff --git a/openpype/hosts/testhost/plugins/publish/validate_with_error.py b/openpype/hosts/testhost/plugins/publish/validate_with_error.py deleted file mode 100644 index 5a2888a8b0..0000000000 --- a/openpype/hosts/testhost/plugins/publish/validate_with_error.py +++ /dev/null @@ -1,57 +0,0 @@ -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateInstanceAssetRepair(pyblish.api.Action): - """Repair the instance asset.""" - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - pass - - -description = """ -## Publish plugins - -### Validate Scene Settings - -#### Skip Resolution Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip resolution check against values from DB. - -#### Skip Timeline Check for Tasks - -Set regex pattern(s) to look for in a Task name to skip `frameStart`, `frameEnd` check against values from DB. - -### AfterEffects Submit to Deadline - -* `Use Published scene` - Set to True (green) when Deadline should take published scene as a source instead of uploaded local one. -* `Priority` - priority of job on farm -* `Primary Pool` - here is list of pool fetched from server you can select from. -* `Secondary Pool` -* `Frames Per Task` - number of sequence division between individual tasks (chunks) -making one job on farm. -""" - - -class ValidateWithError(pyblish.api.InstancePlugin): - """Validate the instance asset is the current selected context asset. - - As it might happen that multiple worfiles are opened, switching - between them would mess with selected context. - In that case outputs might be output under wrong asset! - - Repair action will use Context asset value (from Workfiles or Launcher) - Closing and reopening with Workfiles will refresh Context value. 
- """ - - label = "Validate With Error" - hosts = ["testhost"] - actions = [ValidateInstanceAssetRepair] - order = pyblish.api.ValidatorOrder - - def process(self, instance): - raise PublishValidationError("Crashing", "Instance error", description) diff --git a/openpype/hosts/testhost/run_publish.py b/openpype/hosts/testhost/run_publish.py deleted file mode 100644 index 44860a30e4..0000000000 --- a/openpype/hosts/testhost/run_publish.py +++ /dev/null @@ -1,70 +0,0 @@ -import os -import sys - -mongo_url = "" -project_name = "" -asset_name = "" -task_name = "" -ftrack_url = "" -ftrack_username = "" -ftrack_api_key = "" - - -def multi_dirname(path, times=1): - for _ in range(times): - path = os.path.dirname(path) - return path - - -host_name = "testhost" -current_file = os.path.abspath(__file__) -openpype_dir = multi_dirname(current_file, 4) - -os.environ["OPENPYPE_MONGO"] = mongo_url -os.environ["OPENPYPE_ROOT"] = openpype_dir -os.environ["AVALON_MONGO"] = mongo_url -os.environ["AVALON_PROJECT"] = project_name -os.environ["AVALON_ASSET"] = asset_name -os.environ["AVALON_TASK"] = task_name -os.environ["AVALON_APP"] = host_name -os.environ["OPENPYPE_DATABASE_NAME"] = "openpype" -os.environ["AVALON_CONFIG"] = "openpype" -os.environ["AVALON_TIMEOUT"] = "1000" -os.environ["AVALON_DB"] = "avalon" -os.environ["FTRACK_SERVER"] = ftrack_url -os.environ["FTRACK_API_USER"] = ftrack_username -os.environ["FTRACK_API_KEY"] = ftrack_api_key -for path in [ - openpype_dir, - r"{}\repos\avalon-core".format(openpype_dir), - r"{}\.venv\Lib\site-packages".format(openpype_dir) -]: - sys.path.append(path) - -from Qt import QtWidgets, QtCore - -from openpype.tools.publisher.window import PublisherWindow - - -def main(): - """Main function for testing purposes.""" - import avalon.api - import pyblish.api - from openpype.modules import ModulesManager - from openpype.hosts.testhost import api as testhost - - manager = ModulesManager() - for plugin_path in manager.collect_plugin_paths()["publish"]: - pyblish.api.register_plugin_path(plugin_path) - - avalon.api.install(testhost) - - QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling) - app = QtWidgets.QApplication([]) - window = PublisherWindow() - window.show() - app.exec_() - - -if __name__ == "__main__": - main() diff --git a/openpype/hosts/traypublisher/__init__.py b/openpype/hosts/traypublisher/__init__.py new file mode 100644 index 0000000000..77ba908ddd --- /dev/null +++ b/openpype/hosts/traypublisher/__init__.py @@ -0,0 +1,6 @@ +from .addon import TrayPublishAddon + + +__all__ = ( + "TrayPublishAddon", +) diff --git a/openpype/hosts/traypublisher/addon.py b/openpype/hosts/traypublisher/addon.py new file mode 100644 index 0000000000..3b34f9e6e8 --- /dev/null +++ b/openpype/hosts/traypublisher/addon.py @@ -0,0 +1,55 @@ +import os + +import click + +from openpype.lib import get_openpype_execute_args +from openpype.lib.execute import run_detached_process +from openpype.modules import OpenPypeModule, ITrayAction, IHostAddon + +TRAYPUBLISH_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class TrayPublishAddon(OpenPypeModule, IHostAddon, ITrayAction): + label = "Publisher" + name = "traypublisher" + host_name = "traypublisher" + + def initialize(self, modules_settings): + self.enabled = True + self.publish_paths = [ + os.path.join(TRAYPUBLISH_ROOT_DIR, "plugins", "publish") + ] + + def tray_init(self): + return + + def on_action_trigger(self): + self.run_traypublisher() + + def connect_with_modules(self, enabled_modules): + """Collect 
 publish paths from other modules."""
+        publish_paths = self.manager.collect_plugin_paths()["publish"]
+        self.publish_paths.extend(publish_paths)
+
+    def run_traypublisher(self):
+        args = get_openpype_execute_args(
+            "module", self.name, "launch"
+        )
+        run_detached_process(args)
+
+    def cli(self, click_group):
+        click_group.add_command(cli_main)
+
+
+@click.group(TrayPublishAddon.name, help="TrayPublisher related commands.")
+def cli_main():
+    pass
+
+
+@cli_main.command()
+def launch():
+    """Launch TrayPublish tool UI."""
+
+    from openpype.tools import traypublisher
+
+    traypublisher.main()
diff --git a/openpype/hosts/traypublisher/api/__init__.py b/openpype/hosts/traypublisher/api/__init__.py
index c461c0c526..4e7284b09a 100644
--- a/openpype/hosts/traypublisher/api/__init__.py
+++ b/openpype/hosts/traypublisher/api/__init__.py
@@ -1,20 +1,8 @@
 from .pipeline import (
-    install,
-    ls,
-
-    set_project_name,
-    get_context_title,
-    get_context_data,
-    update_context_data,
+    TrayPublisherHost,
 )


 __all__ = (
-    "install",
-    "ls",
-
-    "set_project_name",
-    "get_context_title",
-    "get_context_data",
-    "update_context_data",
+    "TrayPublisherHost",
 )
diff --git a/openpype/hosts/traypublisher/api/editorial.py b/openpype/hosts/traypublisher/api/editorial.py
new file mode 100644
index 0000000000..293db542a9
--- /dev/null
+++ b/openpype/hosts/traypublisher/api/editorial.py
@@ -0,0 +1,326 @@
+import re
+from copy import deepcopy
+
+from openpype.client import get_asset_by_id
+from openpype.pipeline.create import CreatorError
+
+
+class ShotMetadataSolver:
+    """Solving of hierarchical shot metadata.
+
+    Used during editorial publishing. Works with the input
+    clip name and settings that define a Python formattable
+    template. Settings also define search patterns and their
+    token keys used for formatting in templates.
+    """
+
+    NO_DECOR_PATERN = re.compile(r"\{([a-z]*?)\}")
+
+    # presets
+    clip_name_tokenizer = None
+    shot_rename = True
+    shot_hierarchy = None
+    shot_add_tasks = None
+
+    def __init__(
+        self,
+        clip_name_tokenizer,
+        shot_rename,
+        shot_hierarchy,
+        shot_add_tasks,
+        logger
+    ):
+        self.clip_name_tokenizer = clip_name_tokenizer
+        self.shot_rename = shot_rename
+        self.shot_hierarchy = shot_hierarchy
+        self.shot_add_tasks = shot_add_tasks
+        self.log = logger
+
+    def _rename_template(self, data):
+        """Shot renaming function.
+
+        Args:
+            data (dict): formatting data
+
+        Raises:
+            CreatorError: if a template key is missing in data
+
+        Returns:
+            str: formatted new name
+        """
+        shot_rename_template = self.shot_rename[
+            "shot_rename_template"]
+        try:
+            # format to new shot name
+            return shot_rename_template.format(**data)
+        except KeyError as _E:
+            raise CreatorError((
+                "Make sure all keys in settings are correct: \n\n"
+                f"From template string {shot_rename_template} > "
+                f"`{_E}` has no equivalent in \n"
+                f"{list(data.keys())} input formatting keys!"
+            ))
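Note for reviewers: a minimal sketch (not part of the patch) of the rename-template behavior above. The template and token values below are made-up examples, not shipped settings.

```python
# Sketch of the shot-rename formatting used by ShotMetadataSolver.
# Template and token values here are hypothetical.
shot_rename_template = "{episode}_{sequence}_sh{shot}"
data = {"episode": "ep01", "sequence": "sq01", "shot": "0010"}

try:
    print(shot_rename_template.format(**data))  # -> ep01_sq01_sh0010
except KeyError as exc:
    # mirrors the CreatorError raised above when a token is missing
    print(f"Missing token {exc} for template {shot_rename_template}")
```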
+    def _generate_tokens(self, clip_name, source_data):
+        """Token generator.
+
+        Settings define pairs of token key and regex expression.
+
+        Args:
+            clip_name (str): name of clip in editorial
+            source_data (dict): data for formatting
+
+        Raises:
+            CreatorError: if a token regex has no match
+
+        Returns:
+            dict: updated source_data
+        """
+        output_data = deepcopy(source_data["anatomy_data"])
+        output_data["clip_name"] = clip_name
+
+        if not self.clip_name_tokenizer:
+            return output_data
+
+        parent_name = source_data["selected_asset_doc"]["name"]
+
+        search_text = parent_name + clip_name
+
+        for token_key, pattern in self.clip_name_tokenizer.items():
+            p = re.compile(pattern)
+            match = p.findall(search_text)
+            if not match:
+                raise CreatorError((
+                    "Make sure the regex expression works with your data: \n\n"
+                    f"'{token_key}' with regex '{pattern}' in your settings\n"
+                    "can't find any match in your clip name "
+                    f"'{search_text}'!\n\nLook at: "
+                    "'project_settings/traypublisher/editorial_creators"
+                    "/editorial_simple/clip_name_tokenizer'\n"
+                    "in your project settings..."
+                ))
+
+            # QUESTION: how to refactor `match[-1]` into something better?
+            output_data[token_key] = match[-1]
+
+        return output_data
+
+    def _create_parents_from_settings(self, parents, data):
+        """Formatting of parent components.
+
+        Args:
+            parents (list): list of dict parent components
+            data (dict): formatting data
+
+        Raises:
+            CreatorError: missing formatting key
+            CreatorError: missing token key
+            KeyError: missing parent token
+
+        Returns:
+            list: list of dict of parent components
+        """
+        # fill the parents parts from presets
+        shot_hierarchy = deepcopy(self.shot_hierarchy)
+        hierarchy_parents = shot_hierarchy["parents"]
+
+        # fill parent keys data template from anatomy data
+        try:
+            _parent_tokens_formating_data = {
+                parent_token["name"]: parent_token["value"].format(**data)
+                for parent_token in hierarchy_parents
+            }
+        except KeyError as _E:
+            raise CreatorError((
+                "Make sure all keys in settings are correct: \n"
+                f"`{_E}` has no equivalent in \n{list(data.keys())}"
+            ))
+
+        _parent_tokens_type = {
+            parent_token["name"]: parent_token["type"]
+            for parent_token in hierarchy_parents
+        }
+        for _index, _parent in enumerate(
+            shot_hierarchy["parents_path"].split("/")
+        ):
+            # format parent token with an already formatted value
+            try:
+                parent_name = _parent.format(
+                    **_parent_tokens_formating_data)
+            except KeyError as _E:
+                raise CreatorError((
+                    "Make sure all keys in settings are correct: \n\n"
+                    f"`{_E}` from template string "
+                    f"{shot_hierarchy['parents_path']}, "
+                    f" has no equivalent in \n"
+                    f"{list(_parent_tokens_formating_data.keys())} parents"
+                ))
+
+            parent_token_name = (
+                self.NO_DECOR_PATERN.findall(_parent).pop())
+
+            if not parent_token_name:
+                raise KeyError(
+                    f"Parent token is not found in: `{_parent}`")
+
+            # find parent type
+            parent_token_type = _parent_tokens_type[parent_token_name]
+
+            # in case selected context is set to the same asset
+            if (
+                _index == 0
+                and parents[-1]["entity_name"] == parent_name
+            ):
+                continue
+
+            # in case the first parent is project, restart parents from scratch
+            if (
+                _index == 0
+                and parent_token_type == "Project"
+            ):
+                project_parent = parents[0]
+                parents = [project_parent]
+                continue
+
+            parents.append({
+                "entity_type": parent_token_type,
+                "entity_name": parent_name
+            })
+
+        return parents
+
+    def _create_hierarchy_path(self, parents):
+        """Convert parents into a hierarchy path.
+
+        Args:
+            parents (list): list of dict parent components
+
+        Returns:
+            str: hierarchy path
+        """
+        return "/".join(
+            [
+                p["entity_name"] for p in parents
+                if p["entity_type"] != "Project"
+            ]
+        ) if parents else ""
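A quick illustration of how `_create_hierarchy_path` collapses a parents list; entity names below are hypothetical.

```python
# Sketch: how a parents list collapses into the "hierarchy" path string.
# The project entry is skipped, as in the method above.
parents = [
    {"entity_type": "Project", "entity_name": "demo_project"},
    {"entity_type": "Episode", "entity_name": "ep01"},
    {"entity_type": "Sequence", "entity_name": "sq01"},
]

hierarchy = "/".join(
    p["entity_name"] for p in parents
    if p["entity_type"] != "Project"
)
print(hierarchy)  # -> ep01/sq01
```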
+    def _get_parents_from_selected_asset(
+        self,
+        asset_doc,
+        project_doc
+    ):
+        """Return parents from the selected asset's context.
+
+        Context is defined in the Traypublisher project tree.
+
+        Args:
+            asset_doc (db obj): selected asset doc
+            project_doc (db obj): actual project doc
+
+        Returns:
+            list: list of dict parent components
+        """
+        project_name = project_doc["name"]
+        visual_hierarchy = [asset_doc]
+        current_doc = asset_doc
+
+        # loop through all available visual parents;
+        # break once none are available anymore
+        while True:
+            visual_parent_id = current_doc["data"]["visualParent"]
+            visual_parent = None
+            if visual_parent_id:
+                visual_parent = get_asset_by_id(project_name, visual_parent_id)
+
+            if not visual_parent:
+                visual_hierarchy.append(project_doc)
+                break
+            visual_hierarchy.append(visual_parent)
+            current_doc = visual_parent
+
+        # add current selection context hierarchy
+        return [
+            {
+                "entity_type": entity["data"]["entityType"],
+                "entity_name": entity["name"]
+            }
+            for entity in reversed(visual_hierarchy)
+        ]
+
+    def _generate_tasks_from_settings(self, project_doc):
+        """Convert settings inputs to task data.
+
+        Args:
+            project_doc (db obj): actual project doc
+
+        Raises:
+            KeyError: missing task type in project doc
+
+        Returns:
+            dict: tasks data
+        """
+        tasks_to_add = {}
+
+        project_tasks = project_doc["config"]["tasks"]
+        for task_name, task_data in self.shot_add_tasks.items():
+            _task_data = deepcopy(task_data)
+
+            # check if task type is in project task types
+            if _task_data["type"] in project_tasks.keys():
+                tasks_to_add[task_name] = _task_data
+            else:
+                raise KeyError(
+                    "Task type `{}` for `{}` does not"
+                    " exist in `{}`".format(
+                        _task_data["type"],
+                        task_name,
+                        list(project_tasks.keys())
+                    )
+                )
+
+        return tasks_to_add
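For clarity, a standalone sketch of the task-type check in `_generate_tasks_from_settings`, with hypothetical settings and project config.

```python
# Sketch of the task-type validation above. Both dicts are made-up examples:
# project_tasks stands for project_doc["config"]["tasks"], shot_add_tasks
# stands for the creator settings.
project_tasks = {"Compositing": {}, "Animation": {}}
shot_add_tasks = {"comp": {"type": "Compositing"}}

tasks_to_add = {}
for task_name, task_data in shot_add_tasks.items():
    if task_data["type"] not in project_tasks:
        raise KeyError(
            "Task type `{}` for `{}` does not exist in `{}`".format(
                task_data["type"], task_name, list(project_tasks)
            )
        )
    tasks_to_add[task_name] = dict(task_data)

print(tasks_to_add)  # -> {'comp': {'type': 'Compositing'}}
```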
+    def generate_data(self, clip_name, source_data):
+        """Metadata generator.
+
+        Converts input data to hierarchy metadata.
+
+        Args:
+            clip_name (str): clip name
+            source_data (dict): formatting data
+
+        Returns:
+            (str, dict): shot name and hierarchy data
+        """
+
+        tasks = {}
+        asset_doc = source_data["selected_asset_doc"]
+        project_doc = source_data["project_doc"]
+
+        # match clip to shot name at start
+        shot_name = clip_name
+
+        # parse all tokens and generate formatting data
+        formating_data = self._generate_tokens(shot_name, source_data)
+
+        # generate parents from selected asset
+        parents = self._get_parents_from_selected_asset(asset_doc, project_doc)
+
+        if self.shot_rename["enabled"]:
+            shot_name = self._rename_template(formating_data)
+            self.log.info(f"Renamed shot name: {shot_name}")
+
+        if self.shot_hierarchy["enabled"]:
+            parents = self._create_parents_from_settings(
+                parents, formating_data)
+
+        if self.shot_add_tasks:
+            tasks = self._generate_tasks_from_settings(
+                project_doc)
+
+        return shot_name, {
+            "hierarchy": self._create_hierarchy_path(parents),
+            "parents": parents,
+            "tasks": tasks
+        }
diff --git a/openpype/hosts/traypublisher/api/pipeline.py b/openpype/hosts/traypublisher/api/pipeline.py
index a39e5641ae..3264f52b0f 100644
--- a/openpype/hosts/traypublisher/api/pipeline.py
+++ b/openpype/hosts/traypublisher/api/pipeline.py
@@ -3,11 +3,14 @@
 import json
 import tempfile
 import atexit

-from avalon import io
-import avalon.api
 import pyblish.api

-from openpype.pipeline import BaseCreator
+from openpype.pipeline import (
+    register_creator_plugin_path,
+    legacy_io,
+)
+from openpype.host import HostBase, IPublishHost
+

 ROOT_DIR = os.path.dirname(os.path.dirname(
     os.path.abspath(__file__)
@@ -16,6 +19,35 @@ PUBLISH_PATH = os.path.join(ROOT_DIR, "plugins", "publish")
 CREATE_PATH = os.path.join(ROOT_DIR, "plugins", "create")


+class TrayPublisherHost(HostBase, IPublishHost):
+    name = "traypublisher"
+
+    def install(self):
+        os.environ["AVALON_APP"] = self.name
+        legacy_io.Session["AVALON_APP"] = self.name
+
+        pyblish.api.register_host("traypublisher")
+        pyblish.api.register_plugin_path(PUBLISH_PATH)
+        register_creator_plugin_path(CREATE_PATH)
+
+    def get_context_title(self):
+        return HostContext.get_project_name()
+
+    def get_context_data(self):
+        return HostContext.get_context_data()
+
+    def update_context_data(self, data, changes):
+        HostContext.save_context_data(data)
+
+    def set_project_name(self, project_name):
+        # TODO Deregister project specific plugins and register new project
+        #   plugins
+        os.environ["AVALON_PROJECT"] = project_name
+        legacy_io.Session["AVALON_PROJECT"] = project_name
+        legacy_io.install()
+        HostContext.set_project_name(project_name)
+
+
 class HostContext:

     _context_json_path = None
@@ -149,32 +181,3 @@ def get_context_data():

 def update_context_data(data, changes):
     HostContext.save_context_data(data)
-
-
-def get_context_title():
-    return HostContext.get_project_name()
-
-
-def ls():
-    """Probably will never return loaded containers."""
-    return []
-
-
-def install():
-    """This is called before a project is known.
-
-    Project is defined with 'set_project_name'.
- """ - os.environ["AVALON_APP"] = "traypublisher" - - pyblish.api.register_host("traypublisher") - pyblish.api.register_plugin_path(PUBLISH_PATH) - avalon.api.register_plugin_path(BaseCreator, CREATE_PATH) - - -def set_project_name(project_name): - # TODO Deregister project specific plugins and register new project plugins - os.environ["AVALON_PROJECT"] = project_name - avalon.api.Session["AVALON_PROJECT"] = project_name - io.install() - HostContext.set_project_name(project_name) diff --git a/openpype/hosts/traypublisher/api/plugin.py b/openpype/hosts/traypublisher/api/plugin.py new file mode 100644 index 0000000000..75930f0f31 --- /dev/null +++ b/openpype/hosts/traypublisher/api/plugin.py @@ -0,0 +1,160 @@ +from openpype.lib.attribute_definitions import FileDef +from openpype.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS +from openpype.pipeline.create import ( + Creator, + HiddenCreator, + CreatedInstance, + cache_and_get_instances, + PRE_CREATE_THUMBNAIL_KEY, +) +from .pipeline import ( + list_instances, + update_instances, + remove_instances, + HostContext, +) + +REVIEW_EXTENSIONS = set(IMAGE_EXTENSIONS) | set(VIDEO_EXTENSIONS) +SHARED_DATA_KEY = "openpype.traypublisher.instances" + + +class HiddenTrayPublishCreator(HiddenCreator): + host_name = "traypublisher" + + def collect_instances(self): + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. + """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + # Add instance to current context + self._add_instance_to_context(new_instance) + + +class TrayPublishCreator(Creator): + create_allow_context_change = True + host_name = "traypublisher" + + def collect_instances(self): + instances_by_identifier = cache_and_get_instances( + self, SHARED_DATA_KEY, list_instances + ) + for instance_data in instances_by_identifier[self.identifier]: + instance = CreatedInstance.from_existing(instance_data, self) + self._add_instance_to_context(instance) + + def update_instances(self, update_list): + update_instances(update_list) + + def remove_instances(self, instances): + remove_instances(instances) + for instance in instances: + self._remove_instance_from_context(instance) + + def _store_new_instance(self, new_instance): + """Tray publisher specific method to store instance. + + Instance is stored into "workfile" of traypublisher and also add it + to CreateContext. + + Args: + new_instance (CreatedInstance): Instance that should be stored. 
+ """ + + # Host implementation of storing metadata about instance + HostContext.add_instance(new_instance.data_to_store()) + new_instance.mark_as_stored() + + # Add instance to current context + self._add_instance_to_context(new_instance) + + +class SettingsCreator(TrayPublishCreator): + create_allow_context_change = True + create_allow_thumbnail = True + + extensions = [] + + def create(self, subset_name, data, pre_create_data): + # Pass precreate data to creator attributes + thumbnail_path = pre_create_data.pop(PRE_CREATE_THUMBNAIL_KEY, None) + + data["creator_attributes"] = pre_create_data + data["settings_creator"] = True + # Create new instance + new_instance = CreatedInstance(self.family, subset_name, data, self) + + self._store_new_instance(new_instance) + + if thumbnail_path: + self.set_instance_thumbnail_path(new_instance.id, thumbnail_path) + + def get_instance_attr_defs(self): + return [ + FileDef( + "representation_files", + folders=False, + extensions=self.extensions, + allow_sequences=self.allow_sequences, + single_item=not self.allow_multiple_items, + label="Representations", + ), + FileDef( + "reviewable", + folders=False, + extensions=REVIEW_EXTENSIONS, + allow_sequences=True, + single_item=True, + label="Reviewable representations", + extensions_label="Single reviewable item" + ) + ] + + def get_pre_create_attr_defs(self): + # Use same attributes as for instance attrobites + return self.get_instance_attr_defs() + + @classmethod + def from_settings(cls, item_data): + identifier = item_data["identifier"] + family = item_data["family"] + if not identifier: + identifier = "settings_{}".format(family) + return type( + "{}{}".format(cls.__name__, identifier), + (cls, ), + { + "family": family, + "identifier": identifier, + "label": item_data["label"].strip(), + "icon": item_data["icon"], + "description": item_data["description"], + "detailed_description": item_data["detailed_description"], + "extensions": item_data["extensions"], + "allow_sequences": item_data["allow_sequences"], + "allow_multiple_items": item_data["allow_multiple_items"], + "default_variants": item_data["default_variants"] + } + ) diff --git a/openpype/hosts/traypublisher/batch_parsing.py b/openpype/hosts/traypublisher/batch_parsing.py new file mode 100644 index 0000000000..3ce3b095b9 --- /dev/null +++ b/openpype/hosts/traypublisher/batch_parsing.py @@ -0,0 +1,88 @@ +"""Functions to parse asset names, versions from file names""" +import os +import re + +from openpype.lib import Logger +from openpype.client import get_assets, get_asset_by_name + + +def get_asset_doc_from_file_name(source_filename, project_name, + version_regex, all_selected_asset_ids=None): + """Try to parse out asset name from file name provided. + + Artists might provide various file name formats. + Currently handled: + - chair.mov + - chair_v001.mov + - my_chair_to_upload.mov + """ + version = None + asset_name = os.path.splitext(source_filename)[0] + # Always first check if source filename is directly asset (eg. 
'chair.mov')
+    matching_asset_doc = get_asset_by_name_case_not_sensitive(
+        project_name, asset_name, all_selected_asset_ids)
+
+    if matching_asset_doc is None:
+        # the name may also contain a version
+        matching_asset_doc, version = (
+            parse_with_version(project_name, asset_name, version_regex,
+                               all_selected_asset_ids))
+
+    if matching_asset_doc is None:
+        matching_asset_doc = parse_containing(project_name, asset_name,
+                                              all_selected_asset_ids)
+
+    return matching_asset_doc, version
+
+
+def parse_with_version(project_name, asset_name, version_regex,
+                       all_selected_asset_ids=None, log=None):
+    """Try to parse the asset name from a file name that contains a version.
+
+    Eg. 'chair_v001.mov' >> 'chair', 1
+    """
+    if not log:
+        log = Logger.get_logger(__name__)
+    log.debug(
+        ("Asset doc by \"{}\" was not found, trying version regex.".
+         format(asset_name)))
+
+    matching_asset_doc = version_number = None
+
+    regex_result = version_regex.findall(asset_name)
+    if regex_result:
+        _asset_name, _version_number = regex_result[0]
+        matching_asset_doc = get_asset_by_name_case_not_sensitive(
+            project_name, _asset_name,
+            all_selected_asset_ids=all_selected_asset_ids)
+        if matching_asset_doc:
+            version_number = int(_version_number)
+
+    return matching_asset_doc, version_number
+
+
+def parse_containing(project_name, asset_name, all_selected_asset_ids=None):
+    """Look if the file name contains any existing asset name."""
+    for asset_doc in get_assets(project_name, asset_ids=all_selected_asset_ids,
+                                fields=["name"]):
+        if asset_doc["name"].lower() in asset_name.lower():
+            return get_asset_by_name(project_name, asset_doc["name"])
+
+
+def get_asset_by_name_case_not_sensitive(project_name, asset_name,
+                                         all_selected_asset_ids=None,
+                                         log=None):
+    """Case insensitive lookup of an asset by its name."""
+    if not log:
+        log = Logger.get_logger(__name__)
+    asset_name = re.compile(asset_name, re.IGNORECASE)
+
+    assets = list(get_assets(project_name, asset_ids=all_selected_asset_ids,
+                             asset_names=[asset_name]))
+    if assets:
+        if len(assets) > 1:
+            log.warning("Too many records found for {}".format(
+                asset_name))
+            return
+
+        return assets.pop()
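A small demonstration of the version parsing handled by `parse_with_version`; the regex mirrors the one `BatchMovieCreator` compiles later in this patch.

```python
import re

# Sketch of the filename version parsing. File names are examples from
# the docstring above.
version_regex = re.compile(r"^(.+)_v([0-9]+)$")

for name in ("chair", "chair_v001", "my_chair_to_upload"):
    result = version_regex.findall(name)
    if result:
        asset_name, version = result[0]
        print(name, "->", asset_name, int(version))  # chair_v001 -> chair 1
    else:
        print(name, "-> no version suffix")
```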
diff --git a/openpype/hosts/traypublisher/plugins/create/create_editorial.py b/openpype/hosts/traypublisher/plugins/create/create_editorial.py
new file mode 100644
index 0000000000..73be43444e
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/create/create_editorial.py
@@ -0,0 +1,864 @@
+import os
+from copy import deepcopy
+import opentimelineio as otio
+from openpype.client import (
+    get_asset_by_name,
+    get_project
+)
+from openpype.hosts.traypublisher.api.plugin import (
+    TrayPublishCreator,
+    HiddenTrayPublishCreator
+)
+from openpype.hosts.traypublisher.api.editorial import (
+    ShotMetadataSolver
+)
+from openpype.pipeline import CreatedInstance
+from openpype.lib import (
+    get_ffprobe_data,
+    convert_ffprobe_fps_value,
+
+    FileDef,
+    TextDef,
+    NumberDef,
+    EnumDef,
+    BoolDef,
+    UISeparatorDef,
+    UILabelDef
+)
+
+
+CLIP_ATTR_DEFS = [
+    EnumDef(
+        "fps",
+        items=[
+            {"value": "from_selection", "label": "From selection"},
+            {"value": 23.976, "label": "23.976"},
+            {"value": 24, "label": "24"},
+            {"value": 25, "label": "25"},
+            {"value": 29.97, "label": "29.97"},
+            {"value": 30, "label": "30"}
+        ],
+        label="FPS"
+    ),
+    NumberDef(
+        "workfile_start_frame",
+        default=1001,
+        label="Workfile start frame"
+    ),
+    NumberDef(
+        "handle_start",
+        default=0,
+        label="Handle start"
+    ),
+    NumberDef(
+        "handle_end",
+        default=0,
+        label="Handle end"
+    )
+]
+
+
+class EditorialClipInstanceCreatorBase(HiddenTrayPublishCreator):
+    """Base wrapper class for clip family creators.
+
+    Args:
+        HiddenTrayPublishCreator (BaseCreator): hidden supporting class
+    """
+    host_name = "traypublisher"
+
+    def create(self, instance_data, source_data=None):
+        subset_name = instance_data["subset"]
+
+        # Create new instance
+        new_instance = CreatedInstance(
+            self.family, subset_name, instance_data, self
+        )
+
+        self._store_new_instance(new_instance)
+
+        return new_instance
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+
+class EditorialShotInstanceCreator(EditorialClipInstanceCreatorBase):
+    """Shot family class.
+
+    The shot metadata instance carrier.
+
+    Args:
+        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
+    """
+    identifier = "editorial_shot"
+    family = "shot"
+    label = "Editorial Shot"
+
+    def get_instance_attr_defs(self):
+        attr_defs = [
+            TextDef(
+                "asset_name",
+                label="Asset name",
+            )
+        ]
+        attr_defs.extend(CLIP_ATTR_DEFS)
+        return attr_defs
+
+
+class EditorialPlateInstanceCreator(EditorialClipInstanceCreatorBase):
+    """Plate family class.
+
+    Plate representation instance.
+
+    Args:
+        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
+    """
+    identifier = "editorial_plate"
+    family = "plate"
+    label = "Editorial Plate"
+
+
+class EditorialAudioInstanceCreator(EditorialClipInstanceCreatorBase):
+    """Audio family class.
+
+    Audio representation instance.
+
+    Args:
+        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
+    """
+    identifier = "editorial_audio"
+    family = "audio"
+    label = "Editorial Audio"
+
+
+class EditorialReviewInstanceCreator(EditorialClipInstanceCreatorBase):
+    """Review family class.
+
+    Review representation instance.
+
+    Args:
+        EditorialClipInstanceCreatorBase (BaseCreator): hidden supporting class
+    """
+    identifier = "editorial_review"
+    family = "review"
+    label = "Editorial Review"
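Sketch of the parent/child wiring these hidden creators rely on (see `_make_subset_instance` further down); IDs and labels below are invented.

```python
# The shot instance is created first; every other family preset then
# points back at it through parenting_data. Values are hypothetical.
parenting_data = {"instance_label": None, "instance_id": None}

# ...after the "editorial_shot" creator ran:
parenting_data.update({
    "instance_label": "sh0010_shotMain",
    "instance_id": "1234-abcd",
})

# ...a plate/audio/review instance references the shot instance
plate_data = {
    "parent_instance_id": parenting_data["instance_id"],
    "creator_attributes": {
        "parent_instance": parenting_data["instance_label"],
        "add_review_family": True,
    },
}
print(plate_data["parent_instance_id"])  # -> 1234-abcd
```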
+class EditorialSimpleCreator(TrayPublishCreator):
+    """Editorial creator class.
+
+    Simple workflow creator. It dissects the input video file
+    into clip chunks and converts each to the format defined in
+    Settings for each subset preset.
+
+    Args:
+        TrayPublishCreator (Creator): Tray publisher plugin class
+    """
+
+    label = "Editorial Simple"
+    family = "editorial"
+    identifier = "editorial_simple"
+    default_variants = [
+        "main"
+    ]
+    description = "Editorial files to generate shots."
+    detailed_description = """
+Supports publishing new shots to a project
+or updating already created ones. Publishing will create an OTIO file.
+"""
+    icon = "fa.file"
+
+    def __init__(
+        self, project_settings, *args, **kwargs
+    ):
+        super(EditorialSimpleCreator, self).__init__(
+            project_settings, *args, **kwargs
+        )
+        editorial_creators = deepcopy(
+            project_settings["traypublisher"]["editorial_creators"]
+        )
+        # get this creator's settings by identifier
+        self._creator_settings = editorial_creators.get(self.identifier)
+
+        clip_name_tokenizer = self._creator_settings["clip_name_tokenizer"]
+        shot_rename = self._creator_settings["shot_rename"]
+        shot_hierarchy = self._creator_settings["shot_hierarchy"]
+        shot_add_tasks = self._creator_settings["shot_add_tasks"]
+
+        self._shot_metadata_solver = ShotMetadataSolver(
+            clip_name_tokenizer,
+            shot_rename,
+            shot_hierarchy,
+            shot_add_tasks,
+            self.log
+        )
+
+        # try to set main attributes from settings
+        if self._creator_settings.get("default_variants"):
+            self.default_variants = self._creator_settings["default_variants"]
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        allowed_family_presets = self._get_allowed_family_presets(
+            pre_create_data)
+
+        clip_instance_properties = {
+            k: v for k, v in pre_create_data.items()
+            if k != "sequence_filepath_data"
+            if k not in [
+                i["family"] for i in self._creator_settings["family_presets"]
+            ]
+        }
+        # Create otio editorial instance
+        asset_name = instance_data["asset"]
+        asset_doc = get_asset_by_name(self.project_name, asset_name)
+
+        if pre_create_data["fps"] == "from_selection":
+            # get fps from asset doc data attributes
+            fps = asset_doc["data"]["fps"]
+        else:
+            fps = float(pre_create_data["fps"])
+
+        instance_data.update({
+            "fps": fps
+        })
+
+        # get paths of sequence files and media files
+        sequence_path_data = pre_create_data["sequence_filepath_data"]
+        media_path_data = pre_create_data["media_filepaths_data"]
+
+        sequence_paths = self._get_path_from_file_data(
+            sequence_path_data, multi=True)
+        media_path = self._get_path_from_file_data(media_path_data)
+
+        first_otio_timeline = None
+        for seq_path in sequence_paths:
+            # get otio timeline
+            otio_timeline = self._create_otio_timeline(
+                seq_path, fps)
+
+            # Create all clip instances
+            clip_instance_properties.update({
+                "fps": fps,
+                "parent_asset_name": asset_name,
+                "variant": instance_data["variant"]
+            })
+
+            # create clip instances
+            self._get_clip_instances(
+                otio_timeline,
+                media_path,
+                clip_instance_properties,
+                allowed_family_presets,
+                os.path.basename(seq_path),
+                first_otio_timeline
+            )
+
+            if not first_otio_timeline:
+                # assign the first otio timeline for multi-file merging
+                first_otio_timeline = otio_timeline
+
+        # create otio editorial instance
+        self._create_otio_instance(
+            subset_name,
+            instance_data,
+            seq_path, media_path,
+            first_otio_timeline
+        )
+
+    def _create_otio_instance(
+        self,
+        subset_name,
+        data,
+        sequence_path,
+        media_path,
+        otio_timeline
+    ):
+        """Otio instance creating function.
+
+        Args:
+            subset_name (str): name of subset
+            data (dict): instance data
+            sequence_path (str): path to sequence file
+            media_path (str): path to media file
+            otio_timeline (otio.Timeline): otio timeline object
+        """
+        # Pass precreate data to creator attributes
+        data.update({
+            "sequenceFilePath": sequence_path,
+            "editorialSourcePath": media_path,
+            "otioTimeline": otio.adapters.write_to_string(otio_timeline)
+        })
+        new_instance = CreatedInstance(
+            self.family, subset_name, data, self
+        )
+        self._store_new_instance(new_instance)
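Assuming the standard OpenTimelineIO API, this is roughly what the `otioTimeline` round-trip in `_create_otio_instance` amounts to; the timeline here is built in code instead of being read from an EDL/XML file.

```python
import opentimelineio as otio

# Sketch: the editorial instance stores the whole timeline as an OTIO
# JSON string, which can be restored later during publishing.
timeline = otio.schema.Timeline(name="example_timeline")
serialized = otio.adapters.write_to_string(timeline)

restored = otio.adapters.read_from_string(serialized)
print(restored.name)  # -> example_timeline
```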
+    def _create_otio_timeline(self, sequence_path, fps):
+        """Create otio timeline from a sequence file path.
+
+        Args:
+            sequence_path (str): path to sequence file
+            fps (float): frames per second
+
+        Returns:
+            otio.Timeline: otio timeline object
+        """
+        # read the editorial sequence file into an otio timeline object
+        extension = os.path.splitext(sequence_path)[1]
+
+        kwargs = {}
+        if extension == ".edl":
+            # EDL has no frame rate embedded so it needs an explicit
+            # frame rate, else 24 is assumed.
+            kwargs["rate"] = fps
+            kwargs["ignore_timecode_mismatch"] = True
+
+        return otio.adapters.read_from_file(sequence_path, **kwargs)
+
+    def _get_path_from_file_data(self, file_path_data, multi=False):
+        """Convert creator path data to a single path string.
+
+        Args:
+            file_path_data (FileDefItem): creator path data inputs
+            multi (bool): switch to multiple files mode
+
+        Raises:
+            FileExistsError: in case nothing had been set
+
+        Returns:
+            str: path string
+        """
+        return_path_list = []
+
+        if isinstance(file_path_data, list):
+            return_path_list = [
+                os.path.join(f["directory"], f["filenames"][0])
+                for f in file_path_data
+            ]
+
+        if not return_path_list:
+            raise FileExistsError(
+                f"File path was not added: {file_path_data}")
+
+        return return_path_list if multi else return_path_list[0]
+
+    def _get_clip_instances(
+        self,
+        otio_timeline,
+        media_path,
+        instance_data,
+        family_presets,
+        sequence_file_name,
+        first_otio_timeline=None
+    ):
+        """Helper function for creating clip instances.
+
+        Args:
+            otio_timeline (otio.Timeline): otio timeline object
+            media_path (str): media file path string
+            instance_data (dict): clip instance data
+            family_presets (list): list of dict settings subset presets
+        """
+        self.asset_name_check = []
+
+        tracks = otio_timeline.each_child(
+            descended_from_type=otio.schema.Track
+        )
+
+        # media data for audio stream and reference solving
+        media_data = self._get_media_source_metadata(media_path)
+
+        for track in tracks:
+            track.name = f"{sequence_file_name} - {otio_timeline.name}"
+            try:
+                track_start_frame = (
+                    abs(track.source_range.start_time.value)
+                )
+                track_start_frame -= self.timeline_frame_start
+            except AttributeError:
+                track_start_frame = 0
+
+            for clip in track.each_child():
+                if not self._validate_clip_for_processing(clip):
+                    continue
+
+                # add available frames info to clip data
+                self._create_otio_reference(clip, media_path, media_data)
+
+                # convert timeline range to source range
+                self._restore_otio_source_range(clip)
+
+                base_instance_data = self._get_base_instance_data(
+                    clip,
+                    instance_data,
+                    track_start_frame
+                )
+
+                parenting_data = {
+                    "instance_label": None,
+                    "instance_id": None
+                }
+
+                for _fpreset in family_presets:
+                    # exclude audio family if there is no audio stream
+                    if (
+                        _fpreset["family"] == "audio"
+                        and not media_data.get("audio")
+                    ):
+                        continue
+
+                    instance = self._make_subset_instance(
+                        clip,
+                        _fpreset,
+                        deepcopy(base_instance_data),
+                        parenting_data
+                    )
+
+            # add track to first otioTimeline if it is in input args
+            if first_otio_timeline:
+                first_otio_timeline.tracks.append(deepcopy(track))
+
+    def _restore_otio_source_range(self, otio_clip):
+        """Infuse source range.
+
+        The otio clip is missing a proper source clip range, so
+        here we add it from the parent timeline frame range.
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+        """
+        otio_clip.source_range = otio_clip.range_in_parent()
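A minimal sketch of the `RationalTime`/`TimeRange` construction used by `_create_otio_reference` below, with example numbers standing in for the ffprobe metadata.

```python
import opentimelineio as otio

# start frame, duration and fps are example values here
start_frame, frame_duration, fps = 0, 48, 25.0

available_range = otio.opentime.TimeRange(
    start_time=otio.opentime.RationalTime(start_frame, fps),
    duration=otio.opentime.RationalTime(frame_duration, fps),
)
print(available_range.duration.to_seconds())  # -> 1.92
```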
+    def _create_otio_reference(
+        self,
+        otio_clip,
+        media_path,
+        media_data
+    ):
+        """Create an otio media reference on the otio clip.
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+            media_path (str): media file path string
+            media_data (dict): media metadata
+        """
+        start_frame = media_data["start_frame"]
+        frame_duration = media_data["duration"]
+        fps = media_data["fps"]
+
+        available_range = otio.opentime.TimeRange(
+            start_time=otio.opentime.RationalTime(
+                start_frame, fps),
+            duration=otio.opentime.RationalTime(
+                frame_duration, fps)
+        )
+        # in case of old OTIO or a video file create `ExternalReference`
+        media_reference = otio.schema.ExternalReference(
+            target_url=media_path,
+            available_range=available_range
+        )
+        otio_clip.media_reference = media_reference
+
+    def _get_media_source_metadata(self, path):
+        """Get all available metadata from a file.
+
+        Args:
+            path (str): media file path string
+
+        Raises:
+            AssertionError: ffprobe couldn't read metadata
+
+        Returns:
+            dict: media file metadata
+        """
+        return_data = {}
+
+        try:
+            media_data = get_ffprobe_data(
+                path, self.log
+            )
+
+            # get video stream data
+            video_stream = media_data["streams"][0]
+            return_data = {
+                "video": True,
+                "start_frame": 0,
+                "duration": int(video_stream["nb_frames"]),
+                "fps": float(
+                    convert_ffprobe_fps_value(
+                        video_stream["r_frame_rate"]
+                    )
+                )
+            }
+
+            # get audio streams data
+            audio_stream = [
+                stream for stream in media_data["streams"]
+                if stream["codec_type"] == "audio"
+            ]
+
+            if audio_stream:
+                return_data["audio"] = True
+
+        except Exception as exc:
+            raise AssertionError((
+                "FFprobe couldn't read information about input file: "
+                f"\"{path}\". Error message: {exc}"
+            ))
+
+        return return_data
+
+    def _make_subset_instance(
+        self,
+        otio_clip,
+        preset,
+        instance_data,
+        parenting_data
+    ):
+        """Make a subset instance from an input preset.
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+            preset (dict): single family preset
+            instance_data (dict): instance data
+            parenting_data (dict): shot instance parent data
+
+        Returns:
+            CreatedInstance: creator instance object
+        """
+        family = preset["family"]
+        label = self._make_subset_naming(
+            preset,
+            instance_data
+        )
+        instance_data["label"] = label
+
+        # the shot family carries the serialized otio clip data
+        if family == "shot":
+            instance_data["otioClip"] = (
+                otio.adapters.write_to_string(otio_clip))
+            c_instance = self.create_context.creators[
+                "editorial_shot"].create(
+                    instance_data)
+            parenting_data.update({
+                "instance_label": label,
+                "instance_id": c_instance.data["instance_id"]
+            })
+        else:
+            # add review family if defined
+            instance_data.update({
+                "outputFileType": preset["output_file_type"],
+                "parent_instance_id": parenting_data["instance_id"],
+                "creator_attributes": {
+                    "parent_instance": parenting_data["instance_label"],
+                    "add_review_family": preset.get("review")
+                }
+            })
+
+            creator_identifier = f"editorial_{family}"
+            editorial_clip_creator = self.create_context.creators[
+                creator_identifier]
+            c_instance = editorial_clip_creator.create(
+                instance_data)
+
+        return c_instance
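Illustration of the stream filtering in `_get_media_source_metadata`; `media_data` below is a trimmed, hypothetical ffprobe result.

```python
# Sketch: picking the video stream and detecting audio streams.
media_data = {
    "streams": [
        {"codec_type": "video", "nb_frames": "48",
         "r_frame_rate": "24000/1001"},
        {"codec_type": "audio", "codec_name": "pcm_s16le"},
    ]
}

video_stream = media_data["streams"][0]
has_audio = any(
    stream["codec_type"] == "audio"
    for stream in media_data["streams"]
)
print(int(video_stream["nb_frames"]), has_audio)  # -> 48 True
```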
+    def _make_subset_naming(
+        self,
+        preset,
+        instance_data
+    ):
+        """Subset name maker.
+
+        Args:
+            preset (dict): single preset item
+            instance_data (dict): instance data
+
+        Returns:
+            str: label string
+        """
+        shot_name = instance_data["shotName"]
+        variant_name = instance_data["variant"]
+        family = preset["family"]
+
+        # get variant name from preset or by inheritance
+        _variant_name = preset.get("variant") or variant_name
+
+        # subset name
+        subset_name = "{}{}".format(
+            family, _variant_name.capitalize()
+        )
+        label = "{}_{}".format(
+            shot_name,
+            subset_name
+        )
+
+        instance_data.update({
+            "family": family,
+            "label": label,
+            "variant": _variant_name,
+            "subset": subset_name,
+        })
+
+        return label
+
+    def _get_base_instance_data(
+        self,
+        otio_clip,
+        instance_data,
+        track_start_frame,
+    ):
+        """Assemble the basic set of instance data.
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+            instance_data (dict): precreate instance data
+            track_start_frame (int): track start frame
+
+        Returns:
+            dict: instance data
+        """
+        # get clip instance properties
+        parent_asset_name = instance_data["parent_asset_name"]
+        handle_start = instance_data["handle_start"]
+        handle_end = instance_data["handle_end"]
+        timeline_offset = instance_data["timeline_offset"]
+        workfile_start_frame = instance_data["workfile_start_frame"]
+        fps = instance_data["fps"]
+        variant_name = instance_data["variant"]
+
+        # basic unique asset name
+        clip_name = os.path.splitext(otio_clip.name)[0].lower()
+        project_doc = get_project(self.project_name)
+
+        shot_name, shot_metadata = self._shot_metadata_solver.generate_data(
+            clip_name,
+            {
+                "anatomy_data": {
+                    "project": {
+                        "name": self.project_name,
+                        "code": project_doc["data"]["code"]
+                    },
+                    "parent": parent_asset_name,
+                    "app": self.host_name
+                },
+                "selected_asset_doc": get_asset_by_name(
+                    self.project_name, parent_asset_name),
+                "project_doc": project_doc
+            }
+        )
+
+        self._validate_name_uniqueness(shot_name)
+
+        timing_data = self._get_timing_data(
+            otio_clip,
+            timeline_offset,
+            track_start_frame,
+            workfile_start_frame
+        )
+
+        # create creator attributes
+        creator_attributes = {
+            "asset_name": shot_name,
+            "Parent hierarchy path": shot_metadata["hierarchy"],
+            "workfile_start_frame": workfile_start_frame,
+            "fps": fps,
+            "handle_start": int(handle_start),
+            "handle_end": int(handle_end)
+        }
+        creator_attributes.update(timing_data)
+
+        # create shared new instance data
+        base_instance_data = {
+            "shotName": shot_name,
+            "variant": variant_name,
+
+            # HACK: just a temporary bug workaround
+            # TODO: should look up the shot name for update
+            "asset": parent_asset_name,
+            "task": "",
+
+            "newAssetPublishing": True,
+
+            # parent time properties
+            "trackStartFrame": track_start_frame,
+            "timelineOffset": timeline_offset,
+            # creator_attributes
+            "creator_attributes": creator_attributes
+        }
+        # add hierarchy shot metadata
+        base_instance_data.update(shot_metadata)
+
+        return base_instance_data
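The timing math in `_get_timing_data` (next method) can be checked in isolation; the clip values below are made up.

```python
# Sketch of the frame-range arithmetic, with example numbers standing in
# for the otio clip values.
track_start_frame = 0
clip_in_parent = 86400          # range_in_parent().start_time.value
clip_duration = 48
workfile_start_frame = 1001

clip_in = clip_in_parent + track_start_frame
clip_out = clip_in + clip_duration - 1

frame_start = clip_in if workfile_start_frame is None else workfile_start_frame
frame_end = frame_start + (clip_duration - 1)
print(frame_start, frame_end, clip_in, clip_out)  # -> 1001 1048 86400 86447
```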
+
+    def _get_allowed_family_presets(self, pre_create_data):
+        """Filter out allowed family presets.
+
+        Args:
+            pre_create_data (dict): precreate attributes inputs
+
+        Returns:
+            list: list of dicts with preset items
+        """
+        return [
+            {"family": "shot"},
+            *[
+                preset for preset in self._creator_settings["family_presets"]
+                if pre_create_data[preset["family"]]
+            ]
+        ]
+
+    def _validate_clip_for_processing(self, otio_clip):
+        """Validate otio clip attributes
+
+        Args:
+            otio_clip (otio.Clip): otio clip object
+
+        Returns:
+            bool: True if all conditions pass
+        """
+        if otio_clip.name is None:
+            return False
+
+        if isinstance(otio_clip, otio.schema.Gap):
+            return False
+
+        # skip all generators like black empty
+        if isinstance(
+                otio_clip.media_reference,
+                otio.schema.GeneratorReference):
+            return False
+
+        # Transitions are ignored, because Clips have the full frame
+        # range.
+        if isinstance(otio_clip, otio.schema.Transition):
+            return False
+
+        return True
+
+    def _validate_name_uniqueness(self, name):
+        """Validate name uniqueness.
+
+        In the context of other clip names in the sequence file.
+
+        Args:
+            name (str): shot name string
+        """
+        if name not in self.asset_name_check:
+            self.asset_name_check.append(name)
+        else:
+            self.log.warning(
+                f"Duplicate shot name: {name}! "
+                "Please check names in the input sequence files."
+            )
+
+    def get_pre_create_attr_defs(self):
+        """Create pre-create attributes for the creator plugin.
+
+        Returns:
+            list: list of attribute object instances
+        """
+        # Use the same attributes as for the instance attributes
+        attr_defs = [
+            FileDef(
+                "sequence_filepath_data",
+                folders=False,
+                extensions=[
+                    ".edl",
+                    ".xml",
+                    ".aaf",
+                    ".fcpxml"
+                ],
+                allow_sequences=False,
+                single_item=False,
+                label="Sequence file",
+            ),
+            FileDef(
+                "media_filepaths_data",
+                folders=False,
+                extensions=[
+                    ".mov",
+                    ".mp4",
+                    ".wav"
+                ],
+                allow_sequences=False,
+                single_item=False,
+                label="Media files",
+            ),
+            # TODO: perhaps a timecode and fps input would be better
+            NumberDef(
+                "timeline_offset",
+                default=0,
+                label="Timeline offset"
+            ),
+            UISeparatorDef(),
+            UILabelDef("Clip instance attributes"),
+            UISeparatorDef()
+        ]
+        # add variant switches
+        attr_defs.extend(
+            BoolDef(_var["family"], label=_var["family"])
+            for _var in self._creator_settings["family_presets"]
+        )
+        attr_defs.append(UISeparatorDef())
+
+        attr_defs.extend(CLIP_ATTR_DEFS)
+        return attr_defs
diff --git a/openpype/hosts/traypublisher/plugins/create/create_from_settings.py b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py
new file mode 100644
index 0000000000..df6253b0c2
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/create/create_from_settings.py
@@ -0,0 +1,23 @@
+import os
+from openpype.lib import Logger
+from openpype.settings import get_project_settings
+
+log = Logger.get_logger(__name__)
+
+
+def initialize():
+    from openpype.hosts.traypublisher.api.plugin import SettingsCreator
+
+    project_name = os.environ["AVALON_PROJECT"]
+    project_settings = get_project_settings(project_name)
+
+    simple_creators = project_settings["traypublisher"]["simple_creators"]
+
+    global_variables = globals()
+    for item in simple_creators:
+
+        dynamic_plugin = SettingsCreator.from_settings(item)
+        global_variables[dynamic_plugin.__name__] = dynamic_plugin
+
+
+initialize()
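The `create_from_settings.py` module above relies on injecting dynamically built classes into module globals so that plugin discovery finds one creator per settings item. A minimal sketch of that pattern, with hypothetical class and key names since the internals of `SettingsCreator.from_settings` are not shown in this diff:

```python
# Sketch of the dynamic-class pattern used above: build a new class per
# settings item and expose it in module globals so a loader that scans
# the module can discover it. Names here are hypothetical.
class BaseSettingsCreator:
    family = None
    label = None

def make_creator(item):
    name = "SettingsCreator{}".format(item["family"].capitalize())
    # type() creates a new class object at runtime
    return type(name, (BaseSettingsCreator,), {
        "family": item["family"],
        "label": item.get("label") or item["family"],
    })

for _item in [{"family": "render"}, {"family": "image", "label": "Image"}]:
    _cls = make_creator(_item)
    globals()[_cls.__name__] = _cls
```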
diff --git a/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py
new file mode 100644
index 0000000000..d077131e4c
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/create/create_movie_batch.py
@@ -0,0 +1,154 @@
+import copy
+import os
+import re
+
+from openpype.lib import (
+    FileDef,
+    BoolDef,
+)
+from openpype.pipeline import (
+    CreatedInstance,
+)
+from openpype.pipeline.create import (
+    get_subset_name,
+    TaskNotSetError,
+)
+
+from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator
+from openpype.hosts.traypublisher.batch_parsing import (
+    get_asset_doc_from_file_name
+)
+
+
+class BatchMovieCreator(TrayPublishCreator):
+    """Creates instances from movie file(s).
+
+    Intended for .mov files, but should work for any video file.
+    Doesn't handle image sequences though.
+    """
+    identifier = "render_movie_batch"
+    label = "Batch Movies"
+    family = "render"
+    description = "Publish batch of video files"
+
+    create_allow_context_change = False
+    version_regex = re.compile(r"^(.+)_v([0-9]+)$")
+    # Position batch creator after simple creators
+    order = 110
+
+    def __init__(self, project_settings, *args, **kwargs):
+        super(BatchMovieCreator, self).__init__(project_settings,
+                                                *args, **kwargs)
+        creator_settings = (
+            project_settings["traypublisher"]["BatchMovieCreator"]
+        )
+        self.default_variants = creator_settings["default_variants"]
+        self.default_tasks = creator_settings["default_tasks"]
+        self.extensions = creator_settings["extensions"]
+
+    def get_icon(self):
+        return "fa.file"
+
+    def create(self, subset_name, data, pre_create_data):
+        file_paths = pre_create_data.get("filepath")
+        if not file_paths:
+            return
+
+        for file_info in file_paths:
+            instance_data = copy.deepcopy(data)
+            file_name = file_info["filenames"][0]
+            filepath = os.path.join(file_info["directory"], file_name)
+            instance_data["creator_attributes"] = {"filepath": filepath}
+
+            asset_doc, version = get_asset_doc_from_file_name(
+                file_name, self.project_name, self.version_regex)
+
+            subset_name, task_name = self._get_subset_and_task(
+                asset_doc, data["variant"], self.project_name)
+
+            instance_data["task"] = task_name
+            instance_data["asset"] = asset_doc["name"]
+
+            # Create new instance
+            new_instance = CreatedInstance(self.family, subset_name,
                                           instance_data, self)
+            self._store_new_instance(new_instance)
+
+    def _get_subset_and_task(self, asset_doc, variant, project_name):
+        """Create subset name according to standard template process"""
+        task_name = self._get_task_name(asset_doc)
+
+        try:
+            subset_name = get_subset_name(
+                self.family,
+                variant,
+                task_name,
+                asset_doc,
+                project_name
+            )
+        except TaskNotSetError:
+            # Create instance with fake task
+            # - instance will be marked as invalid so it can't be published,
+            #   but the user has the ability to change it
+            # NOTE: This expects that there is no task named 'Undefined'
+            #   on the asset
+            task_name = "Undefined"
+            subset_name = get_subset_name(
+                self.family,
+                variant,
+                task_name,
+                asset_doc,
+                project_name
+            )
+
+        return subset_name, task_name
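The `version_regex` class attribute above drives how batch file names are split into an asset name and a version before the database lookup. A small sketch of just the regex behaviour (the actual lookup happens in `get_asset_doc_from_file_name`, which is not part of this diff):

```python
# Illustrates what the class-level version_regex matches.
import os
import re

version_regex = re.compile(r"^(.+)_v([0-9]+)$")

for file_name in ("chair_v001.mov", "chair.mov"):
    basename = os.path.splitext(file_name)[0]
    match = version_regex.match(basename)
    if match:
        asset_name, version = match.group(1), int(match.group(2))
    else:
        asset_name, version = basename, None
    print(file_name, "->", asset_name, version)
# chair_v001.mov -> chair 1
# chair.mov -> chair None
```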
+
+    def _get_task_name(self, asset_doc):
+        """Get applicable task from 'asset_doc' """
+        available_task_names = {}
+        asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+        for task_name in asset_tasks.keys():
+            available_task_names[task_name.lower()] = task_name
+
+        task_name = None
+        for _task_name in self.default_tasks:
+            _task_name_low = _task_name.lower()
+            if _task_name_low in available_task_names:
+                task_name = available_task_names[_task_name_low]
+                break
+
+        return task_name
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Use the same attributes as for the instance attributes
+        return [
+            FileDef(
+                "filepath",
+                folders=False,
+                single_item=False,
+                extensions=self.extensions,
+                allow_sequences=False,
+                label="Filepath"
+            ),
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_detail_description(self):
+        return """# Publish a batch of .mov files to multiple assets.
+
+        File names must contain only the asset name, or the asset name
+        + version (e.g. 'chair.mov' or 'chair_v001.mov'; names like
+        'my_chair_v001.mov' are not safe).
+        """
+
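The `TaskNotSetError` fallback in `_get_subset_and_task` above is the one non-obvious step: subset-name templates may require a task, and a placeholder task keeps instance creation from failing outright. A condensed sketch of the pattern, reusing the imports declared in the file; the argument order mirrors the calls in the diff and the values are illustrative:

```python
# Condensed sketch of the fallback used in _get_subset_and_task above.
from openpype.pipeline.create import get_subset_name, TaskNotSetError

def resolve_subset_name(family, variant, task_name, asset_doc, project_name):
    try:
        return get_subset_name(
            family, variant, task_name, asset_doc, project_name
        ), task_name
    except TaskNotSetError:
        # Placeholder task: the instance is still created and is
        # expected to fail validation until the user picks a real task.
        fake_task = "Undefined"
        return get_subset_name(
            family, variant, fake_task, asset_doc, project_name
        ), fake_task
```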
diff --git a/openpype/hosts/traypublisher/plugins/create/create_online.py b/openpype/hosts/traypublisher/plugins/create/create_online.py
new file mode 100644
index 0000000000..199fae6d2c
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/create/create_online.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+"""Creator of online files.
+
+Online files retain their original name and use it as the subset name.
+To avoid conflicts, this creator checks if a subset with this name
+already exists under the selected asset.
+"""
+from pathlib import Path
+
+# from openpype.client import get_subset_by_name, get_asset_by_name
+from openpype.lib.attribute_definitions import FileDef, BoolDef
+from openpype.pipeline import (
+    CreatedInstance,
+    CreatorError
+)
+from openpype.hosts.traypublisher.api.plugin import TrayPublishCreator
+
+
+class OnlineCreator(TrayPublishCreator):
+    """Creates instance from file and retains its original name."""
+
+    identifier = "io.openpype.creators.traypublisher.online"
+    label = "Online"
+    family = "online"
+    description = "Publish file retaining its original file name"
+    extensions = [".mov", ".mp4", ".mxf", ".m4v", ".mpg", ".exr",
+                  ".dpx", ".tif", ".png", ".jpg"]
+
+    def get_detail_description(self):
+        return """# Create file retaining its original file name.
+
+        This publishes a file using a template that retains the original
+        file name, and that file name is used as the subset name.
+
+        By default it tries to guard against multiple publishes of the
+        same file."""
+
+    def get_icon(self):
+        return "fa.file"
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        repr_file = pre_create_data.get("representation_file")
+        if not repr_file:
+            raise CreatorError("No files specified")
+
+        files = repr_file.get("filenames")
+        if not files:
+            # this should never happen
+            raise CreatorError("Missing files from representation")
+
+        origin_basename = Path(files[0]).stem
+
+        # disable check for existing subset with the same name
+        """
+        asset = get_asset_by_name(
+            self.project_name, instance_data["asset"], fields=["_id"])
+
+        if get_subset_by_name(
+                self.project_name, origin_basename, asset["_id"],
+                fields=["_id"]):
+            raise CreatorError(f"subset with {origin_basename} already "
+                               "exists in selected asset")
+        """
+
+        instance_data["originalBasename"] = origin_basename
+        subset_name = origin_basename
+
+        instance_data["creator_attributes"] = {
+            "path": (Path(repr_file["directory"]) / files[0]).as_posix()
+        }
+
+        # Create new instance
+        new_instance = CreatedInstance(self.family, subset_name,
                                       instance_data, self)
+        self._store_new_instance(new_instance)
+
+    def get_instance_attr_defs(self):
+        return [
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        return [
+            FileDef(
+                "representation_file",
+                folders=False,
+                extensions=self.extensions,
+                allow_sequences=True,
+                single_item=True,
+                label="Representation",
+            ),
+            BoolDef(
+                "add_review_family",
+                default=True,
+                label="Review"
+            )
+        ]
+
+    def get_subset_name(
+        self,
+        variant,
+        task_name,
+        asset_doc,
+        project_name,
+        host_name=None,
+        instance=None
+    ):
+        if instance is None:
+            return "{originalBasename}"
+
+        return instance.data["subset"]
diff --git a/openpype/hosts/traypublisher/plugins/create/create_workfile.py b/openpype/hosts/traypublisher/plugins/create/create_workfile.py
deleted file mode 100644
index 2db4770bbc..0000000000
--- a/openpype/hosts/traypublisher/plugins/create/create_workfile.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from openpype.hosts.traypublisher.api import pipeline
-from openpype.pipeline import (
-    Creator,
-    CreatedInstance,
-    lib
-)
-
-
-class WorkfileCreator(Creator):
-    identifier = "workfile"
-    label = "Workfile"
-    family = "workfile"
-    description = "Publish backup of workfile"
-
-    create_allow_context_change = True
-
-    extensions = [
-        # Maya
-        ".ma", ".mb",
-        # Nuke
-        ".nk",
-        # Hiero
-        ".hrox",
-        # Houdini
-        ".hip", ".hiplc", ".hipnc",
-        # Blender
-        ".blend",
-        # Celaction
-        ".scn",
-        # TVPaint
-        ".tvpp",
-        # Fusion
-        ".comp",
-        # Harmony
-        ".zip",
-        # Premiere
-        ".prproj",
-        # Resolve
-        ".drp",
-        # Photoshop
-        ".psd", ".psb",
-        # Aftereffects
-        ".aep"
-    ]
-
-    def get_icon(self):
-        return "fa.file"
-
-    def collect_instances(self):
-        for instance_data in pipeline.list_instances():
-            creator_id = instance_data.get("creator_identifier")
-            if creator_id == self.identifier:
-                instance = CreatedInstance.from_existing(
-                    instance_data, self
-                )
-                self._add_instance_to_context(instance)
-
-    def update_instances(self, update_list):
-        pipeline.update_instances(update_list)
-
-    def remove_instances(self, instances):
-        pipeline.remove_instances(instances)
-        for instance in instances:
-            self._remove_instance_from_context(instance)
-
-    def create(self, subset_name, data, pre_create_data):
-        # Pass precreate data to creator attributes
-        data["creator_attributes"] = pre_create_data
-        # Create new instance
-        new_instance = 
CreatedInstance(self.family, subset_name, data, self) - # Host implementation of storing metadata about instance - pipeline.HostContext.add_instance(new_instance.data_to_store()) - # Add instance to current context - self._add_instance_to_context(new_instance) - - def get_default_variants(self): - return [ - "Main" - ] - - def get_instance_attr_defs(self): - output = [ - lib.FileDef( - "filepath", - folders=False, - extensions=self.extensions, - label="Filepath" - ) - ] - return output - - def get_pre_create_attr_defs(self): - # Use same attributes as for instance attrobites - return self.get_instance_attr_defs() - - def get_detail_description(self): - return """# Publish workfile backup""" diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py new file mode 100644 index 0000000000..e38d10e70f --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_app_name.py @@ -0,0 +1,13 @@ +import pyblish.api + + +class CollectTrayPublisherAppName(pyblish.api.ContextPlugin): + """Collect app name and label.""" + + label = "Collect App Name/Label" + order = pyblish.api.CollectorOrder - 0.5 + hosts = ["traypublisher"] + + def process(self, context): + context.data["appName"] = "tray publisher" + context.data["appLabel"] = "Tray publisher" diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py new file mode 100644 index 0000000000..bdf7c05f3d --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_clip_instances.py @@ -0,0 +1,36 @@ +from pprint import pformat +import pyblish.api + + +class CollectClipInstance(pyblish.api.InstancePlugin): + """Collect clip instances and resolve its parent""" + + label = "Collect Clip Instances" + order = pyblish.api.CollectorOrder - 0.081 + + hosts = ["traypublisher"] + families = ["plate", "review", "audio"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + instance.data["families"].append("clip") + + parent_instance_id = instance.data["parent_instance_id"] + edit_shared_data = instance.context.data["editorialSharedData"] + instance.data.update( + edit_shared_data[parent_instance_id] + ) + + if "editorialSourcePath" in instance.context.data.keys(): + instance.data["editorialSourcePath"] = ( + instance.context.data["editorialSourcePath"]) + instance.data["families"].append("trimming") + + self.log.debug(pformat(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py new file mode 100644 index 0000000000..e181d0abe5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_instances.py @@ -0,0 +1,48 @@ +import os +from pprint import pformat +import pyblish.api +import opentimelineio as otio + + +class CollectEditorialInstance(pyblish.api.InstancePlugin): + """Collect data for instances created by settings creators.""" + + label = "Collect Editorial Instances" + order = pyblish.api.CollectorOrder - 0.1 + + hosts = ["traypublisher"] + families = ["editorial"] + + def process(self, instance): + + if "families" not in instance.data: + instance.data["families"] = [] + + if "representations" not in instance.data: + instance.data["representations"] = [] + + 
fpath = instance.data["sequenceFilePath"] + otio_timeline_string = instance.data.pop("otioTimeline") + otio_timeline = otio.adapters.read_from_string( + otio_timeline_string) + + instance.context.data["otioTimeline"] = otio_timeline + instance.context.data["editorialSourcePath"] = ( + instance.data["editorialSourcePath"]) + + self.log.info(fpath) + + instance.data["stagingDir"] = os.path.dirname(fpath) + + _, ext = os.path.splitext(fpath) + + instance.data["representations"].append({ + "ext": ext[1:], + "name": ext[1:], + "stagingDir": instance.data["stagingDir"], + "files": os.path.basename(fpath) + }) + + self.log.debug("Created Editorial Instance {}".format( + pformat(instance.data) + )) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py new file mode 100644 index 0000000000..4af4fb94e9 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_editorial_reviewable.py @@ -0,0 +1,30 @@ +import pyblish.api + + +class CollectEditorialReviewable(pyblish.api.InstancePlugin): + """ Collect review input from user. + + Adds the input to instance data. + """ + + label = "Collect Editorial Reviewable" + order = pyblish.api.CollectorOrder + + families = ["plate", "review", "audio"] + hosts = ["traypublisher"] + + def process(self, instance): + creator_identifier = instance.data["creator_identifier"] + if creator_identifier not in [ + "editorial_plate", + "editorial_audio", + "editorial_review" + ]: + return + + creator_attributes = instance.data["creator_attributes"] + + if creator_attributes["add_review_family"]: + instance.data["families"].append("review") + + self.log.debug("instance.data {}".format(instance.data)) diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py new file mode 100644 index 0000000000..5f8b2878b7 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/collect_movie_batch.py @@ -0,0 +1,48 @@ +import os + +import pyblish.api +from openpype.pipeline import OpenPypePyblishPluginMixin + + +class CollectMovieBatch( + pyblish.api.InstancePlugin, OpenPypePyblishPluginMixin +): + """Collect file url for batch movies and create representation. + + Adds review on instance and to repre.tags based on value of toggle button + on creator. 
+    """
+
+    label = "Collect Movie Batch Files"
+    order = pyblish.api.CollectorOrder
+
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        if instance.data.get("creator_identifier") != "render_movie_batch":
+            return
+
+        creator_attributes = instance.data["creator_attributes"]
+
+        file_url = creator_attributes["filepath"]
+        file_name = os.path.basename(file_url)
+        _, ext = os.path.splitext(file_name)
+
+        repre = {
+            "name": ext[1:],
+            "ext": ext[1:],
+            "files": file_name,
+            "stagingDir": os.path.dirname(file_url),
+            "tags": []
+        }
+        instance.data["representations"].append(repre)
+
+        if creator_attributes["add_review_family"]:
+            repre["tags"].append("review")
+            instance.data["families"].append("review")
+            if not instance.data.get("thumbnailSource"):
+                instance.data["thumbnailSource"] = file_url
+
+        instance.data["source"] = file_url
+
+        self.log.debug("instance.data {}".format(instance.data))
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py
new file mode 100644
index 0000000000..05b00e9516
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_online_file.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+import pyblish.api
+from pathlib import Path
+
+
+class CollectOnlineFile(pyblish.api.InstancePlugin):
+    """Collect online file and retain its file name."""
+    label = "Collect Online File"
+    order = pyblish.api.CollectorOrder
+    families = ["online"]
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        file = Path(instance.data["creator_attributes"]["path"])
+        review = instance.data["creator_attributes"]["add_review_family"]
+        instance.data["review"] = review
+        if review and "review" not in instance.data["families"]:
+            instance.data["families"].append("review")
+        self.log.info(f"Adding review: {review}")
+
+        instance.data["representations"].append(
+            {
+                "name": file.suffix.lstrip("."),
+                "ext": file.suffix.lstrip("."),
+                "files": file.name,
+                "stagingDir": file.parent.as_posix(),
+                "tags": ["review"] if review else []
+            }
+        )
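For reference, this is roughly the representation payload `CollectOnlineFile` assembles from a single path with `pathlib`; the path and toggle value below are made up:

```python
# Illustrative only: the representation dict built from one file path.
from pathlib import Path

file = Path("/projects/demo/shots/sh010/online/sh010.mov")  # made-up path
review = True

representation = {
    "name": file.suffix.lstrip("."),       # "mov"
    "ext": file.suffix.lstrip("."),        # "mov"
    "files": file.name,                    # "sh010.mov"
    "stagingDir": file.parent.as_posix(),  # parent folder as POSIX path
    "tags": ["review"] if review else [],
}
```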
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py
new file mode 100644
index 0000000000..78c1f14e4e
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_shot_instances.py
@@ -0,0 +1,208 @@
+from pprint import pformat
+import pyblish.api
+import opentimelineio as otio
+
+
+class CollectShotInstance(pyblish.api.InstancePlugin):
+    """Collect shot instances.
+
+    Resolves user inputs from creator attributes
+    into instance data.
+    """
+
+    label = "Collect Shot Instances"
+    order = pyblish.api.CollectorOrder - 0.09
+
+    hosts = ["traypublisher"]
+    families = ["shot"]
+
+    SHARED_KEYS = [
+        "asset",
+        "fps",
+        "handleStart",
+        "handleEnd",
+        "frameStart",
+        "frameEnd",
+        "clipIn",
+        "clipOut",
+        "clipDuration",
+        "sourceIn",
+        "sourceOut",
+        "otioClip",
+        "workfileFrameStart"
+    ]
+
+    def process(self, instance):
+        creator_identifier = instance.data["creator_identifier"]
+        if "editorial" not in creator_identifier:
+            return
+
+        # get otio clip object
+        otio_clip = self._get_otio_clip(instance)
+        instance.data["otioClip"] = otio_clip
+
+        # first solve the inputs from creator attr
+        data = self._solve_inputs_to_data(instance)
+        instance.data.update(data)
+
+        # distribute all shared keys to clips instances
+        self._distribute_shared_data(instance)
+        self._solve_hierarchy_context(instance)
+
+        self.log.debug(pformat(instance.data))
+
+    def _get_otio_clip(self, instance):
+        """Convert serialized otio clip data back to an object.
+
+        Converts the string data to a proper otio object and finds
+        its equivalent in the otio timeline. This process is a hack
+        that also allows resolving the parent range.
+
+        Args:
+            instance (obj): publishing instance
+
+        Returns:
+            otio.Clip: otio clip object
+        """
+        context = instance.context
+        # convert otio clip from string to object
+        otio_clip_string = instance.data.pop("otioClip")
+        otio_clip = otio.adapters.read_from_string(
+            otio_clip_string)
+
+        otio_timeline = context.data["otioTimeline"]
+
+        clips = [
+            clip for clip in otio_timeline.each_child(
+                descended_from_type=otio.schema.Clip)
+            if clip.name == otio_clip.name
+        ]
+
+        otio_clip = clips.pop()
+
+        return otio_clip
+
+    def _distribute_shared_data(self, instance):
+        """Distribute all defined keys.
+
+        The data are shared between all related
+        instances in the context.
+
+        Args:
+            instance (obj): publishing instance
+        """
+        context = instance.context
+
+        instance_id = instance.data["instance_id"]
+
+        if not context.data.get("editorialSharedData"):
+            context.data["editorialSharedData"] = {}
+
+        context.data["editorialSharedData"][instance_id] = {
+            _k: _v for _k, _v in instance.data.items()
+            if _k in self.SHARED_KEYS
+        }
+
+    def _solve_inputs_to_data(self, instance):
+        """Resolve all user inputs into instance data.
+
+        Args:
+            instance (obj): publishing instance
+
+        Returns:
+            dict: data for updating the instance
+        """
+        _cr_attrs = instance.data["creator_attributes"]
+        workfile_start_frame = _cr_attrs["workfile_start_frame"]
+        frame_start = _cr_attrs["frameStart"]
+        frame_end = _cr_attrs["frameEnd"]
+        frame_dur = frame_end - frame_start
+
+        return {
+            "asset": _cr_attrs["asset_name"],
+            "fps": float(_cr_attrs["fps"]),
+            "handleStart": _cr_attrs["handle_start"],
+            "handleEnd": _cr_attrs["handle_end"],
+            "frameStart": workfile_start_frame,
+            "frameEnd": workfile_start_frame + frame_dur,
+            "clipIn": _cr_attrs["clipIn"],
+            "clipOut": _cr_attrs["clipOut"],
+            "clipDuration": _cr_attrs["clipDuration"],
+            "sourceIn": _cr_attrs["sourceIn"],
+            "sourceOut": _cr_attrs["sourceOut"],
+            "workfileFrameStart": workfile_start_frame
+        }
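`_solve_hierarchy_context` below folds each shot into a nested mapping keyed by its parent entities, using the `childs`/`entity_type` convention visible in the code. With made-up entity names, the resulting `hierarchyContext` looks roughly like this:

```python
# Shape of the "hierarchyContext" assembled below for one shot with two
# parents; entity names and numbers are illustrative only.
hierarchy_context = {
    "MyProject": {
        "entity_type": "Project",
        "childs": {
            "sq01": {
                "entity_type": "Sequence",
                "childs": {
                    "sh010": {
                        "entity_type": "Shot",
                        "custom_attributes": {
                            "frameStart": 1001,
                            "frameEnd": 1048,
                            "handleStart": 10,
                            "handleEnd": 10,
                            "clipIn": 24,
                            "clipOut": 71,
                            "fps": 24.0,
                        },
                        "tasks": {},
                    }
                },
            }
        },
    }
}
```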
+
+    def _solve_hierarchy_context(self, instance):
+        """Add hierarchy data to the context shared data.
+
+        Args:
+            instance (obj): publishing instance
+        """
+        context = instance.context
+
+        final_context = (
+            context.data["hierarchyContext"]
+            if context.data.get("hierarchyContext")
+            else {}
+        )
+
+        name = instance.data["asset"]
+
+        # get handles
+        handle_start = int(instance.data["handleStart"])
+        handle_end = int(instance.data["handleEnd"])
+
+        in_info = {
+            "entity_type": "Shot",
+            "custom_attributes": {
+                "handleStart": handle_start,
+                "handleEnd": handle_end,
+                "frameStart": instance.data["frameStart"],
+                "frameEnd": instance.data["frameEnd"],
+                "clipIn": instance.data["clipIn"],
+                "clipOut": instance.data["clipOut"],
+                "fps": instance.data["fps"]
+            },
+            "tasks": instance.data["tasks"]
+        }
+
+        parents = instance.data.get('parents', [])
+
+        actual = {name: in_info}
+
+        for parent in reversed(parents):
+            parent_name = parent["entity_name"]
+            next_dict = {
+                parent_name: {
+                    "entity_type": parent["entity_type"],
+                    "childs": actual
+                }
+            }
+            actual = next_dict
+
+        final_context = self._update_dict(final_context, actual)
+
+        # adding hierarchy context to instance
+        context.data["hierarchyContext"] = final_context
+
+    def _update_dict(self, ex_dict, new_dict):
+        """Recursive function.
+
+        Updates nested data with other nested data.
+
+        Args:
+            ex_dict (dict): nested data
+            new_dict (dict): nested data
+
+        Returns:
+            dict: updated nested data
+        """
+        for key in ex_dict:
+            if key in new_dict and isinstance(ex_dict[key], dict):
+                new_dict[key] = self._update_dict(ex_dict[key], new_dict[key])
+            elif not ex_dict.get(key) or not new_dict.get(key):
+                new_dict[key] = ex_dict[key]
+
+        return new_dict
diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py
new file mode 100644
index 0000000000..183195a515
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/collect_simple_instances.py
@@ -0,0 +1,243 @@
+import os
+import tempfile
+from pathlib import Path
+
+import clique
+import pyblish.api
+
+
+class CollectSettingsSimpleInstances(pyblish.api.InstancePlugin):
+    """Collect data for instances created by settings creators.
+
+    The plugin creates representations for simple instances based
+    on the 'representation_files' attribute stored on instance data.
+
+    There is also the possibility of a reviewable representation, stored
+    under the 'reviewable' attribute on instance data. If a representation
+    with the same files as 'reviewable' was already created, it is reused
+    for review instead of creating a new one.
+
+    Representations can be marked for review, in which case the 'review'
+    family is also added to the instance families. Only one representation
+    can be marked for review, so the **first** representation with an
+    extension available in '_review_extensions' is used for review.
+
+    For the instance 'source', the path from the last representation
+    created from 'representation_files' is used.
+
+    Also sets the staging directory on the instance. That is probably never
+    used because each created representation has its own staging dir.
+ """ + + label = "Collect Settings Simple Instances" + order = pyblish.api.CollectorOrder - 0.49 + + hosts = ["traypublisher"] + + def process(self, instance): + if not instance.data.get("settings_creator"): + return + + instance_label = instance.data["name"] + # Create instance's staging dir in temp + tmp_folder = tempfile.mkdtemp(prefix="traypublisher_") + instance.data["stagingDir"] = tmp_folder + instance.context.data["cleanupFullPaths"].append(tmp_folder) + + self.log.debug(( + "Created temp staging directory for instance {}. {}" + ).format(instance_label, tmp_folder)) + + # Store filepaths for validation of their existence + source_filepaths = [] + # Make sure there are no representations with same name + repre_names_counter = {} + # Store created names for logging + repre_names = [] + # Store set of filepaths per each representation + representation_files_mapping = [] + source = self._create_main_representations( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + + self._create_review_representation( + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ) + source_filepaths = list(set(source_filepaths)) + instance.data["source"] = source + instance.data["sourceFilepaths"] = source_filepaths + + # NOTE: Missing filepaths should not cause crashes (at least not here) + # - if filepaths are required they should crash on validation + if source_filepaths: + # NOTE: Original basename is not handling sequences + # - we should maybe not fill the key when sequence is used? + origin_basename = Path(source_filepaths[0]).stem + instance.data["originalBasename"] = origin_basename + + self.log.debug( + ( + "Created Simple Settings instance \"{}\"" + " with {} representations: {}" + ).format( + instance_label, + len(instance.data["representations"]), + ", ".join(repre_names) + ) + ) + + def _create_main_representations( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + creator_attributes = instance.data["creator_attributes"] + filepath_items = creator_attributes["representation_files"] + if not isinstance(filepath_items, list): + filepath_items = [filepath_items] + + source = None + for filepath_item in filepath_items: + # Skip if filepath item does not have filenames + if not filepath_item["filenames"]: + continue + + filepaths = { + os.path.join(filepath_item["directory"], filename) + for filename in filepath_item["filenames"] + } + source_filepaths.extend(filepaths) + + source = self._calculate_source(filepaths) + representation = self._create_representation_data( + filepath_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(representation) + representation_files_mapping.append( + (filepaths, representation, source) + ) + return source + + def _create_review_representation( + self, + instance, + source_filepaths, + repre_names_counter, + repre_names, + representation_files_mapping + ): + # Skip review representation creation if there are no representations + # created for "main" part + # - review representation must not be created in that case so + # validation can care about it + if not representation_files_mapping: + self.log.warning(( + "There are missing source representations." + " Creation of review representation was skipped." 
+ )) + return + + creator_attributes = instance.data["creator_attributes"] + review_file_item = creator_attributes["reviewable"] + filenames = review_file_item.get("filenames") + if not filenames: + self.log.debug(( + "Filepath for review is not defined." + " Skipping review representation creation." + )) + return + + item_dir = review_file_item["directory"] + first_filepath = os.path.join(item_dir, filenames[0]) + + filepaths = { + os.path.join(item_dir, filename) + for filename in filenames + } + source_filepaths.extend(filepaths) + # First try to find out representation with same filepaths + # so it's not needed to create new representation just for review + review_representation = None + # Review path (only for logging) + review_path = None + for item in representation_files_mapping: + _filepaths, representation, repre_path = item + if _filepaths == filepaths: + review_representation = representation + review_path = repre_path + break + + if review_representation is None: + self.log.debug("Creating new review representation") + review_path = self._calculate_source(filepaths) + review_representation = self._create_representation_data( + review_file_item, repre_names_counter, repre_names + ) + instance.data["representations"].append(review_representation) + + if "review" not in instance.data["families"]: + instance.data["families"].append("review") + + if not instance.data.get("thumbnailSource"): + instance.data["thumbnailSource"] = first_filepath + + review_representation["tags"].append("review") + self.log.debug("Representation {} was marked for review. {}".format( + review_representation["name"], review_path + )) + + def _create_representation_data( + self, filepath_item, repre_names_counter, repre_names + ): + """Create new representation data based on file item. + + Args: + filepath_item (Dict[str, Any]): Item with information about + representation paths. + repre_names_counter (Dict[str, int]): Store count of representation + names. + repre_names (List[str]): All used representation names. For + logging purposes. + + Returns: + Dict: Prepared base representation data. 
+ """ + + filenames = filepath_item["filenames"] + _, ext = os.path.splitext(filenames[0]) + if len(filenames) == 1: + filenames = filenames[0] + + repre_name = repre_ext = ext[1:] + if repre_name not in repre_names_counter: + repre_names_counter[repre_name] = 2 + else: + counter = repre_names_counter[repre_name] + repre_names_counter[repre_name] += 1 + repre_name = "{}_{}".format(repre_name, counter) + repre_names.append(repre_name) + return { + "ext": repre_ext, + "name": repre_name, + "stagingDir": filepath_item["directory"], + "files": filenames, + "tags": [] + } + + def _calculate_source(self, filepaths): + cols, rems = clique.assemble(filepaths) + if cols: + source = cols[0].format("{head}{padding}{tail}") + elif rems: + source = rems[0] + return source diff --git a/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py b/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py deleted file mode 100644 index d48bace047..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/collect_workfile.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import pyblish.api - - -class CollectWorkfile(pyblish.api.InstancePlugin): - """Collect representation of workfile instances.""" - - label = "Collect Workfile" - order = pyblish.api.CollectorOrder - 0.49 - families = ["workfile"] - hosts = ["traypublisher"] - - def process(self, instance): - if "representations" not in instance.data: - instance.data["representations"] = [] - repres = instance.data["representations"] - - creator_attributes = instance.data["creator_attributes"] - filepath = creator_attributes["filepath"] - instance.data["sourceFilepath"] = filepath - - staging_dir = os.path.dirname(filepath) - filename = os.path.basename(filepath) - ext = os.path.splitext(filename)[-1] - - repres.append({ - "ext": ext, - "name": ext, - "stagingDir": staging_dir, - "files": filename - }) diff --git a/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml new file mode 100644 index 0000000000..933df1c7c5 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/help/validate_frame_ranges.xml @@ -0,0 +1,15 @@ + + + +Invalid frame range + +## Invalid frame range + +Expected duration or '{duration}' frames set in database, workfile contains only '{found}' frames. + +### How to repair? + +Modify configuration in the database or tweak frame range in the workfile. + + + \ No newline at end of file diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py new file mode 100644 index 0000000000..749199fbd3 --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py @@ -0,0 +1,68 @@ +import os +import pyblish.api +from openpype.pipeline import PublishValidationError + + +class ValidateFilePath(pyblish.api.InstancePlugin): + """Validate existence of source filepaths on instance. + + Plugins looks into key 'sourceFilepaths' and validate if paths there + actually exist on disk. + + Also validate if the key is filled but is empty. In that case also + crashes so do not fill the key if unfilled value should not cause error. + + This is primarily created for Simple Creator instances. 
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py
new file mode 100644
index 0000000000..749199fbd3
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/validate_filepaths.py
@@ -0,0 +1,68 @@
+import os
+import pyblish.api
+from openpype.pipeline import PublishValidationError
+
+
+class ValidateFilePath(pyblish.api.InstancePlugin):
+    """Validate existence of source filepaths on instance.
+
+    The plugin looks into the 'sourceFilepaths' key and validates that
+    the paths there actually exist on disk.
+
+    It also fails when the key is filled but empty, so do not fill the
+    key if an unfilled value should not cause an error.
+
+    This is primarily created for Simple Creator instances.
+    """
+
+    label = "Validate Filepaths"
+    order = pyblish.api.ValidatorOrder - 0.49
+
+    hosts = ["traypublisher"]
+
+    def process(self, instance):
+        if "sourceFilepaths" not in instance.data:
+            self.log.info((
+                "Skipped validation of source filepaths existence."
+                " Instance does not have collected 'sourceFilepaths'"
+            ))
+            return
+
+        family = instance.data["family"]
+        label = instance.data["name"]
+        filepaths = instance.data["sourceFilepaths"]
+        if not filepaths:
+            raise PublishValidationError(
+                (
+                    "Source filepaths of '{}' instance \"{}\" are not filled"
+                ).format(family, label),
+                "File not filled",
+                (
+                    "## Files were not filled"
+                    "\nThis means that you didn't enter any files into a"
+                    " required file input."
+                    "\n- Please refresh publishing and check instance"
+                    " {}"
+                ).format(label)
+            )
+
+        not_found_files = [
+            filepath
+            for filepath in filepaths
+            if not os.path.exists(filepath)
+        ]
+        if not_found_files:
+            joined_paths = "\n".join([
+                "- {}".format(filepath)
+                for filepath in not_found_files
+            ])
+            raise PublishValidationError(
+                (
+                    "Filepath of '{}' instance \"{}\" does not exist:\n{}"
+                ).format(family, label, joined_paths),
+                "File not found",
+                (
+                    "## Files were not found\nFiles\n{}"
+                    "\n\nCheck if the path is still available."
+                ).format(joined_paths)
+            )
diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
new file mode 100644
index 0000000000..b962ea464a
--- /dev/null
+++ b/openpype/hosts/traypublisher/plugins/publish/validate_frame_ranges.py
@@ -0,0 +1,75 @@
+import re
+
+import pyblish.api
+
+from openpype.pipeline.publish import (
+    ValidateContentsOrder,
+    PublishXmlValidationError,
+    OptionalPyblishPluginMixin,
+)
+
+
+class ValidateFrameRange(OptionalPyblishPluginMixin,
+                         pyblish.api.InstancePlugin):
+    """Validate frame range of rendered files against state in DB."""
+
+    label = "Validate Frame Range"
+    hosts = ["traypublisher"]
+    families = ["render"]
+    order = ValidateContentsOrder
+
+    optional = True
+    # Published data might be a single video file (.mov, .mp4);
+    # counting files doesn't make sense there, so only check
+    # image-sequence extensions
+    check_extensions = ["exr", "dpx", "jpg", "jpeg", "png", "tiff", "tga",
+                        "gif", "svg"]
+    skip_timelines_check = []  # skip for specific task names (regex)
+
+    def process(self, instance):
+        # Skip the instance if is not active by data on the instance
+        if not self.is_active(instance.data):
+            return
+
+        if (self.skip_timelines_check and
+                any(re.search(pattern, instance.data["task"])
+                    for pattern in self.skip_timelines_check)):
+            self.log.info("Skipping for {} task".format(
+                instance.data["task"]))
+            return
+
+        asset_doc = instance.data["assetEntity"]
+        asset_data = asset_doc["data"]
+        frame_start = asset_data["frameStart"]
+        frame_end = asset_data["frameEnd"]
+        handle_start = asset_data["handleStart"]
+        handle_end = asset_data["handleEnd"]
+        duration = (frame_end - frame_start + 1) + handle_start + handle_end
+
+        repres = instance.data.get("representations")
+        if not repres:
+            self.log.info("No representations, skipping.")
+            return
+
+        first_repre = repres[0]
+        ext = first_repre['ext'].replace(".", '')
+
+        if not ext or ext.lower() not in self.check_extensions:
+            self.log.warning("Cannot check for extension {}".format(ext))
+            return
+
+        files = first_repre["files"]
+        if isinstance(files, str):
+            files = [files]
+        frames = len(files)
+
+        msg = (
+            "Frame duration from DB:'{}' doesn't match number of files:'{}'"
+            " Please change frame range for Asset or limit no. of files"
+        ).
format(int(duration), frames) + + formatting_data = {"duration": duration, + "found": frames} + if frames != duration: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + self.log.debug("Valid ranges expected '{}' - found '{}'". + format(int(duration), frames)) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py new file mode 100644 index 0000000000..2db865ca2b --- /dev/null +++ b/openpype/hosts/traypublisher/plugins/publish/validate_online_file.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +import pyblish.api + +from openpype.pipeline.publish import ( + ValidateContentsOrder, + PublishValidationError, + OptionalPyblishPluginMixin, +) +from openpype.client import get_subset_by_name + + +class ValidateOnlineFile(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate that subset doesn't exist yet.""" + label = "Validate Existing Online Files" + hosts = ["traypublisher"] + families = ["online"] + order = ValidateContentsOrder + + optional = True + + def process(self, instance): + if not self.is_active(instance.data): + return + project_name = instance.context.data["projectName"] + asset_id = instance.data["assetEntity"]["_id"] + subset = get_subset_by_name( + project_name, instance.data["subset"], asset_id) + + if subset: + raise PublishValidationError( + "Subset to be published already exists.", + title=self.label + ) diff --git a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py b/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py deleted file mode 100644 index 88339d2aac..0000000000 --- a/openpype/hosts/traypublisher/plugins/publish/validate_workfile.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -import pyblish.api -from openpype.pipeline import PublishValidationError - - -class ValidateWorkfilePath(pyblish.api.InstancePlugin): - """Validate existence of workfile instance existence.""" - - label = "Collect Workfile" - order = pyblish.api.ValidatorOrder - 0.49 - families = ["workfile"] - hosts = ["traypublisher"] - - def process(self, instance): - filepath = instance.data["sourceFilepath"] - if not filepath: - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" is not set" - ).format(instance.data["name"])) - - if not os.path.exists(filepath): - raise PublishValidationError(( - "Filepath of 'workfile' instance \"{}\" does not exist: {}" - ).format(instance.data["name"], filepath)) diff --git a/openpype/hosts/tvpaint/__init__.py b/openpype/hosts/tvpaint/__init__.py index 09b7c52cd1..b98680f204 100644 --- a/openpype/hosts/tvpaint/__init__.py +++ b/openpype/hosts/tvpaint/__init__.py @@ -1,20 +1,12 @@ -import os +from .addon import ( + get_launch_script_path, + TVPaintAddon, + TVPAINT_ROOT_DIR, +) -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value - - -def get_launch_script_path(): - current_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join( - current_dir, - "api", - "launch_script.py" - ) +__all__ = ( + "get_launch_script_path", + "TVPaintAddon", + "TVPAINT_ROOT_DIR", +) diff --git a/openpype/hosts/tvpaint/addon.py b/openpype/hosts/tvpaint/addon.py new file mode 100644 index 0000000000..b695bf8ecc --- /dev/null +++ b/openpype/hosts/tvpaint/addon.py @@ -0,0 +1,40 @@ +import os +from 
openpype.modules import OpenPypeModule, IHostAddon + +TVPAINT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +def get_launch_script_path(): + return os.path.join( + TVPAINT_ROOT_DIR, + "api", + "launch_script.py" + ) + + +class TVPaintAddon(OpenPypeModule, IHostAddon): + name = "tvpaint" + host_name = "tvpaint" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, _app): + """Modify environments to contain all required for implementation.""" + + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(TVPAINT_ROOT_DIR, "hooks") + ] + + def get_workfile_extensions(self): + return [".tvpp"] diff --git a/openpype/hosts/tvpaint/api/__init__.py b/openpype/hosts/tvpaint/api/__init__.py index c461b33f4b..7b53aad9a4 100644 --- a/openpype/hosts/tvpaint/api/__init__.py +++ b/openpype/hosts/tvpaint/api/__init__.py @@ -1,49 +1,11 @@ from .communication_server import CommunicationWrapper -from . import lib -from . import launch_script -from . import workio -from . import pipeline -from . import plugin from .pipeline import ( - install, - uninstall, - maintained_selection, - remove_instance, - list_instances, - ls -) - -from .workio import ( - open_file, - save_file, - current_file, - has_unsaved_changes, - file_extensions, - work_root, + TVPaintHost, ) __all__ = ( "CommunicationWrapper", - "lib", - "launch_script", - "workio", - "pipeline", - "plugin", - - "install", - "uninstall", - "maintained_selection", - "remove_instance", - "list_instances", - "ls", - - # Workfiles API - "open_file", - "save_file", - "current_file", - "has_unsaved_changes", - "file_extensions", - "work_root" + "TVPaintHost", ) diff --git a/openpype/hosts/tvpaint/api/communication_server.py b/openpype/hosts/tvpaint/api/communication_server.py index 65cb9aa2f3..6ac3e6324c 100644 --- a/openpype/hosts/tvpaint/api/communication_server.py +++ b/openpype/hosts/tvpaint/api/communication_server.py @@ -707,6 +707,9 @@ class BaseCommunicator: if exit_code is not None: self.exit_code = exit_code + if self.exit_code is None: + self.exit_code = 0 + def stop(self): """Stop communication and currently running python process.""" log.info("Stopping communication") diff --git a/openpype/hosts/tvpaint/api/launch_script.py b/openpype/hosts/tvpaint/api/launch_script.py index e66bf61df6..614dbe8a6e 100644 --- a/openpype/hosts/tvpaint/api/launch_script.py +++ b/openpype/hosts/tvpaint/api/launch_script.py @@ -6,14 +6,14 @@ import ctypes import platform import logging -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui -from avalon import api from openpype import style -from openpype.hosts.tvpaint.api.communication_server import ( - CommunicationWrapper +from openpype.pipeline import install_host +from openpype.hosts.tvpaint.api import ( + TVPaintHost, + CommunicationWrapper, ) -from openpype.hosts.tvpaint import api as tvpaint_host log = logging.getLogger(__name__) @@ -30,8 +30,9 @@ def main(launch_args): # - QApplicaiton is also main thread/event loop of the server qt_app = QtWidgets.QApplication([]) + tvpaint_host = TVPaintHost() # Execute pipeline installation - api.install(tvpaint_host) + install_host(tvpaint_host) # Create Communicator object and trigger launch # - this must be done before anything is processed diff --git a/openpype/hosts/tvpaint/api/lib.py 
b/openpype/hosts/tvpaint/api/lib.py index 9e6404e72f..5e64773b8e 100644 --- a/openpype/hosts/tvpaint/api/lib.py +++ b/openpype/hosts/tvpaint/api/lib.py @@ -2,9 +2,7 @@ import os import logging import tempfile -import avalon.io - -from . import CommunicationWrapper +from .communication_server import CommunicationWrapper log = logging.getLogger(__name__) @@ -167,12 +165,12 @@ def parse_group_data(data): if not group_raw: continue - parts = group_raw.split(" ") + parts = group_raw.split("|") # Check for length and concatenate 2 last items until length match # - this happens if name contain spaces while len(parts) > 6: last_item = parts.pop(-1) - parts[-1] = " ".join([parts[-1], last_item]) + parts[-1] = "|".join([parts[-1], last_item]) clip_id, group_id, red, green, blue, name = parts group = { @@ -203,11 +201,16 @@ def get_groups_data(communicator=None): george_script_lines = ( # Variable containing full path to output file "output_path = \"{}\"".format(output_filepath), - "loop = 1", - "FOR idx = 1 TO 12", + "empty = 0", + # Loop over 100 groups + "FOR idx = 1 TO 100", + # Receive information about groups "tv_layercolor \"getcolor\" 0 idx", - "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' result", - "END" + "PARSE result clip_id group_index c_red c_green c_blue group_name", + # Create and add line to output file + "line = clip_id'|'group_index'|'c_red'|'c_green'|'c_blue'|'group_name", + "tv_writetextfile \"strict\" \"append\" '\"'output_path'\"' line", + "END", ) george_script = "\n".join(george_script_lines) execute_george_through_file(george_script, communicator) diff --git a/openpype/hosts/tvpaint/api/pipeline.py b/openpype/hosts/tvpaint/api/pipeline.py index ec880a1abc..249326791b 100644 --- a/openpype/hosts/tvpaint/api/pipeline.py +++ b/openpype/hosts/tvpaint/api/pipeline.py @@ -1,23 +1,21 @@ import os import json -import contextlib import tempfile import logging import requests import pyblish.api -import avalon.api -from avalon import io - -from openpype.hosts import tvpaint -from openpype.api import get_current_project_settings +from openpype.client import get_project, get_asset_by_name +from openpype.host import HostBase, IWorkfileHost, ILoadHost +from openpype.hosts.tvpaint import TVPAINT_ROOT_DIR +from openpype.settings import get_current_project_settings from openpype.lib import register_event_callback from openpype.pipeline import ( - LegacyCreator, + legacy_io, register_loader_plugin_path, - deregister_loader_plugin_path, + register_creator_plugin_path, AVALON_CONTAINER_ID, ) @@ -28,11 +26,6 @@ from .lib import ( log = logging.getLogger(__name__) -HOST_DIR = os.path.dirname(os.path.abspath(tvpaint.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") -LOAD_PATH = os.path.join(PLUGINS_DIR, "load") -CREATE_PATH = os.path.join(PLUGINS_DIR, "create") METADATA_SECTION = "avalon" SECTION_NAME_CONTEXT = "context" @@ -65,46 +58,152 @@ instances=2 """ -def install(): - """Install Maya-specific functionality of avalon-core. +class TVPaintHost(HostBase, IWorkfileHost, ILoadHost): + name = "tvpaint" - This function is called automatically on calling `api.install(maya)`. 
+ def install(self): + """Install TVPaint-specific functionality.""" - """ - log.info("OpenPype - Installing TVPaint integration") - io.install() + log.info("OpenPype - Installing TVPaint integration") + legacy_io.install() - # Create workdir folder if does not exist yet - workdir = io.Session["AVALON_WORKDIR"] - if not os.path.exists(workdir): - os.makedirs(workdir) + # Create workdir folder if does not exist yet + workdir = legacy_io.Session["AVALON_WORKDIR"] + if not os.path.exists(workdir): + os.makedirs(workdir) - pyblish.api.register_host("tvpaint") - pyblish.api.register_plugin_path(PUBLISH_PATH) - register_loader_plugin_path(LOAD_PATH) - avalon.api.register_plugin_path(LegacyCreator, CREATE_PATH) + plugins_dir = os.path.join(TVPAINT_ROOT_DIR, "plugins") + publish_dir = os.path.join(plugins_dir, "publish") + load_dir = os.path.join(plugins_dir, "load") + create_dir = os.path.join(plugins_dir, "create") - registered_callbacks = ( - pyblish.api.registered_callbacks().get("instanceToggled") or [] - ) - if on_instance_toggle not in registered_callbacks: - pyblish.api.register_callback("instanceToggled", on_instance_toggle) + pyblish.api.register_host("tvpaint") + pyblish.api.register_plugin_path(publish_dir) + register_loader_plugin_path(load_dir) + register_creator_plugin_path(create_dir) - register_event_callback("application.launched", initial_launch) - register_event_callback("application.exit", application_exit) + registered_callbacks = ( + pyblish.api.registered_callbacks().get("instanceToggled") or [] + ) + if self.on_instance_toggle not in registered_callbacks: + pyblish.api.register_callback( + "instanceToggled", self.on_instance_toggle + ) + register_event_callback("application.launched", self.initial_launch) + register_event_callback("application.exit", self.application_exit) -def uninstall(): - """Uninstall TVPaint-specific functionality of avalon-core. + def open_workfile(self, filepath): + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( + filepath.replace("\\", "/") + ) + return execute_george_through_file(george_script) - This function is called automatically on calling `api.uninstall()`. + def save_workfile(self, filepath=None): + if not filepath: + filepath = self.get_current_workfile() + context = { + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] + } + save_current_workfile_context(context) - """ - log.info("OpenPype - Uninstalling TVPaint integration") - pyblish.api.deregister_host("tvpaint") - pyblish.api.deregister_plugin_path(PUBLISH_PATH) - deregister_loader_plugin_path(LOAD_PATH) - avalon.api.deregister_plugin_path(LegacyCreator, CREATE_PATH) + # Execute george script to save workfile. + george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) + return execute_george(george_script) + + def work_root(self, session): + return session["AVALON_WORKDIR"] + + def get_current_workfile(self): + return execute_george("tv_GetProjectName") + + def workfile_has_unsaved_changes(self): + return None + + def get_workfile_extensions(self): + return [".tvpp"] + + def get_containers(self): + return get_containers() + + def initial_launch(self): + # Setup project settings if its the template that's launched. 
+ # TODO also check for template creation when it's possible to define + # templates + last_workfile = os.environ.get("AVALON_LAST_WORKFILE") + if not last_workfile or os.path.exists(last_workfile): + return + + log.info("Setting up project...") + set_context_settings() + + def remove_instance(self, instance): + """Remove instance from current workfile metadata. + + Implementation for Subset manager tool. + """ + + current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES) + instance_id = instance.get("uuid") + found_idx = None + if instance_id: + for idx, _inst in enumerate(current_instances): + if _inst["uuid"] == instance_id: + found_idx = idx + break + + if found_idx is None: + return + current_instances.pop(found_idx) + write_instances(current_instances) + + def application_exit(self): + """Logic related to TimerManager. + + Todo: + This should be handled out of TVPaint integration logic. + """ + + data = get_current_project_settings() + stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] + + if not stop_timer: + return + + # Stop application timer. + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + requests.post(rest_api_url) + + def on_instance_toggle(self, instance, old_value, new_value): + """Update instance data in workfile on publish toggle.""" + # Review may not have real instance in wokrfile metadata + if not instance.data.get("uuid"): + return + + instance_id = instance.data["uuid"] + found_idx = None + current_instances = list_instances() + for idx, workfile_instance in enumerate(current_instances): + if workfile_instance["uuid"] == instance_id: + found_idx = idx + break + + if found_idx is None: + return + + if "active" in current_instances[found_idx]: + current_instances[found_idx]["active"] = new_value + self.write_instances(current_instances) + + def list_instances(self): + """List all created instances from current workfile.""" + return list_instances() + + def write_instances(self, data): + return write_instances(data) def containerise( @@ -134,7 +233,7 @@ def containerise( "representation": str(context["representation"]["_id"]) } if current_containers is None: - current_containers = ls() + current_containers = get_containers() # Add container to containers list current_containers.append(container_data) @@ -145,15 +244,6 @@ def containerise( return container_data -@contextlib.contextmanager -def maintained_selection(): - # TODO implement logic - try: - yield - finally: - pass - - def split_metadata_string(text, chunk_length=None): """Split string by length. 
@@ -351,23 +441,6 @@ def save_current_workfile_context(context): return write_workfile_metadata(SECTION_NAME_CONTEXT, context) -def remove_instance(instance): - """Remove instance from current workfile metadata.""" - current_instances = get_workfile_metadata(SECTION_NAME_INSTANCES) - instance_id = instance.get("uuid") - found_idx = None - if instance_id: - for idx, _inst in enumerate(current_instances): - if _inst["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: - return - current_instances.pop(found_idx) - write_instances(current_instances) - - def list_instances(): """List all created instances from current workfile.""" return get_workfile_metadata(SECTION_NAME_INSTANCES) @@ -377,83 +450,31 @@ def write_instances(data): return write_workfile_metadata(SECTION_NAME_INSTANCES, data) -# Backwards compatibility -def _write_instances(*args, **kwargs): - return write_instances(*args, **kwargs) - - -def ls(): +def get_containers(): output = get_workfile_metadata(SECTION_NAME_CONTAINERS) if output: for item in output: if "objectName" not in item and "members" in item: members = item["members"] if isinstance(members, list): - members = "|".join(members) + members = "|".join([str(member) for member in members]) item["objectName"] = members return output -def on_instance_toggle(instance, old_value, new_value): - """Update instance data in workfile on publish toggle.""" - # Review may not have real instance in wokrfile metadata - if not instance.data.get("uuid"): - return - - instance_id = instance.data["uuid"] - found_idx = None - current_instances = list_instances() - for idx, workfile_instance in enumerate(current_instances): - if workfile_instance["uuid"] == instance_id: - found_idx = idx - break - - if found_idx is None: - return - - if "active" in current_instances[found_idx]: - current_instances[found_idx]["active"] = new_value - write_instances(current_instances) - - -def initial_launch(): - # Setup project settings if its the template that's launched. - # TODO also check for template creation when it's possible to define - # templates - last_workfile = os.environ.get("AVALON_LAST_WORKFILE") - if not last_workfile or os.path.exists(last_workfile): - return - - log.info("Setting up project...") - set_context_settings() - - -def application_exit(): - data = get_current_project_settings() - stop_timer = data["tvpaint"]["stop_timer_on_application_exit"] - - if not stop_timer: - return - - # Stop application timer. - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) - - def set_context_settings(asset_doc=None): """Set workfile settings by asset document data. Change fps, resolution and frame start/end. 
""" - if asset_doc is None: - # Use current session asset if not passed - asset_doc = avalon.io.find_one({ - "type": "asset", - "name": avalon.io.Session["AVALON_ASSET"] - }) - project_doc = avalon.io.find_one({"type": "project"}) + project_name = legacy_io.active_project() + if asset_doc is None: + asset_name = legacy_io.Session["AVALON_ASSET"] + # Use current session asset if not passed + asset_doc = get_asset_by_name(project_name, asset_name) + + project_doc = get_project(project_name) framerate = asset_doc["data"].get("fps") if framerate is None: diff --git a/openpype/hosts/tvpaint/api/plugin.py b/openpype/hosts/tvpaint/api/plugin.py index 15ad8905e0..da456e7067 100644 --- a/openpype/hosts/tvpaint/api/plugin.py +++ b/openpype/hosts/tvpaint/api/plugin.py @@ -4,11 +4,11 @@ import uuid from openpype.pipeline import ( LegacyCreator, LoaderPlugin, + registered_host, ) -from openpype.hosts.tvpaint.api import ( - pipeline, - lib -) + +from .lib import get_layers_data +from .pipeline import get_current_workfile_context class Creator(LegacyCreator): @@ -22,7 +22,7 @@ class Creator(LegacyCreator): dynamic_data = super(Creator, cls).get_dynamic_data(*args, **kwargs) # Change asset and name by current workfile context - workfile_context = pipeline.get_current_workfile_context() + workfile_context = get_current_workfile_context() asset_name = workfile_context.get("asset") task_name = workfile_context.get("task") if "asset" not in dynamic_data and asset_name: @@ -67,10 +67,12 @@ class Creator(LegacyCreator): self.log.debug( "Storing instance data to workfile. {}".format(str(data)) ) - return pipeline.write_instances(data) + host = registered_host() + return host.write_instances(data) def process(self): - data = pipeline.list_instances() + host = registered_host() + data = host.list_instances() data.append(self.data) self.write_instances(data) @@ -108,7 +110,7 @@ class Loader(LoaderPlugin): counter_regex = re.compile(r"_(\d{3})$") higher_counter = 0 - for layer in lib.get_layers_data(): + for layer in get_layers_data(): layer_name = layer["name"] if not layer_name.startswith(layer_name_base): continue diff --git a/openpype/hosts/tvpaint/api/workio.py b/openpype/hosts/tvpaint/api/workio.py deleted file mode 100644 index 88bdd7117e..0000000000 --- a/openpype/hosts/tvpaint/api/workio.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Host API required for Work Files. -# TODO @iLLiCiT implement functions: - has_unsaved_changes -""" - -from avalon import api -from openpype.pipeline import HOST_WORKFILE_EXTENSIONS -from .lib import ( - execute_george, - execute_george_through_file -) -from .pipeline import save_current_workfile_context - - -def open_file(filepath): - """Open the scene file in Blender.""" - george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( - filepath.replace("\\", "/") - ) - return execute_george_through_file(george_script) - - -def save_file(filepath): - """Save the open scene file.""" - # Store context to workfile before save - context = { - "project": api.Session["AVALON_PROJECT"], - "asset": api.Session["AVALON_ASSET"], - "task": api.Session["AVALON_TASK"] - } - save_current_workfile_context(context) - - # Execute george script to save workfile. 
- george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/")) - return execute_george(george_script) - - -def current_file(): - """Return the path of the open scene file.""" - george_script = "tv_GetProjectName" - return execute_george(george_script) - - -def has_unsaved_changes(): - """Does the open scene file have unsaved changes?""" - return False - - -def file_extensions(): - """Return the supported file extensions for Blender scene files.""" - return HOST_WORKFILE_EXTENSIONS["tvpaint"] - - -def work_root(session): - """Return the default root to browse for work files.""" - return session["AVALON_WORKDIR"] diff --git a/openpype/hosts/tvpaint/hooks/pre_launch_args.py b/openpype/hosts/tvpaint/hooks/pre_launch_args.py index 2a8f49d5b0..c31403437a 100644 --- a/openpype/hosts/tvpaint/hooks/pre_launch_args.py +++ b/openpype/hosts/tvpaint/hooks/pre_launch_args.py @@ -1,14 +1,8 @@ -import os -import shutil - -from openpype.hosts import tvpaint from openpype.lib import ( PreLaunchHook, get_openpype_execute_args ) -import avalon - class TvpaintPrelaunchHook(PreLaunchHook): """Launch arguments preparation. diff --git a/openpype/hosts/tvpaint/lib.py b/openpype/hosts/tvpaint/lib.py index 715ebb4a9d..95653b6ecb 100644 --- a/openpype/hosts/tvpaint/lib.py +++ b/openpype/hosts/tvpaint/lib.py @@ -573,7 +573,7 @@ def composite_rendered_layers( layer_ids_by_position[layer_position] = layer["layer_id"] # Sort layer positions - sorted_positions = tuple(sorted(layer_ids_by_position.keys())) + sorted_positions = tuple(reversed(sorted(layer_ids_by_position.keys()))) # Prepare variable where filepaths without any rendered content # - transparent will be created transparent_filepaths = set() @@ -646,9 +646,6 @@ def rename_filepaths_by_frame_start( filepaths_by_frame, range_start, range_end, new_frame_start ): """Change frames in filenames of finished images to new frame start.""" - # Skip if source first frame is same as destination first frame - if range_start == new_frame_start: - return # Calculate frame end new_frame_end = range_end + (new_frame_start - range_start) @@ -669,14 +666,17 @@ def rename_filepaths_by_frame_start( source_range = range(range_start, range_end + 1) output_range = range(new_frame_start, new_frame_end + 1) + # Skip if source first frame is same as destination first frame new_dst_filepaths = {} for src_frame, dst_frame in zip(source_range, output_range): - src_filepath = filepaths_by_frame[src_frame] - src_dirpath = os.path.dirname(src_filepath) + src_filepath = os.path.normpath(filepaths_by_frame[src_frame]) + dirpath, src_filename = os.path.split(src_filepath) dst_filename = filename_template.format(frame=dst_frame) - dst_filepath = os.path.join(src_dirpath, dst_filename) + dst_filepath = os.path.join(dirpath, dst_filename) - os.rename(src_filepath, dst_filepath) + if src_filename != dst_filename: + os.rename(src_filepath, dst_filepath) new_dst_filepaths[dst_frame] = dst_filepath + return new_dst_filepaths diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py index c1af9632b1..009b69c4f1 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_layer.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_layer.py @@ -1,11 +1,15 @@ -from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data +from openpype.pipeline import CreatorError from openpype.hosts.tvpaint.api import ( plugin, - pipeline, - lib, CommunicationWrapper ) +from 
openpype.hosts.tvpaint.api.lib import ( + get_layers_data, + get_groups_data, + execute_george_through_file, +) +from openpype.hosts.tvpaint.api.pipeline import list_instances class CreateRenderlayer(plugin.Creator): @@ -24,7 +28,9 @@ class CreateRenderlayer(plugin.Creator): " {clip_id} {group_id} {r} {g} {b} \"{name}\"" ) - dynamic_subset_keys = ["render_pass", "render_layer", "group"] + dynamic_subset_keys = [ + "renderpass", "renderlayer", "render_pass", "render_layer", "group" + ] @classmethod def get_dynamic_data( @@ -34,12 +40,17 @@ class CreateRenderlayer(plugin.Creator): variant, task_name, asset_id, project_name, host_name ) # Use render pass name from creator's plugin - dynamic_data["render_pass"] = cls.render_pass + dynamic_data["renderpass"] = cls.render_pass # Add variant to render layer - dynamic_data["render_layer"] = variant + dynamic_data["renderlayer"] = variant # Change family for subset name fill dynamic_data["family"] = "render" + # TODO remove - Backwards compatibility for old subset name templates + # - added 2022/04/28 + dynamic_data["render_pass"] = dynamic_data["renderpass"] + dynamic_data["render_layer"] = dynamic_data["renderlayer"] + return dynamic_data @classmethod @@ -56,7 +67,7 @@ class CreateRenderlayer(plugin.Creator): # Validate that communication is initialized if CommunicationWrapper.communicator: # Get currently selected layers - layers_data = lib.get_layers_data() + layers_data = get_layers_data() selected_layers = [ layer @@ -74,8 +85,8 @@ class CreateRenderlayer(plugin.Creator): def process(self): self.log.debug("Query data from workfile.") - instances = pipeline.list_instances() - layers_data = lib.get_layers_data() + instances = list_instances() + layers_data = get_layers_data() self.log.debug("Checking for selection groups.") # Collect group ids from selection @@ -102,7 +113,7 @@ class CreateRenderlayer(plugin.Creator): self.log.debug(f"Selected group id is \"{group_id}\".") self.data["group_id"] = group_id - group_data = lib.get_groups_data() + group_data = get_groups_data() group_name = None for group in group_data: if group["group_id"] == group_id: @@ -169,7 +180,7 @@ class CreateRenderlayer(plugin.Creator): return self.log.debug("Querying groups data from workfile.") - groups_data = lib.get_groups_data() + groups_data = get_groups_data() self.log.debug("Changing name of the group.") selected_group = None @@ -188,7 +199,7 @@ class CreateRenderlayer(plugin.Creator): b=selected_group["blue"], name=new_group_name ) - lib.execute_george_through_file(rename_script) + execute_george_through_file(rename_script) self.log.info( f"Name of group with index {group_id}" @@ -196,8 +207,8 @@ class CreateRenderlayer(plugin.Creator): ) def _ask_user_subset_override(self, instance): - from Qt import QtCore - from Qt.QtWidgets import QMessageBox + from qtpy import QtCore + from qtpy.QtWidgets import QMessageBox title = "Subset \"{}\" already exist".format(instance["subset"]) text = ( diff --git a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py index a7f717ccec..a44cb29f20 100644 --- a/openpype/hosts/tvpaint/plugins/create/create_render_pass.py +++ b/openpype/hosts/tvpaint/plugins/create/create_render_pass.py @@ -2,10 +2,10 @@ from openpype.pipeline import CreatorError from openpype.lib import prepare_template_data from openpype.hosts.tvpaint.api import ( plugin, - pipeline, - lib, CommunicationWrapper ) +from openpype.hosts.tvpaint.api.lib import get_layers_data +from 
openpype.hosts.tvpaint.api.pipeline import list_instances class CreateRenderPass(plugin.Creator): @@ -20,7 +20,9 @@ class CreateRenderPass(plugin.Creator): icon = "cube" defaults = ["Main"] - dynamic_subset_keys = ["render_pass", "render_layer"] + dynamic_subset_keys = [ + "renderpass", "renderlayer", "render_pass", "render_layer" + ] @classmethod def get_dynamic_data( @@ -29,9 +31,13 @@ class CreateRenderPass(plugin.Creator): dynamic_data = super(CreateRenderPass, cls).get_dynamic_data( variant, task_name, asset_id, project_name, host_name ) - dynamic_data["render_pass"] = variant + dynamic_data["renderpass"] = variant dynamic_data["family"] = "render" + # TODO remove - Backwards compatibility for old subset name templates + # - added 2022/04/28 + dynamic_data["render_pass"] = dynamic_data["renderpass"] + return dynamic_data @classmethod @@ -48,7 +54,7 @@ class CreateRenderPass(plugin.Creator): # Validate that communication is initialized if CommunicationWrapper.communicator: # Get currently selected layers - layers_data = lib.layers_data() + layers_data = get_layers_data() selected_layers = [ layer @@ -66,8 +72,8 @@ class CreateRenderPass(plugin.Creator): def process(self): self.log.debug("Query data from workfile.") - instances = pipeline.list_instances() - layers_data = lib.layers_data() + instances = list_instances() + layers_data = get_layers_data() self.log.debug("Checking selection.") # Get all selected layers and their group ids @@ -115,6 +121,7 @@ class CreateRenderPass(plugin.Creator): else: render_layer = beauty_instance["variant"] + subset_name_fill_data["renderlayer"] = render_layer subset_name_fill_data["render_layer"] = render_layer # Format dynamic keys in subset name @@ -129,7 +136,7 @@ class CreateRenderPass(plugin.Creator): self.data["group_id"] = group_id self.data["pass"] = variant - self.data["render_layer"] = render_layer + self.data["renderlayer"] = render_layer # Collect selected layer ids to be stored into instance layer_names = [layer["name"] for layer in selected_layers] diff --git a/openpype/hosts/tvpaint/plugins/load/load_image.py b/openpype/hosts/tvpaint/plugins/load/load_image.py index f861d0119e..5283d04355 100644 --- a/openpype/hosts/tvpaint/plugins/load/load_image.py +++ b/openpype/hosts/tvpaint/plugins/load/load_image.py @@ -1,5 +1,6 @@ -import qargparse -from openpype.hosts.tvpaint.api import lib, plugin +from openpype.lib.attribute_definitions import BoolDef +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import execute_george_through_file class ImportImage(plugin.Loader): @@ -26,26 +27,28 @@ class ImportImage(plugin.Loader): "preload": True } - options = [ - qargparse.Boolean( - "stretch", - label="Stretch to project size", - default=True, - help="Stretch loaded image/s to project resolution?" - ), - qargparse.Boolean( - "timestretch", - label="Stretch to timeline length", - default=True, - help="Clip loaded image/s to timeline length?" - ), - qargparse.Boolean( - "preload", - label="Preload loaded image/s", - default=True, - help="Preload image/s?" - ) - ] + @classmethod + def get_options(cls, contexts): + return [ + BoolDef( + "stretch", + label="Stretch to project size", + default=cls.defaults["stretch"], + tooltip="Stretch loaded image/s to project resolution?" + ), + BoolDef( + "timestretch", + label="Stretch to timeline length", + default=cls.defaults["timestretch"], + tooltip="Clip loaded image/s to timeline length?" 
+            ),
+            BoolDef(
+                "preload",
+                label="Preload loaded image/s",
+                default=cls.defaults["preload"],
+                tooltip="Preload image/s?"
+            )
+        ]
 
     def load(self, context, name, namespace, options):
         stretch = options.get("stretch", self.defaults["stretch"])
@@ -79,4 +82,4 @@ class ImportImage(plugin.Loader):
             layer_name,
             load_options_str
         )
-        return lib.execute_george_through_file(george_script)
+        return execute_george_through_file(george_script)
diff --git a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
index 5e4e3965d2..7f7a68cc41 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_reference_image.py
@@ -1,7 +1,20 @@
 import collections
-import qargparse
-from avalon.pipeline import get_representation_context
-from openpype.hosts.tvpaint.api import lib, pipeline, plugin
+
+from openpype.lib.attribute_definitions import BoolDef
+from openpype.pipeline import (
+    get_representation_context,
+    registered_host,
+)
+from openpype.hosts.tvpaint.api import plugin
+from openpype.hosts.tvpaint.api.lib import (
+    get_layers_data,
+    execute_george_through_file,
+)
+from openpype.hosts.tvpaint.api.pipeline import (
+    write_workfile_metadata,
+    SECTION_NAME_CONTAINERS,
+    containerise,
+)
 
 
 class LoadImage(plugin.Loader):
@@ -28,26 +41,28 @@ class LoadImage(plugin.Loader):
         "preload": True
     }
 
-    options = [
-        qargparse.Boolean(
-            "stretch",
-            label="Stretch to project size",
-            default=True,
-            help="Stretch loaded image/s to project resolution?"
-        ),
-        qargparse.Boolean(
-            "timestretch",
-            label="Stretch to timeline length",
-            default=True,
-            help="Clip loaded image/s to timeline length?"
-        ),
-        qargparse.Boolean(
-            "preload",
-            label="Preload loaded image/s",
-            default=True,
-            help="Preload image/s?"
-        )
-    ]
+    @classmethod
+    def get_options(cls, contexts):
+        return [
+            BoolDef(
+                "stretch",
+                label="Stretch to project size",
+                default=cls.defaults["stretch"],
+                tooltip="Stretch loaded image/s to project resolution?"
+            ),
+            BoolDef(
+                "timestretch",
+                label="Stretch to timeline length",
+                default=cls.defaults["timestretch"],
+                tooltip="Clip loaded image/s to timeline length?"
+            ),
+            BoolDef(
+                "preload",
+                label="Preload loaded image/s",
+                default=cls.defaults["preload"],
+                tooltip="Preload image/s?"
+            )
+        ]
 
     def load(self, context, name, namespace, options):
         stretch = options.get("stretch", self.defaults["stretch"])
@@ -79,10 +94,10 @@ class LoadImage(plugin.Loader):
             load_options_str
         )
 
-        lib.execute_george_through_file(george_script)
+        execute_george_through_file(george_script)
 
         loaded_layer = None
-        layers = lib.layers_data()
+        layers = get_layers_data()
         for layer in layers:
             if layer["name"] == layer_name:
                 loaded_layer = layer
@@ -95,7 +110,7 @@ class LoadImage(plugin.Loader):
         layer_names = [loaded_layer["name"]]
 
         namespace = namespace or layer_name
-        return pipeline.containerise(
+        return containerise(
             name=name,
             namespace=namespace,
             members=layer_names,
@@ -109,7 +124,7 @@ class LoadImage(plugin.Loader):
             return
 
         if layers is None:
-            layers = lib.layers_data()
+            layers = get_layers_data()
 
         available_ids = set(layer["layer_id"] for layer in layers)
@@ -152,14 +167,15 @@ class LoadImage(plugin.Loader):
             line = "tv_layerkill {}".format(layer_id)
             george_script_lines.append(line)
         george_script = "\n".join(george_script_lines)
-        lib.execute_george_through_file(george_script)
+        execute_george_through_file(george_script)
 
     def _remove_container(self, container, members=None):
         if not container:
             return
         representation = container["representation"]
         members = self.get_members_from_container(container)
-        current_containers = pipeline.ls()
+        host = registered_host()
+        current_containers = host.get_containers()
         pop_idx = None
         for idx, cur_con in enumerate(current_containers):
             cur_members = self.get_members_from_container(cur_con)
@@ -179,8 +195,8 @@ class LoadImage(plugin.Loader):
             return
 
         current_containers.pop(pop_idx)
-        pipeline.write_workfile_metadata(
-            pipeline.SECTION_NAME_CONTAINERS, current_containers
+        write_workfile_metadata(
+            SECTION_NAME_CONTAINERS, current_containers
         )
 
     def remove(self, container):
@@ -214,7 +230,7 @@ class LoadImage(plugin.Loader):
                 break
 
         old_layers = []
-        layers = lib.layers_data()
+        layers = get_layers_data()
         previous_layer_ids = set(layer["layer_id"] for layer in layers)
         if old_layers_are_ids:
             for layer in layers:
@@ -263,7 +279,7 @@ class LoadImage(plugin.Loader):
         new_container = self.load(context, name, namespace, {})
         new_layer_names = self.get_members_from_container(new_container)
 
-        layers = lib.layers_data()
+        layers = get_layers_data()
 
         new_layers = []
         for layer in layers:
@@ -304,4 +320,4 @@ class LoadImage(plugin.Loader):
         # Execute george scripts if there are any
         if george_script_lines:
             george_script = "\n".join(george_script_lines)
-            lib.execute_george_through_file(george_script)
+            execute_george_through_file(george_script)
diff --git a/openpype/hosts/tvpaint/plugins/load/load_sound.py b/openpype/hosts/tvpaint/plugins/load/load_sound.py
index 3f42370f5c..f312db262a 100644
--- a/openpype/hosts/tvpaint/plugins/load/load_sound.py
+++ b/openpype/hosts/tvpaint/plugins/load/load_sound.py
@@ -1,6 +1,9 @@
 import os
 import tempfile
-from openpype.hosts.tvpaint.api import lib, plugin
+from openpype.hosts.tvpaint.api import plugin
+from openpype.hosts.tvpaint.api.lib import (
+    execute_george_through_file,
+)
 
 
 class ImportSound(plugin.Loader):
@@ -64,7 +67,7 @@ class ImportSound(plugin.Loader):
         )
         self.log.info("*** George script:\n{}\n***".format(george_script))
         # Execute geoge script
-        lib.execute_george_through_file(george_script)
+        execute_george_through_file(george_script)
 
         # Read output file
         lines = []
diff --git a/openpype/hosts/tvpaint/plugins/load/load_workfile.py b/openpype/hosts/tvpaint/plugins/load/load_workfile.py
index d224cfc390..fc7588f56e 100644
--- 
a/openpype/hosts/tvpaint/plugins/load/load_workfile.py +++ b/openpype/hosts/tvpaint/plugins/load/load_workfile.py @@ -1,14 +1,23 @@ import os -from avalon import api, io -from openpype.lib import ( - StringTemplate, +from openpype.lib import StringTemplate +from openpype.pipeline import ( + registered_host, + legacy_io, + Anatomy, +) +from openpype.pipeline.workfile import ( get_workfile_template_key_from_context, - get_workdir_data, get_last_workfile_with_version, ) -from openpype.api import Anatomy -from openpype.hosts.tvpaint.api import lib, pipeline, plugin +from openpype.pipeline.template_data import get_template_data_with_names +from openpype.hosts.tvpaint.api import plugin +from openpype.hosts.tvpaint.api.lib import ( + execute_george_through_file, +) +from openpype.hosts.tvpaint.api.pipeline import ( + get_current_workfile_context, +) class LoadWorkfile(plugin.Loader): @@ -22,10 +31,10 @@ class LoadWorkfile(plugin.Loader): def load(self, context, name, namespace, options): # Load context of current workfile as first thing # - which context and extension has - host = api.registered_host() - current_file = host.current_file() + host = registered_host() + current_file = host.get_current_workfile() - context = pipeline.get_current_workfile_context() + context = get_current_workfile_context() filepath = self.fname.replace("\\", "/") @@ -37,47 +46,42 @@ class LoadWorkfile(plugin.Loader): george_script = "tv_LoadProject '\"'\"{}\"'\"'".format( filepath ) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) # Save workfile. host_name = "tvpaint" + project_name = context.get("project") asset_name = context.get("asset") task_name = context.get("task") # Far cases when there is workfile without context if not asset_name: - asset_name = io.Session["AVALON_ASSET"] - task_name = io.Session["AVALON_TASK"] - - project_doc = io.find_one({ - "type": "project" - }) - asset_doc = io.find_one({ - "type": "asset", - "name": asset_name - }) - project_name = project_doc["name"] + project_name = legacy_io.active_project() + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] template_key = get_workfile_template_key_from_context( asset_name, task_name, host_name, - project_name=project_name, - dbcon=io + project_name=project_name ) anatomy = Anatomy(project_name) - data = get_workdir_data(project_doc, asset_doc, task_name, host_name) + data = get_template_data_with_names( + project_name, asset_name, task_name, host_name + ) data["root"] = anatomy.roots file_template = anatomy.templates[template_key]["file"] # Define saving file extension + extensions = host.get_workfile_extensions() if current_file: # Match the extension of current file _, extension = os.path.splitext(current_file) else: # Fall back to the first extension supported for this host. 
- extension = host.file_extensions()[0] + extension = extensions[0] data["ext"] = extension @@ -86,7 +90,7 @@ class LoadWorkfile(plugin.Loader): folder_template, data ) version = get_last_workfile_with_version( - work_root, file_template, data, host.file_extensions() + work_root, file_template, data, extensions )[1] if version is None: @@ -100,4 +104,4 @@ class LoadWorkfile(plugin.Loader): file_template, data ) path = os.path.join(work_root, filename) - host.save_file(path) + host.save_workfile(path) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py index f291c363b8..d5b79758ad 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instance_frames.py @@ -6,7 +6,7 @@ class CollectOutputFrameRange(pyblish.api.ContextPlugin): When instances are collected context does not contain `frameStart` and `frameEnd` keys yet. They are collected in global plugin - `CollectAvalonEntities`. + `CollectContextEntities`. """ label = "Collect output frame range" order = pyblish.api.CollectorOrder diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py index 9cbfb61550..ae1326a5bd 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_instances.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_instances.py @@ -1,10 +1,10 @@ -import os import json import copy import pyblish.api -from avalon import io -from openpype.lib import get_subset_name_with_asset_doc +from openpype.client import get_asset_by_name +from openpype.pipeline import legacy_io +from openpype.pipeline.create import get_subset_name class CollectInstances(pyblish.api.ContextPlugin): @@ -20,30 +20,57 @@ class CollectInstances(pyblish.api.ContextPlugin): json.dumps(workfile_instances, indent=4) )) + filtered_instance_data = [] # Backwards compatibility for workfiles that already have review # instance in metadata. review_instance_exist = False for instance_data in workfile_instances: - if instance_data["family"] == "review": + family = instance_data["family"] + if family == "review": review_instance_exist = True - break + + elif family not in ("renderPass", "renderLayer"): + self.log.info("Unknown family \"{}\". 
Skipping {}".format( + family, json.dumps(instance_data, indent=4) + )) + continue + + filtered_instance_data.append(instance_data) # Fake review instance if review was not found in metadata families if not review_instance_exist: - workfile_instances.append( + filtered_instance_data.append( self._create_review_instance_data(context) ) - for instance_data in workfile_instances: + for instance_data in filtered_instance_data: instance_data["fps"] = context.data["sceneFps"] + # Conversion from older instances + # - change 'render_layer' to 'renderlayer' + render_layer = instance_data.get("instance_data") + if not render_layer: + # Render Layer has only variant + if instance_data["family"] == "renderLayer": + render_layer = instance_data.get("variant") + + # Backwards compatibility for renderPasses + elif "render_layer" in instance_data: + render_layer = instance_data["render_layer"] + + if render_layer: + instance_data["renderlayer"] = render_layer + # Store workfile instance data to instance data instance_data["originData"] = copy.deepcopy(instance_data) # Global instance data modifications # Fill families family = instance_data["family"] + families = [family] + if family != "review": + families.append("review") # Add `review` family for thumbnail integration - instance_data["families"] = [family, "review"] + instance_data["families"] = families # Instance name subset_name = instance_data["subset"] @@ -66,29 +93,28 @@ class CollectInstances(pyblish.api.ContextPlugin): if family == "review": # Change subset name of review instance + # Project name from workfile context + project_name = context.data["workfile_context"]["project"] + # Collect asset doc to get asset id # - not sure if it's good idea to require asset id in # get_subset_name? asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] # Host name from environment variable - host_name = os.environ["AVALON_APP"] + host_name = context.data["hostName"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] - new_subset_name = get_subset_name_with_asset_doc( + task_name = legacy_io.Session["AVALON_TASK"] + new_subset_name = get_subset_name( family, variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) instance_data["subset"] = new_subset_name @@ -106,12 +132,6 @@ class CollectInstances(pyblish.api.ContextPlugin): instance = self.create_render_pass_instance( context, instance_data ) - else: - raise AssertionError( - "Instance with unknown family \"{}\": {}".format( - family, instance_data - ) - ) if instance is None: continue @@ -151,7 +171,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Change subset name # Final family of an instance will be `render` new_family = "render" - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_Beauty".format( new_family, task_name.capitalize(), name ) @@ -186,7 +206,7 @@ class CollectInstances(pyblish.api.ContextPlugin): "Creating render pass instance. 
\"{}\"".format(pass_name) ) # Change label - render_layer = instance_data["render_layer"] + render_layer = instance_data["renderlayer"] # Backwards compatibility # - subset names were not stored as final subset names during creation @@ -196,7 +216,7 @@ class CollectInstances(pyblish.api.ContextPlugin): # Final family of an instance will be `render` new_family = "render" old_subset_name = instance_data["subset"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] new_subset_name = "{}{}_{}_{}".format( new_family, task_name.capitalize(), render_layer, pass_name ) diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py new file mode 100644 index 0000000000..92a2815ba0 --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py @@ -0,0 +1,114 @@ +import json +import copy +import pyblish.api + +from openpype.client import get_asset_by_name +from openpype.pipeline.create import get_subset_name + + +class CollectRenderScene(pyblish.api.ContextPlugin): + """Collect instance which renders whole scene in PNG. + + Creates instance with family 'renderScene' which will have all layers + to render which will be composite into one result. The instance is not + collected from scene. + + Scene will be rendered with all visible layers similar way like review is. + + Instance is disabled if there are any created instances of 'renderLayer' + or 'renderPass'. That is because it is expected that this instance is + used as lazy publish of TVPaint file. + + Subset name is created similar way like 'renderLayer' family. It can use + `renderPass` and `renderLayer` keys which can be set using settings and + `variant` is filled using `renderPass` value. + """ + label = "Collect Render Scene" + order = pyblish.api.CollectorOrder - 0.39 + hosts = ["tvpaint"] + + # Value of 'render_pass' in subset name template + render_pass = "beauty" + + # Settings attributes + enabled = False + # Value of 'render_layer' and 'variant' in subset name template + render_layer = "Main" + + def process(self, context): + # Check if there are created instances of renderPass and renderLayer + # - that will define if renderScene instance is enabled after + # collection + any_created_instance = False + for instance in context: + family = instance.data["family"] + if family in ("renderPass", "renderLayer"): + any_created_instance = True + break + + # Global instance data modifications + # Fill families + family = "renderScene" + # Add `review` family for thumbnail integration + families = [family, "review"] + + # Collect asset doc to get asset id + # - not sure if it's good idea to require asset id in + # get_subset_name? 
+        workfile_context = context.data["workfile_context"]
+        # Project name from workfile context
+        project_name = context.data["workfile_context"]["project"]
+        asset_name = workfile_context["asset"]
+        asset_doc = get_asset_by_name(project_name, asset_name)
+
+        # Host name from environment variable
+        host_name = context.data["hostName"]
+        # Variant is using render layer name
+        variant = self.render_layer
+        dynamic_data = {
+            "renderlayer": self.render_layer,
+            "renderpass": self.render_pass,
+        }
+        # TODO remove - Backwards compatibility for old subset name templates
+        # - added 2022/04/28
+        dynamic_data["render_layer"] = dynamic_data["renderlayer"]
+        dynamic_data["render_pass"] = dynamic_data["renderpass"]
+
+        task_name = workfile_context["task"]
+        subset_name = get_subset_name(
+            "render",
+            variant,
+            task_name,
+            asset_doc,
+            project_name,
+            host_name,
+            dynamic_data=dynamic_data,
+            project_settings=context.data["project_settings"]
+        )
+
+        instance_data = {
+            "family": family,
+            "families": families,
+            "fps": context.data["sceneFps"],
+            "subset": subset_name,
+            "name": subset_name,
+            "label": "{} [{}-{}]".format(
+                subset_name,
+                context.data["sceneMarkIn"] + 1,
+                context.data["sceneMarkOut"] + 1
+            ),
+            "active": not any_created_instance,
+            "publish": not any_created_instance,
+            "representations": [],
+            "layers": copy.deepcopy(context.data["layersData"]),
+            "asset": asset_name,
+            "task": task_name,
+            # Add render layer to instance data
+            "renderlayer": self.render_layer
+        }
+
+        instance = context.create_instance(**instance_data)
+
+        self.log.debug("Created instance: {}\n{}".format(
+            instance, json.dumps(instance.data, indent=4)
+        ))
diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
index 89348037d3..8c7c8c3899 100644
--- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
+++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
@@ -1,9 +1,10 @@
 import os
 import json
 import pyblish.api
-from avalon import io
 
-from openpype.lib import get_subset_name_with_asset_doc
+from openpype.client import get_asset_by_name
+from openpype.pipeline import legacy_io
+from openpype.pipeline.create import get_subset_name
 
 
 class CollectWorkfile(pyblish.api.ContextPlugin):
@@ -22,31 +23,30 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
         basename, ext = os.path.splitext(filename)
         instance = context.create_instance(name=basename)
 
+        # Project name from workfile context
+        project_name = context.data["workfile_context"]["project"]
+
+        # Get subset name of workfile instance
         # Collect asset doc to get asset id
         # - not sure if it's good idea to require asset id in
         #   get_subset_name?
family = "workfile" asset_name = context.data["workfile_context"]["asset"] - asset_doc = io.find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) - # Project name from workfile context - project_name = context.data["workfile_context"]["project"] # Host name from environment variable host_name = os.environ["AVALON_APP"] # Use empty variant value variant = "" - task_name = io.Session["AVALON_TASK"] - subset_name = get_subset_name_with_asset_doc( + task_name = legacy_io.Session["AVALON_TASK"] + subset_name = get_subset_name( family, variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) # Create Workfile instance diff --git a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py index f5c86c613b..8fe71a4a46 100644 --- a/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py +++ b/openpype/hosts/tvpaint/plugins/publish/collect_workfile_data.py @@ -3,8 +3,24 @@ import json import tempfile import pyblish.api -import avalon.api -from openpype.hosts.tvpaint.api import pipeline, lib + +from openpype.pipeline import legacy_io +from openpype.hosts.tvpaint.api.lib import ( + execute_george, + execute_george_through_file, + get_layers_data, + get_groups_data, +) +from openpype.hosts.tvpaint.api.pipeline import ( + SECTION_NAME_CONTEXT, + SECTION_NAME_INSTANCES, + SECTION_NAME_CONTAINERS, + + get_workfile_metadata_string, + write_workfile_metadata, + get_current_workfile_context, + list_instances, +) class ResetTVPaintWorkfileMetadata(pyblish.api.Action): @@ -14,12 +30,12 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action): def process(self, context, plugin): metadata_keys = { - pipeline.SECTION_NAME_CONTEXT: {}, - pipeline.SECTION_NAME_INSTANCES: [], - pipeline.SECTION_NAME_CONTAINERS: [] + SECTION_NAME_CONTEXT: {}, + SECTION_NAME_INSTANCES: [], + SECTION_NAME_CONTAINERS: [] } for metadata_key, default in metadata_keys.items(): - json_string = pipeline.get_workfile_metadata_string(metadata_key) + json_string = get_workfile_metadata_string(metadata_key) if not json_string: continue @@ -34,7 +50,7 @@ class ResetTVPaintWorkfileMetadata(pyblish.api.Action): ).format(metadata_key, default, json_string), exc_info=True ) - pipeline.write_workfile_metadata(metadata_key, default) + write_workfile_metadata(metadata_key, default) class CollectWorkfileData(pyblish.api.ContextPlugin): @@ -44,14 +60,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): actions = [ResetTVPaintWorkfileMetadata] def process(self, context): - current_project_id = lib.execute_george("tv_projectcurrentid") - lib.execute_george("tv_projectselect {}".format(current_project_id)) + current_project_id = execute_george("tv_projectcurrentid") + execute_george("tv_projectselect {}".format(current_project_id)) # Collect and store current context to have reference current_context = { - "project": avalon.api.Session["AVALON_PROJECT"], - "asset": avalon.api.Session["AVALON_ASSET"], - "task": avalon.api.Session["AVALON_TASK"] + "project": legacy_io.Session["AVALON_PROJECT"], + "asset": legacy_io.Session["AVALON_ASSET"], + "task": legacy_io.Session["AVALON_TASK"] } context.data["previous_context"] = current_context self.log.debug("Current context is: {}".format(current_context)) @@ -59,7 +75,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect context from workfile metadata self.log.info("Collecting workfile 
context") - workfile_context = pipeline.get_current_workfile_context() + workfile_context = get_current_workfile_context() # Store workfile context to pyblish context context.data["workfile_context"] = workfile_context if workfile_context: @@ -69,7 +85,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): ("AVALON_TASK", "task") ) for env_key, key in key_map: - avalon.api.Session[env_key] = workfile_context[key] + legacy_io.Session[env_key] = workfile_context[key] os.environ[env_key] = workfile_context[key] self.log.info("Context changed to: {}".format(workfile_context)) @@ -95,7 +111,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect instances self.log.info("Collecting instance data from workfile") - instance_data = pipeline.list_instances() + instance_data = list_instances() context.data["workfileInstances"] = instance_data self.log.debug( "Instance data:\"{}".format(json.dumps(instance_data, indent=4)) @@ -103,7 +119,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect information about layers self.log.info("Collecting layers data from workfile") - layers_data = lib.layers_data() + layers_data = get_layers_data() layers_by_name = {} for layer in layers_data: layer_name = layer["name"] @@ -119,14 +135,14 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): # Collect information about groups self.log.info("Collecting groups data from workfile") - group_data = lib.groups_data() + group_data = get_groups_data() context.data["groupsData"] = group_data self.log.debug( "Group data:\"{}".format(json.dumps(group_data, indent=4)) ) self.log.info("Collecting scene data from workfile") - workfile_info_parts = lib.execute_george("tv_projectinfo").split(" ") + workfile_info_parts = execute_george("tv_projectinfo").split(" ") # Project frame start - not used workfile_info_parts.pop(-1) @@ -138,10 +154,10 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): workfile_path = " ".join(workfile_info_parts).replace("\"", "") # Marks return as "{frame - 1} {state} ", example "0 set". - result = lib.execute_george("tv_markin") + result = execute_george("tv_markin") mark_in_frame, mark_in_state, _ = result.split(" ") - result = lib.execute_george("tv_markout") + result = execute_george("tv_markout") mark_out_frame, mark_out_state, _ = result.split(" ") scene_data = { @@ -155,7 +171,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): "sceneMarkInState": mark_in_state == "set", "sceneMarkOut": int(mark_out_frame), "sceneMarkOutState": mark_out_state == "set", - "sceneStartFrame": int(lib.execute_george("tv_startframe")), + "sceneStartFrame": int(execute_george("tv_startframe")), "sceneBgColor": self._get_bg_color() } self.log.debug( @@ -187,7 +203,7 @@ class CollectWorkfileData(pyblish.api.ContextPlugin): ] george_script = "\n".join(george_script_lines) - lib.execute_george_through_file(george_script) + execute_george_through_file(george_script) with open(output_filepath, "r") as stream: data = stream.read() diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py new file mode 100644 index 0000000000..ab5bbc5e2c --- /dev/null +++ b/openpype/hosts/tvpaint/plugins/publish/extract_convert_to_exr.py @@ -0,0 +1,99 @@ +"""Plugin converting png files from ExtractSequence into exrs. 
+
+Requires:
+    ExtractSequence - source of PNG
+    ExtractReview - review was already created so we can convert to any exr
+"""
+import os
+import json
+
+import pyblish.api
+from openpype.lib import (
+    get_oiio_tools_path,
+    run_subprocess,
+)
+from openpype.pipeline import KnownPublishError
+
+
+class ExtractConvertToEXR(pyblish.api.InstancePlugin):
+    # Offset to get after ExtractSequence plugin.
+    order = pyblish.api.ExtractorOrder + 0.1
+    label = "Extract Sequence EXR"
+    hosts = ["tvpaint"]
+    families = ["render"]
+
+    enabled = False
+
+    # Replace source PNG files or just add
+    replace_pngs = True
+    # EXR compression
+    exr_compression = "ZIP"
+
+    def process(self, instance):
+        repres = instance.data.get("representations")
+        if not repres:
+            return
+
+        oiio_path = get_oiio_tools_path()
+        # Raise an exception when oiiotool is not available
+        # - this can currently happen on MacOS machines
+        if not os.path.exists(oiio_path):
+            raise KnownPublishError(
+                "OpenImageIO tool is not available on this machine."
+            )
+
+        new_repres = []
+        for repre in repres:
+            if repre["name"] != "png":
+                continue
+
+            self.log.info(
+                "Processing representation: {}".format(
+                    json.dumps(repre, sort_keys=True, indent=4)
+                )
+            )
+
+            src_filepaths = set()
+            new_filenames = []
+            for src_filename in repre["files"]:
+                dst_filename = os.path.splitext(src_filename)[0] + ".exr"
+                new_filenames.append(dst_filename)
+
+                src_filepath = os.path.join(repre["stagingDir"], src_filename)
+                dst_filepath = os.path.join(repre["stagingDir"], dst_filename)
+
+                src_filepaths.add(src_filepath)
+
+                args = [
+                    oiio_path, src_filepath,
+                    "--compression", self.exr_compression,
+                    # TODO how to define color conversion?
+                    "--colorconvert", "sRGB", "linear",
+                    "-o", dst_filepath
+                ]
+                run_subprocess(args)
+
+            new_repres.append(
+                {
+                    "name": "exr",
+                    "ext": "exr",
+                    "files": new_filenames,
+                    "stagingDir": repre["stagingDir"],
+                    "tags": list(repre["tags"])
+                }
+            )
+
+            if self.replace_pngs:
+                instance.data["representations"].remove(repre)
+
+                for filepath in src_filepaths:
+                    instance.context.data["cleanupFullPaths"].append(filepath)
+
+        instance.data["representations"].extend(new_repres)
+        self.log.info(
+            "Representations: {}".format(
+                json.dumps(
+                    instance.data["representations"], sort_keys=True, indent=4
+                )
+            )
+        )
diff --git a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
index 729c545545..78074f720c 100644
--- a/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
+++ b/openpype/hosts/tvpaint/plugins/publish/extract_sequence.py
@@ -5,21 +5,27 @@ import tempfile
 from PIL import Image
 import pyblish.api
 
-from openpype.hosts.tvpaint.api import lib
+
+from openpype.hosts.tvpaint.api.lib import (
+    execute_george,
+    execute_george_through_file,
+    get_layers_pre_post_behavior,
+    get_layers_exposure_frames,
+)
 from openpype.hosts.tvpaint.lib import (
     calculate_layers_extraction_data,
     get_frame_filename_template,
     fill_reference_frames,
     composite_rendered_layers,
     rename_filepaths_by_frame_start,
-    composite_images
 )
 
 
 class ExtractSequence(pyblish.api.Extractor):
     label = "Extract Sequence"
     hosts = ["tvpaint"]
-    families = ["review", "renderPass", "renderLayer"]
+    families = ["review", "renderPass", "renderLayer", "renderScene"]
+    families_to_review = ["review"]
 
     # Modifiable with settings
     review_bg = [255, 255, 255, 255]
@@ -62,7 +68,7 @@
         #   different way when Start Frame is not `0`
         # NOTE It will be set back after rendering
scene_start_frame = instance.context.data["sceneStartFrame"] - lib.execute_george("tv_startframe 0") + execute_george("tv_startframe 0") # Frame start/end may be stored as float frame_start = int(instance.data["frameStart"]) @@ -74,14 +80,8 @@ class ExtractSequence(pyblish.api.Extractor): scene_bg_color = instance.context.data["sceneBgColor"] - # --- Fallbacks ---------------------------------------------------- - # This is required if validations of ranges are ignored. - # - all of this code won't change processing if range to render - # match to range of expected output - # Prepare output frames output_frame_start = frame_start - handle_start - output_frame_end = frame_end + handle_end # Change output frame start to 0 if handles cause it's negative number if output_frame_start < 0: @@ -91,32 +91,8 @@ class ExtractSequence(pyblish.api.Extractor): ).format(frame_start, handle_start)) output_frame_start = 0 - # Check Marks range and output range - output_range = output_frame_end - output_frame_start - marks_range = mark_out - mark_in - - # Lower Mark Out if mark range is bigger than output - # - do not rendered not used frames - if output_range < marks_range: - new_mark_out = mark_out - (marks_range - output_range) - self.log.warning(( - "Lowering render range to {} frames. Changed Mark Out {} -> {}" - ).format(marks_range + 1, mark_out, new_mark_out)) - # Assign new mark out to variable - mark_out = new_mark_out - - # Lower output frame end so representation has right `frameEnd` value - elif output_range > marks_range: - new_output_frame_end = ( - output_frame_end - (output_range - marks_range) - ) - self.log.warning(( - "Lowering representation range to {} frames." - " Changed frame end {} -> {}" - ).format(output_range + 1, mark_out, new_output_frame_end)) - output_frame_end = new_output_frame_end - - # ------------------------------------------------------------------- + # Calculate frame end + output_frame_end = output_frame_start + (mark_out - mark_in) # Save to staging dir output_dir = instance.data.get("stagingDir") @@ -144,7 +120,7 @@ class ExtractSequence(pyblish.api.Extractor): output_filepaths_by_frame_idx, thumbnail_fullpath = result # Change scene frame Start back to previous value - lib.execute_george("tv_startframe {}".format(scene_start_frame)) + execute_george("tv_startframe {}".format(scene_start_frame)) # Sequence of one frame if not output_filepaths_by_frame_idx: @@ -158,9 +134,9 @@ class ExtractSequence(pyblish.api.Extractor): output_frame_start ) - # Fill tags and new families + # Fill tags and new families from project settings tags = [] - if family_lowered in ("review", "renderlayer"): + if family_lowered in self.families_to_review: tags.append("review") # Sequence of one frame @@ -186,7 +162,7 @@ class ExtractSequence(pyblish.api.Extractor): instance.data["representations"].append(new_repre) - if family_lowered in ("renderpass", "renderlayer"): + if family_lowered in ("renderpass", "renderlayer", "renderscene"): # Change family to render instance.data["family"] = "render" @@ -272,7 +248,7 @@ class ExtractSequence(pyblish.api.Extractor): george_script_lines.append(" ".join(orig_color_command)) - lib.execute_george_through_file("\n".join(george_script_lines)) + execute_george_through_file("\n".join(george_script_lines)) first_frame_filepath = None output_filepaths_by_frame_idx = {} @@ -335,8 +311,8 @@ class ExtractSequence(pyblish.api.Extractor): return [], None self.log.debug("Collecting pre/post behavior of individual layers.") - behavior_by_layer_id = 
lib.get_layers_pre_post_behavior(layer_ids) - exposure_frames_by_layer_id = lib.get_layers_exposure_frames( + behavior_by_layer_id = get_layers_pre_post_behavior(layer_ids) + exposure_frames_by_layer_id = get_layers_exposure_frames( layer_ids, layers ) extraction_data_by_layer_id = calculate_layers_extraction_data( @@ -441,7 +417,7 @@ class ExtractSequence(pyblish.api.Extractor): ",".join(frames_to_render), layer_id, layer["name"] )) # Let TVPaint render layer's image - lib.execute_george_through_file("\n".join(george_script_lines)) + execute_george_through_file("\n".join(george_script_lines)) # Fill frames between `frame_start_index` and `frame_end_index` self.log.debug("Filling frames not rendered frames.") diff --git a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py index 24d6558168..a85caf2557 100644 --- a/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py +++ b/openpype/hosts/tvpaint/plugins/publish/increment_workfile_version.py @@ -1,7 +1,7 @@ import pyblish.api -from openpype.api import version_up -from openpype.hosts.tvpaint.api import workio +from openpype.lib import version_up +from openpype.pipeline import registered_host class IncrementWorkfileVersion(pyblish.api.ContextPlugin): @@ -17,6 +17,7 @@ class IncrementWorkfileVersion(pyblish.api.ContextPlugin): assert all(result["success"] for result in context.data["results"]), ( "Publishing not successful so version is not increased.") + host = registered_host() path = context.data["currentFile"] - workio.save_file(version_up(path)) + host.save_workfile(version_up(path)) self.log.info('Incrementing workfile version') diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py index 70816f9f18..7e35726030 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_asset_name.py @@ -1,6 +1,9 @@ import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import pipeline +from openpype.hosts.tvpaint.api.pipeline import ( + list_instances, + write_instances, +) class FixAssetNames(pyblish.api.Action): @@ -15,7 +18,7 @@ class FixAssetNames(pyblish.api.Action): def process(self, context, plugin): context_asset_name = context.data["asset"] - old_instance_items = pipeline.list_instances() + old_instance_items = list_instances() new_instance_items = [] for instance_item in old_instance_items: instance_asset_name = instance_item.get("asset") @@ -25,7 +28,7 @@ class FixAssetNames(pyblish.api.Action): ): instance_item["asset"] = context_asset_name new_instance_items.append(instance_item) - pipeline._write_instances(new_instance_items) + write_instances(new_instance_items) class ValidateAssetNames(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py index 7ea0587b8f..d3a04cc69f 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_layers_visibility.py @@ -8,7 +8,7 @@ class ValidateLayersVisiblity(pyblish.api.InstancePlugin): label = "Validate Layers Visibility" order = pyblish.api.ValidatorOrder - families = ["review", "renderPass", "renderLayer"] + families = ["review", "renderPass", "renderLayer", "renderScene"] def process(self, 
instance): layer_names = set() diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py index d1f299e006..0030b0fd1c 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_marks.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_marks.py @@ -2,7 +2,7 @@ import json import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import lib +from openpype.hosts.tvpaint.api.lib import execute_george class ValidateMarksRepair(pyblish.api.Action): @@ -15,10 +15,10 @@ class ValidateMarksRepair(pyblish.api.Action): def process(self, context, plugin): expected_data = ValidateMarks.get_expected_data(context) - lib.execute_george( + execute_george( "tv_markin {} set".format(expected_data["markIn"]) ) - lib.execute_george( + execute_george( "tv_markout {} set".format(expected_data["markOut"]) ) @@ -39,7 +39,7 @@ class ValidateMarks(pyblish.api.ContextPlugin): def get_expected_data(context): scene_mark_in = context.data["sceneMarkIn"] - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_end = context.data["frameEnd"] frame_start = context.data["frameStart"] handle_start = context.data["handleStart"] diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py index ddc738c6ed..066e54c670 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py @@ -1,6 +1,6 @@ import pyblish.api from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import lib +from openpype.hosts.tvpaint.api.lib import execute_george class RepairStartFrame(pyblish.api.Action): @@ -11,7 +11,7 @@ class RepairStartFrame(pyblish.api.Action): on = "failed" def process(self, context, plugin): - lib.execute_george("tv_startframe 0") + execute_george("tv_startframe 0") class ValidateStartFrame(pyblish.api.ContextPlugin): @@ -24,7 +24,7 @@ class ValidateStartFrame(pyblish.api.ContextPlugin): optional = True def process(self, context): - start_frame = lib.execute_george("tv_startframe") + start_frame = execute_george("tv_startframe") if start_frame == 0: return diff --git a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py index eac345f395..d66ae50c60 100644 --- a/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py +++ b/openpype/hosts/tvpaint/plugins/publish/validate_workfile_metadata.py @@ -1,6 +1,5 @@ import pyblish.api -from openpype.pipeline import PublishXmlValidationError -from openpype.hosts.tvpaint.api import save_file +from openpype.pipeline import PublishXmlValidationError, registered_host class ValidateWorkfileMetadataRepair(pyblish.api.Action): @@ -13,8 +12,9 @@ class ValidateWorkfileMetadataRepair(pyblish.api.Action): def process(self, context, _plugin): """Save current workfile which should trigger storing of metadata.""" current_file = context.data["currentFile"] + host = registered_host() # Save file should trigger - save_file(current_file) + host.save_workfile(current_file) class ValidateWorkfileMetadata(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp index bb67715cbd..88106bc770 100644 --- 
a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
+++ b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_code/library.cpp
@@ -302,8 +302,9 @@ private:
     std::string websocket_url;
     // Should be avalon plugin available?
     // - this may change during processing if websocketet url is not set or server is down
-    bool use_avalon;
+    bool server_available;
 public:
+    Communicator(std::string url);
     Communicator();
     websocket_endpoint endpoint;
     bool is_connected();
@@ -314,43 +315,45 @@ public:
     void call_notification(std::string method_name, nlohmann::json params);
 };
 
-Communicator::Communicator() {
+
+Communicator::Communicator(std::string url) {
     // URL to websocket server
-    websocket_url = std::getenv("WEBSOCKET_URL");
+    websocket_url = url;
     // Should be avalon plugin available?
    // - this may change during processing if websocketet url is not set or server is down
-    if (websocket_url == "") {
-        use_avalon = false;
+    if (url == "") {
+        server_available = false;
     } else {
-        use_avalon = true;
+        server_available = true;
    }
 }
+
 bool Communicator::is_connected(){
     return endpoint.connected();
 }
 
 bool Communicator::is_usable(){
-    return use_avalon;
+    return server_available;
 }
 
 void Communicator::connect() {
-    if (!use_avalon) {
+    if (!server_available) {
         return;
     }
     int con_result;
     con_result = endpoint.connect(websocket_url);
     if (con_result == -1) {
-        use_avalon = false;
+        server_available = false;
     } else {
-        use_avalon = true;
+        server_available = true;
     }
 }
 
 void Communicator::call_notification(std::string method_name, nlohmann::json params) {
-    if (!use_avalon || !is_connected()) {return;}
+    if (!server_available || !is_connected()) {return;}
 
     jsonrpcpp::Notification notification = {method_name, params};
     endpoint.send_notification(&notification);
@@ -358,7 +361,7 @@ void Communicator::call_notification(std::string method_name, nlohmann::json par
 jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann::json params) {
     jsonrpcpp::Response response;
-    if (!use_avalon || !is_connected())
+    if (!server_available || !is_connected())
     {
         return response;
     }
@@ -382,7 +385,7 @@ jsonrpcpp::Response Communicator::call_method(std::string method_name, nlohmann:
 }
 
 void Communicator::process_requests() {
-    if (!use_avalon || !is_connected() || Data.messages.empty()) {return;}
+    if (!server_available || !is_connected() || Data.messages.empty()) {return;}
 
     std::string msg = Data.messages.front();
     Data.messages.pop();
@@ -458,7 +461,7 @@ void register_callbacks(){
     parser.register_request_callback("execute_george", execute_george);
 }
 
-Communicator communication;
+Communicator* communication = nullptr;
 
 ////////////////////////////////////////////////////////////////////////////////////////
 
@@ -484,7 +487,7 @@ static char* GetLocalString( PIFilter* iFilter, int iNum, char* iDefault )
     // in the localized file (or the localized file doesn't exist).
std::string label_from_evn() { - std::string _plugin_label = "Avalon"; + std::string _plugin_label = "OpenPype"; if (std::getenv("AVALON_LABEL") && std::getenv("AVALON_LABEL") != "") { _plugin_label = std::getenv("AVALON_LABEL"); @@ -540,9 +543,12 @@ int FAR PASCAL PI_Open( PIFilter* iFilter ) { PI_Parameters( iFilter, NULL ); // NULL as iArg means "open the requester" } - - communication.connect(); - register_callbacks(); + char *env_value = std::getenv("WEBSOCKET_URL"); + if (env_value != NULL) { + communication = new Communicator(env_value); + communication->connect(); + register_callbacks(); + } return 1; // OK } @@ -560,7 +566,10 @@ void FAR PASCAL PI_Close( PIFilter* iFilter ) { TVCloseReq( iFilter, Data.mReq ); } - communication.endpoint.close_connection(); + if (communication != nullptr) { + communication->endpoint.close_connection(); + delete communication; + } } @@ -709,7 +718,7 @@ int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iA if (Data.menuItemsById.contains(button_up_item_id_str)) { std::string callback_name = Data.menuItemsById[button_up_item_id_str].get(); - communication.call_method(callback_name, nlohmann::json::array()); + communication->call_method(callback_name, nlohmann::json::array()); } TVExecute( iFilter ); break; @@ -737,7 +746,9 @@ int FAR PASCAL PI_Msg( PIFilter* iFilter, INTPTR iEvent, INTPTR iReq, INTPTR* iA { newMenuItemsProcess(iFilter); } - communication.process_requests(); + if (communication != nullptr) { + communication->process_requests(); + } } return 1; diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll index f7f5119ef3..7081778bee 100644 Binary files a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll and b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x64/plugin/OpenPypePlugin.dll differ diff --git a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll index f35e3ffe86..0f2afec245 100644 Binary files a/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll and b/openpype/hosts/tvpaint/tvpaint_plugin/plugin_files/windows_x86/plugin/OpenPypePlugin.dll differ diff --git a/openpype/hosts/tvpaint/worker/init_file.tvpp b/openpype/hosts/tvpaint/worker/init_file.tvpp new file mode 100644 index 0000000000..22170b45bc Binary files /dev/null and b/openpype/hosts/tvpaint/worker/init_file.tvpp differ diff --git a/openpype/hosts/tvpaint/worker/worker.py b/openpype/hosts/tvpaint/worker/worker.py index cfd40bc7ba..9295c8afb4 100644 --- a/openpype/hosts/tvpaint/worker/worker.py +++ b/openpype/hosts/tvpaint/worker/worker.py @@ -1,5 +1,8 @@ +import os import signal import time +import tempfile +import shutil import asyncio from openpype.hosts.tvpaint.api.communication_server import ( @@ -36,8 +39,28 @@ class TVPaintWorkerCommunicator(BaseCommunicator): super()._start_webserver() + def _open_init_file(self): + """Open init TVPaint file. + + File triggers dialog missing path to audio file which must be closed + once and is ignored for rest of running process. 
+ """ + current_dir = os.path.dirname(os.path.abspath(__file__)) + init_filepath = os.path.join(current_dir, "init_file.tvpp") + with tempfile.NamedTemporaryFile( + mode="w", prefix="a_tvp_", suffix=".tvpp" + ) as tmp_file: + tmp_filepath = tmp_file.name.replace("\\", "/") + + shutil.copy(init_filepath, tmp_filepath) + george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(tmp_filepath) + self.execute_george_through_file(george_script) + self.execute_george("tv_projectclose") + os.remove(tmp_filepath) + def _on_client_connect(self, *args, **kwargs): super()._on_client_connect(*args, **kwargs) + self._open_init_file() # Register as "ready to work" worker self._worker_connection.register_as_worker() diff --git a/openpype/hosts/tvpaint/worker/worker_job.py b/openpype/hosts/tvpaint/worker/worker_job.py index 1c785ab2ee..95c0a678bc 100644 --- a/openpype/hosts/tvpaint/worker/worker_job.py +++ b/openpype/hosts/tvpaint/worker/worker_job.py @@ -9,7 +9,7 @@ from abc import ABCMeta, abstractmethod, abstractproperty import six -from openpype.api import PypeLogger +from openpype.lib import Logger from openpype.modules import ModulesManager @@ -328,7 +328,7 @@ class TVPaintCommands: def log(self): """Access to logger object.""" if self._log is None: - self._log = PypeLogger.get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log @property diff --git a/openpype/hosts/unreal/__init__.py b/openpype/hosts/unreal/__init__.py index 533f315df3..42dd8f0ac4 100644 --- a/openpype/hosts/unreal/__init__.py +++ b/openpype/hosts/unreal/__init__.py @@ -1,20 +1,6 @@ -import os -import openpype.hosts +from .addon import UnrealAddon -def add_implementation_envs(env, _app): - """Modify environments to contain all required for implementation.""" - # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation - unreal_plugin_path = os.path.join( - os.path.dirname(os.path.abspath(openpype.hosts.__file__)), - "unreal", "integration" - ) - env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path - - # Set default environments if are not set via settings - defaults = { - "OPENPYPE_LOG_NO_COLORS": "True" - } - for key, value in defaults.items(): - if not env.get(key): - env[key] = value +__all__ = ( + "UnrealAddon", +) diff --git a/openpype/hosts/unreal/addon.py b/openpype/hosts/unreal/addon.py new file mode 100644 index 0000000000..e2c8484651 --- /dev/null +++ b/openpype/hosts/unreal/addon.py @@ -0,0 +1,41 @@ +import os +from openpype.modules import OpenPypeModule, IHostAddon + +UNREAL_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class UnrealAddon(OpenPypeModule, IHostAddon): + name = "unreal" + host_name = "unreal" + + def initialize(self, module_settings): + self.enabled = True + + def add_implementation_envs(self, env, app): + """Modify environments to contain all required for implementation.""" + # Set OPENPYPE_UNREAL_PLUGIN required for Unreal implementation + + ue_plugin = "UE_5.0" if app.name[:1] == "5" else "UE_4.7" + unreal_plugin_path = os.path.join( + UNREAL_ROOT_DIR, "integration", ue_plugin + ) + if not env.get("OPENPYPE_UNREAL_PLUGIN"): + env["OPENPYPE_UNREAL_PLUGIN"] = unreal_plugin_path + + # Set default environments if are not set via settings + defaults = { + "OPENPYPE_LOG_NO_COLORS": "True" + } + for key, value in defaults.items(): + if not env.get(key): + env[key] = value + + def get_launch_hook_paths(self, app): + if app.host_name != self.host_name: + return [] + return [ + os.path.join(UNREAL_ROOT_DIR, "hooks") + ] + + def 
get_workfile_extensions(self): + return [".uproject"] diff --git a/openpype/hosts/unreal/api/__init__.py b/openpype/hosts/unreal/api/__init__.py index ede71aa218..ca9db259e6 100644 --- a/openpype/hosts/unreal/api/__init__.py +++ b/openpype/hosts/unreal/api/__init__.py @@ -1,10 +1,8 @@ # -*- coding: utf-8 -*- """Unreal Editor OpenPype host API.""" -from .plugin import ( - Loader, - Creator -) +from .plugin import Loader + from .pipeline import ( install, uninstall, @@ -19,12 +17,13 @@ from .pipeline import ( show_tools_dialog, show_tools_popup, instantiate, + UnrealHost, + maintained_selection ) __all__ = [ "install", "uninstall", - "Creator", "Loader", "ls", "publish", @@ -36,5 +35,7 @@ __all__ = [ "show_experimental_tools", "show_tools_dialog", "show_tools_popup", - "instantiate" + "instantiate", + "UnrealHost", + "maintained_selection" ] diff --git a/openpype/hosts/unreal/api/pipeline.py b/openpype/hosts/unreal/api/pipeline.py index 713c588976..2081c8fd13 100644 --- a/openpype/hosts/unreal/api/pipeline.py +++ b/openpype/hosts/unreal/api/pipeline.py @@ -2,24 +2,30 @@ import os import logging from typing import List +from contextlib import contextmanager +import semver import pyblish.api -from avalon import api from openpype.pipeline import ( - LegacyCreator, register_loader_plugin_path, + register_creator_plugin_path, deregister_loader_plugin_path, + deregister_creator_plugin_path, AVALON_CONTAINER_ID, ) from openpype.tools.utils import host_tools import openpype.hosts.unreal +from openpype.host import HostBase, ILoadHost import unreal # noqa logger = logging.getLogger("openpype.hosts.unreal") OPENPYPE_CONTAINERS = "OpenPypeContainers" +UNREAL_VERSION = semver.VersionInfo( + *os.getenv("OPENPYPE_UNREAL_VERSION").split(".") +) HOST_DIR = os.path.dirname(os.path.abspath(openpype.hosts.unreal.__file__)) PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") @@ -29,6 +35,32 @@ CREATE_PATH = os.path.join(PLUGINS_DIR, "create") INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory") +class UnrealHost(HostBase, ILoadHost): + """Unreal host implementation. + + For some time this class will re-use functions from module based + implementation for backwards compatibility of older unreal projects. 
+ """ + + name = "unreal" + + def install(self): + install() + + def get_containers(self): + return ls() + + def show_tools_popup(self): + """Show tools popup with actions leading to show other tools.""" + + show_tools_popup() + + def show_tools_dialog(self): + """Show tools dialog with actions leading to show other tools.""" + + show_tools_dialog() + + def install(): """Install Unreal configuration for OpenPype.""" print("-=" * 40) @@ -47,9 +79,10 @@ def install(): print("installing OpenPype for Unreal ...") print("-=" * 40) logger.info("installing OpenPype for Unreal") + pyblish.api.register_host("unreal") pyblish.api.register_plugin_path(str(PUBLISH_PATH)) register_loader_plugin_path(str(LOAD_PATH)) - api.register_plugin_path(LegacyCreator, str(CREATE_PATH)) + register_creator_plugin_path(str(CREATE_PATH)) _register_callbacks() _register_events() @@ -58,7 +91,7 @@ def uninstall(): """Uninstall Unreal configuration for Avalon.""" pyblish.api.deregister_plugin_path(str(PUBLISH_PATH)) deregister_loader_plugin_path(str(LOAD_PATH)) - api.deregister_plugin_path(LegacyCreator, str(CREATE_PATH)) + deregister_creator_plugin_path(str(CREATE_PATH)) def _register_callbacks(): @@ -75,30 +108,6 @@ def _register_events(): pass -class Creator(LegacyCreator): - hosts = ["unreal"] - asset_types = [] - - def process(self): - nodes = list() - - with unreal.ScopedEditorTransaction("OpenPype Creating Instance"): - if (self.options or {}).get("useSelection"): - self.log.info("setting ...") - print("settings ...") - nodes = unreal.EditorUtilityLibrary.get_selected_assets() - - asset_paths = [a.get_path_name() for a in nodes] - self.name = move_assets_to_path( - "/Game", self.name, asset_paths - ) - - instance = create_publish_instance("/Game", self.name) - imprint(instance, self.data) - - return instance - - def ls(): """List all containers. @@ -107,7 +116,9 @@ def ls(): """ ar = unreal.AssetRegistryHelpers.get_asset_registry() - openpype_containers = ar.get_assets_by_class("AssetContainer", True) + # UE 5.1 changed how class name is specified + class_name = ["/Script/OpenPype", "AssetContainer"] if UNREAL_VERSION.major == 5 and UNREAL_VERSION.minor > 0 else "AssetContainer" # noqa + openpype_containers = ar.get_assets_by_class(class_name, True) # get_asset_by_class returns AssetData. To get all metadata we need to # load asset. get_tag_values() work only on metadata registered in @@ -416,3 +427,37 @@ def cast_map_to_str_dict(umap) -> dict: """ return {str(key): str(value) for (key, value) in umap.items()} + + +def get_subsequences(sequence: unreal.LevelSequence): + """Get list of subsequences from sequence. + + Args: + sequence (unreal.LevelSequence): Sequence + + Returns: + list(unreal.LevelSequence): List of subsequences + + """ + tracks = sequence.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track is not None and subscene_track.get_sections(): + return subscene_track.get_sections() + return [] + + +@contextmanager +def maintained_selection(): + """Stub to be either implemented or replaced. + + This is needed for old publisher implementation, but + it is not supported (yet) in UE. 
+ """ + try: + yield + finally: + pass diff --git a/openpype/hosts/unreal/api/plugin.py b/openpype/hosts/unreal/api/plugin.py index b24bab831d..6fc00cb71c 100644 --- a/openpype/hosts/unreal/api/plugin.py +++ b/openpype/hosts/unreal/api/plugin.py @@ -1,15 +1,7 @@ # -*- coding: utf-8 -*- from abc import ABC -from openpype.pipeline import ( - LegacyCreator, - LoaderPlugin, -) - - -class Creator(LegacyCreator): - """This serves as skeleton for future OpenPype specific functionality""" - defaults = ['Main'] +from openpype.pipeline import LoaderPlugin class Loader(LoaderPlugin, ABC): diff --git a/openpype/hosts/unreal/api/rendering.py b/openpype/hosts/unreal/api/rendering.py new file mode 100644 index 0000000000..29e4747f6e --- /dev/null +++ b/openpype/hosts/unreal/api/rendering.py @@ -0,0 +1,137 @@ +import os + +import unreal + +from openpype.pipeline import Anatomy +from openpype.hosts.unreal.api import pipeline + + +queue = None +executor = None + + +def _queue_finish_callback(exec, success): + unreal.log("Render completed. Success: " + str(success)) + + # Delete our reference so we don't keep it alive. + global executor + global queue + del executor + del queue + + +def _job_finish_callback(job, success): + # You can make any edits you want to the editor world here, and the world + # will be duplicated when the next render happens. Make sure you undo your + # edits in OnQueueFinishedCallback if you don't want to leak state changes + # into the editor world. + unreal.log("Individual job completed.") + + +def start_rendering(): + """ + Start the rendering process. + """ + print("Starting rendering...") + + # Get selected sequences + assets = unreal.EditorUtilityLibrary.get_selected_assets() + + # instances = pipeline.ls_inst() + instances = [ + a for a in assets + if a.get_class().get_name() == "OpenPypePublishInstance"] + + inst_data = [] + + for i in instances: + data = pipeline.parse_container(i.get_path_name()) + if data["family"] == "render": + inst_data.append(data) + + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception("Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}" + + # subsystem = unreal.get_editor_subsystem( + # unreal.MoviePipelineQueueSubsystem) + # queue = subsystem.get_queue() + global queue + queue = unreal.MoviePipelineQueue() + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for i in inst_data: + sequence = ar.get_asset_by_object_path(i["sequence"]).get_asset() + + sequences = [{ + "sequence": sequence, + "output": f"{i['output']}", + "frame_range": ( + int(float(i["frameStart"])), + int(float(i["frameEnd"])) + 1) + }] + render_list = [] + + # Get all the sequences to render. If there are subsequences, + # add them and their frame ranges to the render list. We also + # use the names for the output paths. + for s in sequences: + subscenes = pipeline.get_subsequences(s.get('sequence')) + + if subscenes: + for ss in subscenes: + sequences.append({ + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame()) + }) + else: + # Avoid rendering camera sequences + if "_camera" not in s.get('sequence').get_name(): + render_list.append(s) + + # Create the rendering jobs and add them to the queue. 
+ for r in render_list: + job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob) + job.sequence = unreal.SoftObjectPath(i["master_sequence"]) + job.map = unreal.SoftObjectPath(i["master_level"]) + job.author = "OpenPype" + + # User data could be used to pass data to the job, that can be + # read in the job's OnJobFinished callback. We could, + # for instance, pass the AvalonPublishInstance's path to the job. + # job.user_data = "" + + settings = job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineOutputSetting) + settings.output_resolution = unreal.IntPoint(1920, 1080) + settings.custom_start_frame = r.get("frame_range")[0] + settings.custom_end_frame = r.get("frame_range")[1] + settings.use_custom_playback_range = True + settings.file_name_format = "{sequence_name}.{frame_number}" + settings.output_directory.path = f"{render_dir}/{r.get('output')}" + + renderPass = job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineDeferredPassBase) + renderPass.disable_multisample_effects = True + + job.get_configuration().find_or_add_setting_by_class( + unreal.MoviePipelineImageSequenceOutput_PNG) + + # If there are jobs in the queue, start the rendering process. + if queue.get_jobs(): + global executor + executor = unreal.MoviePipelinePIEExecutor() + executor.on_executor_finished_delegate.add_callable_unique( + _queue_finish_callback) + executor.on_individual_job_finished_delegate.add_callable_unique( + _job_finish_callback) # Only available on PIE Executor + executor.execute(queue) diff --git a/openpype/hosts/unreal/api/tools_ui.py b/openpype/hosts/unreal/api/tools_ui.py index 93361c3574..708e167a65 100644 --- a/openpype/hosts/unreal/api/tools_ui.py +++ b/openpype/hosts/unreal/api/tools_ui.py @@ -1,5 +1,5 @@ import sys -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype import ( resources, @@ -7,6 +7,7 @@ from openpype import ( ) from openpype.tools.utils import host_tools from openpype.tools.utils.lib import qt_app_context +from openpype.hosts.unreal.api import rendering class ToolsBtnsWidget(QtWidgets.QWidget): @@ -20,6 +21,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): load_btn = QtWidgets.QPushButton("Load...", self) publish_btn = QtWidgets.QPushButton("Publish...", self) manage_btn = QtWidgets.QPushButton("Manage...", self) + render_btn = QtWidgets.QPushButton("Render...", self) experimental_tools_btn = QtWidgets.QPushButton( "Experimental tools...", self ) @@ -30,6 +32,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): layout.addWidget(load_btn, 0) layout.addWidget(publish_btn, 0) layout.addWidget(manage_btn, 0) + layout.addWidget(render_btn, 0) layout.addWidget(experimental_tools_btn, 0) layout.addStretch(1) @@ -37,6 +40,7 @@ class ToolsBtnsWidget(QtWidgets.QWidget): load_btn.clicked.connect(self._on_load) publish_btn.clicked.connect(self._on_publish) manage_btn.clicked.connect(self._on_manage) + render_btn.clicked.connect(self._on_render) experimental_tools_btn.clicked.connect(self._on_experimental) def _on_create(self): @@ -51,6 +55,9 @@ class ToolsBtnsWidget(QtWidgets.QWidget): def _on_manage(self): self.tool_required.emit("sceneinventory") + def _on_render(self): + rendering.start_rendering() + def _on_experimental(self): self.tool_required.emit("experimental_tools") diff --git a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py index f07e96551c..2dc6fb9f42 100644 --- a/openpype/hosts/unreal/hooks/pre_workfile_preparation.py +++ 
b/openpype/hosts/unreal/hooks/pre_workfile_preparation.py @@ -1,15 +1,15 @@ # -*- coding: utf-8 -*- """Hook to launch Unreal and prepare projects.""" import os +import copy from pathlib import Path from openpype.lib import ( PreLaunchHook, ApplicationLaunchFailed, ApplicationNotFound, - get_workdir_data, - get_workfile_template_key ) +from openpype.pipeline.workfile import get_workfile_template_key import openpype.hosts.unreal.lib as unreal_lib @@ -25,7 +25,7 @@ class UnrealPrelaunchHook(PreLaunchHook): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.signature = "( {} )".format(self.__class__.__name__) + self.signature = f"( {self.__class__.__name__} )" def _get_work_filename(self): # Use last workfile if was found @@ -35,18 +35,13 @@ class UnrealPrelaunchHook(PreLaunchHook): return last_workfile.name # Prepare data for fill data and for getting workfile template key - task_name = self.data["task_name"] anatomy = self.data["anatomy"] - asset_doc = self.data["asset_doc"] project_doc = self.data["project_doc"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") + # Use already prepared workdir data + workdir_data = copy.deepcopy(self.data["workdir_data"]) + task_type = workdir_data.get("task", {}).get("type") - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, self.host_name - ) # QUESTION raise exception if version is part of filename template? workdir_data["version"] = 1 workdir_data["ext"] = "uproject" @@ -71,7 +66,7 @@ class UnrealPrelaunchHook(PreLaunchHook): if int(engine_version.split(".")[0]) < 4 and \ int(engine_version.split(".")[1]) < 26: raise ApplicationLaunchFailed(( - f"{self.signature} Old unsupported version of UE4 " + f"{self.signature} Old unsupported version of UE " f"detected - {engine_version}")) except ValueError: # there can be string in minor version and in that case @@ -99,18 +94,19 @@ class UnrealPrelaunchHook(PreLaunchHook): f"character ({unreal_project_name}). 
Appending 'P'" )) unreal_project_name = f"P{unreal_project_name}" + unreal_project_filename = f'{unreal_project_name}.uproject' project_path = Path(os.path.join(workdir, unreal_project_name)) self.log.info(( - f"{self.signature} requested UE4 version: " + f"{self.signature} requested UE version: " f"[ {engine_version} ]" )) detected = unreal_lib.get_engine_versions(self.launch_context.env) detected_str = ', '.join(detected.keys()) or 'none' self.log.info(( - f"{self.signature} detected UE4 versions: " + f"{self.signature} detected UE versions: " f"[ {detected_str} ]" )) if not detected: @@ -123,10 +119,10 @@ class UnrealPrelaunchHook(PreLaunchHook): f"detected [ {engine_version} ]" )) - ue4_path = unreal_lib.get_editor_executable_path( - Path(detected[engine_version])) + ue_path = unreal_lib.get_editor_executable_path( + Path(detected[engine_version]), engine_version) - self.launch_context.launch_args = [ue4_path.as_posix()] + self.launch_context.launch_args = [ue_path.as_posix()] project_path.mkdir(parents=True, exist_ok=True) project_file = project_path / unreal_project_filename @@ -138,6 +134,11 @@ class UnrealPrelaunchHook(PreLaunchHook): )) # Set "OPENPYPE_UNREAL_PLUGIN" to current process environment for # execution of `create_unreal_project` + if self.launch_context.env.get("OPENPYPE_UNREAL_PLUGIN"): + self.log.info(( + f"{self.signature} using OpenPype plugin from " + f"{self.launch_context.env.get('OPENPYPE_UNREAL_PLUGIN')}" + )) env_key = "OPENPYPE_UNREAL_PLUGIN" if self.launch_context.env.get(env_key): os.environ[env_key] = self.launch_context.env[env_key] @@ -149,6 +150,7 @@ class UnrealPrelaunchHook(PreLaunchHook): engine_path=Path(engine_path) ) + self.launch_context.env["OPENPYPE_UNREAL_VERSION"] = engine_version # Append project file to launch arguments self.launch_context.launch_args.append( f"\"{project_file.as_posix()}\"") diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h deleted file mode 100644 index 0a27a078d7..0000000000 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstance.h +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -#include "Engine.h" -#include "OpenPypePublishInstance.generated.h" - - -UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypePublishInstance : public UObject -{ - GENERATED_BODY() - -public: - UOpenPypePublishInstance(const FObjectInitializer& ObjectInitalizer); - - UPROPERTY(EditAnywhere, BlueprintReadOnly) - TArray assets; -private: - void OnAssetAdded(const FAssetData& AssetData); - void OnAssetRemoved(const FAssetData& AssetData); - void OnAssetRenamed(const FAssetData& AssetData, const FString& str); -}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/.gitignore b/openpype/hosts/unreal/integration/UE_4.7/.gitignore similarity index 100% rename from openpype/hosts/unreal/integration/.gitignore rename to openpype/hosts/unreal/integration/UE_4.7/.gitignore diff --git a/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini b/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini new file mode 100644 index 0000000000..8a883cf1db --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_4.7/Config/DefaultOpenPypeSettings.ini @@ -0,0 +1,2 @@ +[/Script/OpenPype.OpenPypeSettings] +FolderColor=(R=91,G=197,B=220,A=255) \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py 
b/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py similarity index 73% rename from openpype/hosts/unreal/integration/Content/Python/init_unreal.py rename to openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py index 2ecd301c25..b85f970699 100644 --- a/openpype/hosts/unreal/integration/Content/Python/init_unreal.py +++ b/openpype/hosts/unreal/integration/UE_4.7/Content/Python/init_unreal.py @@ -2,21 +2,17 @@ import unreal openpype_detected = True try: - from avalon import api -except ImportError as exc: - api = None - openpype_detected = False - unreal.log_error("Avalon: cannot load Avalon [ {} ]".format(exc)) + from openpype.pipeline import install_host + from openpype.hosts.unreal.api import UnrealHost -try: - from openpype.hosts.unreal import api as openpype_host + openpype_host = UnrealHost() except ImportError as exc: openpype_host = None openpype_detected = False unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) if openpype_detected: - api.install(openpype_host) + install_host(openpype_host) @unreal.uclass() diff --git a/openpype/hosts/unreal/integration/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin similarity index 100% rename from openpype/hosts/unreal/integration/OpenPype.uplugin rename to openpype/hosts/unreal/integration/UE_4.7/OpenPype.uplugin diff --git a/openpype/hosts/unreal/integration/README.md b/openpype/hosts/unreal/integration/UE_4.7/README.md similarity index 91% rename from openpype/hosts/unreal/integration/README.md rename to openpype/hosts/unreal/integration/UE_4.7/README.md index a32d89aab8..a08c1ada39 100644 --- a/openpype/hosts/unreal/integration/README.md +++ b/openpype/hosts/unreal/integration/UE_4.7/README.md @@ -1,4 +1,4 @@ -# OpenPype Unreal Integration plugin +# OpenPype Unreal Integration plugin - UE 4.x This is plugin for Unreal Editor, creating menu for [OpenPype](https://github.com/getavalon) tools to run. diff --git a/openpype/hosts/unreal/integration/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype128.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype128.png diff --git a/openpype/hosts/unreal/integration/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype40.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype40.png diff --git a/openpype/hosts/unreal/integration/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png similarity index 100% rename from openpype/hosts/unreal/integration/Resources/openpype512.png rename to openpype/hosts/unreal/integration/UE_4.7/Resources/openpype512.png diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs similarity index 98% rename from openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs index c30835b63d..46e5dcb2df 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/OpenPype.Build.cs +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/OpenPype.Build.cs @@ -42,6 +42,7 @@ public class OpenPype : ModuleRules "Engine", "Slate", "SlateCore", + "AssetTools" // ... 
add private dependencies that you statically link with here ... } ); diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainer.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainer.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/AssetContainerFactory.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/AssetContainerFactory.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp similarity index 59% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp index 15c46b3862..d06a08eb43 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPype.cpp +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPype.cpp @@ -1,6 +1,11 @@ #include "OpenPype.h" + +#include "ISettingsContainer.h" +#include "ISettingsModule.h" +#include "ISettingsSection.h" #include "LevelEditor.h" #include "OpenPypePythonBridge.h" +#include "OpenPypeSettings.h" #include "OpenPypeStyle.h" @@ -11,13 +16,12 @@ static const FName OpenPypeTabName("OpenPype"); // This function is triggered when the plugin is staring up void FOpenPypeModule::StartupModule() { - FOpenPypeStyle::Initialize(); FOpenPypeStyle::SetIcon("Logo", "openpype40"); // Create the Extender that will add content to the menu FLevelEditorModule& LevelEditorModule = FModuleManager::LoadModuleChecked("LevelEditor"); - + TSharedPtr MenuExtender = MakeShareable(new FExtender()); TSharedPtr ToolbarExtender = MakeShareable(new FExtender()); @@ -37,6 +41,7 @@ void FOpenPypeModule::StartupModule() LevelEditorModule.GetMenuExtensibilityManager()->AddExtender(MenuExtender); LevelEditorModule.GetToolBarExtensibilityManager()->AddExtender(ToolbarExtender); + RegisterSettings(); } void FOpenPypeModule::ShutdownModule() @@ -64,7 +69,6 @@ void FOpenPypeModule::AddMenuEntry(FMenuBuilder& MenuBuilder) FSlateIcon(FOpenPypeStyle::GetStyleSetName(), "OpenPype.Logo"), FUIAction(FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog)) ); - } MenuBuilder.EndSection(); } @@ -89,13 +93,58 @@ void FOpenPypeModule::AddToobarEntry(FToolBarBuilder& ToolbarBuilder) ToolbarBuilder.EndSection(); } +void FOpenPypeModule::RegisterSettings() +{ + ISettingsModule& SettingsModule = FModuleManager::LoadModuleChecked("Settings"); -void FOpenPypeModule::MenuPopup() { + // Create the new category + // TODO: After the movement of the plugin from the game to editor, it might be necessary to move this! 
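+ // Registering under ("Project", "OpenPype", "General") surfaces the
+ // settings in Project Settings > OpenPype > General; the OnModified
+ // delegate bound below lets HandleSettingsSaved validate (and resave)
+ // values whenever the user edits them.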
+ ISettingsContainerPtr SettingsContainer = SettingsModule.GetContainer("Project"); + + UOpenPypeSettings* Settings = GetMutableDefault(); + + // Register the settings + ISettingsSectionPtr SettingsSection = SettingsModule.RegisterSettings("Project", "OpenPype", "General", + LOCTEXT("RuntimeGeneralSettingsName", + "General"), + LOCTEXT("RuntimeGeneralSettingsDescription", + "Base configuration for Open Pype Module"), + Settings + ); + + // Register the save handler to your settings, you might want to use it to + // validate those or just act to settings changes. + if (SettingsSection.IsValid()) + { + SettingsSection->OnModified().BindRaw(this, &FOpenPypeModule::HandleSettingsSaved); + } +} + +bool FOpenPypeModule::HandleSettingsSaved() +{ + UOpenPypeSettings* Settings = GetMutableDefault(); + bool ResaveSettings = false; + + // You can put any validation code in here and resave the settings in case an invalid + // value has been entered + + if (ResaveSettings) + { + Settings->SaveConfig(); + } + + return true; +} + + +void FOpenPypeModule::MenuPopup() +{ UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); bridge->RunInPython_Popup(); } -void FOpenPypeModule::MenuDialog() { +void FOpenPypeModule::MenuDialog() +{ UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get(); bridge->RunInPython_Dialog(); } diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp similarity index 53% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp index 5facab7b8b..a58e921288 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeLib.cpp +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeLib.cpp @@ -1,4 +1,6 @@ #include "OpenPypeLib.h" + +#include "AssetViewUtils.h" #include "Misc/Paths.h" #include "Misc/ConfigCacheIni.h" #include "UObject/UnrealType.h" @@ -10,21 +12,23 @@ * @warning This color will appear only after Editor restart. Is there a better way? */ -void UOpenPypeLib::CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd) +bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor, const bool& bForceAdd) { - auto SaveColorInternal = [](FString InPath, FLinearColor InFolderColor) + if (AssetViewUtils::DoesFolderExist(FolderPath)) { - // Saves the color of the folder to the config - if (FPaths::FileExists(GEditorPerProjectIni)) - { - GConfig->SetString(TEXT("PathColor"), *InPath, *InFolderColor.ToString(), GEditorPerProjectIni); - } + const TSharedPtr LinearColor = MakeShared(FolderColor); - }; - - SaveColorInternal(FolderPath, FolderColor); + AssetViewUtils::SaveColor(FolderPath, LinearColor, true); + UE_LOG(LogAssetData, Display, TEXT("A color {%s} has been set to folder \"%s\""), *LinearColor->ToString(), + *FolderPath) + return true; + } + UE_LOG(LogAssetData, Display, TEXT("Setting a color {%s} to folder \"%s\" has failed! 
Directory doesn't exist!"), + *FolderColor.ToString(), *FolderPath) + return false; } + /** * Returns all poperties on given object * @param cls - class diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp new file mode 100644 index 0000000000..38740f1cbd --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstance.cpp @@ -0,0 +1,200 @@ +#pragma once + +#include "OpenPypePublishInstance.h" +#include "AssetRegistryModule.h" +#include "NotificationManager.h" +#include "OpenPypeLib.h" +#include "OpenPypeSettings.h" +#include "SNotificationList.h" + +//Moves all the invalid pointers to the end to prepare them for the shrinking +#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \ + VAR.Shrink(); + +UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer) + : UPrimaryDataAsset(ObjectInitializer) +{ + const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked< + FAssetRegistryModule>("AssetRegistry"); + + const FPropertyEditorModule& PropertyEditorModule = FModuleManager::LoadModuleChecked( + "PropertyEditor"); + + FString Left, Right; + GetPathName().Split("/" + GetName(), &Left, &Right); + + FARFilter Filter; + Filter.PackagePaths.Emplace(FName(Left)); + + TArray FoundAssets; + AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets); + + for (const FAssetData& AssetData : FoundAssets) + OnAssetCreated(AssetData); + + REMOVE_INVALID_ENTRIES(AssetDataInternal) + REMOVE_INVALID_ENTRIES(AssetDataExternal) + + AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated); + AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved); + AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated); + +#ifdef WITH_EDITOR + ColorOpenPypeDirs(); +#endif + +} + +void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData) +{ + TArray split; + + UObject* Asset = InAssetData.GetAsset(); + + if (!IsValid(Asset)) + { + UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! 
Skipping the addition."), + *InAssetData.ObjectPath.ToString()); + return; + } + + const bool result = IsUnderSameDir(Asset) && Cast(Asset) == nullptr; + + if (result) + { + if (AssetDataInternal.Emplace(Asset).IsValidId()) + { + UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"), + *this->GetName(), *Asset->GetName()); + } + } +} + +void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData) +{ + if (Cast(InAssetData.GetAsset()) == nullptr) + { + if (AssetDataInternal.Contains(nullptr)) + { + AssetDataInternal.Remove(nullptr); + REMOVE_INVALID_ENTRIES(AssetDataInternal) + } + else + { + AssetDataExternal.Remove(nullptr); + REMOVE_INVALID_ENTRIES(AssetDataExternal) + } + } +} + +void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData) +{ + REMOVE_INVALID_ENTRIES(AssetDataInternal); + REMOVE_INVALID_ENTRIES(AssetDataExternal); +} + +bool UOpenPypePublishInstance::IsUnderSameDir(const UObject* InAsset) const +{ + FString ThisLeft, ThisRight; + this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight); + + return InAsset->GetPathName().StartsWith(ThisLeft); +} + +#ifdef WITH_EDITOR + +void UOpenPypePublishInstance::ColorOpenPypeDirs() +{ + FString PathName = this->GetPathName(); + + //Check whether the path contains the defined OpenPype folder + if (!PathName.Contains(TEXT("OpenPype"))) return; + + //Get the base path for open pype + FString PathLeft, PathRight; + PathName.Split(FString("OpenPype"), &PathLeft, &PathRight); + + if (PathLeft.IsEmpty() || PathRight.IsEmpty()) + { + UE_LOG(LogAssetData, Error, TEXT("Failed to retrieve the base OpenPype directory!")) + return; + } + + PathName.RemoveFromEnd(PathRight, ESearchCase::CaseSensitive); + + //Get the current settings + const UOpenPypeSettings* Settings = GetMutableDefault(); + + //Color the base folder + UOpenPypeLib::SetFolderColor(PathName, Settings->GetFolderFColor(), false); + + //Get Sub paths, iterate through them and color them according to the folder color in UOpenPypeSettings + const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked( + "AssetRegistry"); + + TArray PathList; + + AssetRegistryModule.Get().GetSubPaths(PathName, PathList, true); + + if (PathList.Num() > 0) + { + for (const FString& Path : PathList) + { + UOpenPypeLib::SetFolderColor(Path, Settings->GetFolderFColor(), false); + } + } +} + +void UOpenPypePublishInstance::SendNotification(const FString& Text) const +{ + FNotificationInfo Info{FText::FromString(Text)}; + + Info.bFireAndForget = true; + Info.bUseLargeFont = false; + Info.bUseThrobber = false; + Info.bUseSuccessFailIcons = false; + Info.ExpireDuration = 4.f; + Info.FadeOutDuration = 2.f; + + FSlateNotificationManager::Get().AddNotification(Info); + + UE_LOG(LogAssetData, Warning, + TEXT( + "Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!" 
+ ), *GetName() + ) +} + + +void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) +{ + Super::PostEditChangeProperty(PropertyChangedEvent); + + if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet && + PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED( + UOpenPypePublishInstance, AssetDataExternal)) + { + // Check for duplicated assets + for (const auto& Asset : AssetDataInternal) + { + if (AssetDataExternal.Contains(Asset)) + { + AssetDataExternal.Remove(Asset); + return SendNotification( + "You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!"); + } + } + + // Check if no UOpenPypePublishInstance type assets are included + for (const auto& Asset : AssetDataExternal) + { + if (Cast(Asset.Get()) != nullptr) + { + AssetDataExternal.Remove(Asset); + return SendNotification("You are not allowed to add publish instances!"); + } + } + } +} + +#endif diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp similarity index 65% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp index e61964c689..9b26da7fa4 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp @@ -9,10 +9,10 @@ UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectIn bEditorImport = true; } -UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) +UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) { - UOpenPypePublishInstance* OpenPypePublishInstance = NewObject(InParent, Class, Name, Flags); - return OpenPypePublishInstance; + check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass())); + return NewObject(InParent, InClass, InName, Flags); } bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const { diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePythonBridge.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypePythonBridge.cpp diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp new file mode 100644 index 0000000000..7134614d22 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeSettings.cpp @@ -0,0 +1,21 @@ +// Fill out your copyright notice in the Description page of Project Settings. 
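+ // The constructor below seeds FolderColor from the plugin's bundled
+ // Config/DefaultOpenPypeSettings.ini (resolved via the
+ // OPENPYPE_SETTINGS_FILEPATH macro), so a sensible default exists even
+ // before the project has saved its own config.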
+ +#include "OpenPypeSettings.h" + +#include "IPluginManager.h" +#include "UObjectGlobals.h" + +/** + * Mainly is used for initializing default values if the DefaultOpenPypeSettings.ini file does not exist in the saved config + */ +UOpenPypeSettings::UOpenPypeSettings(const FObjectInitializer& ObjectInitializer) +{ + + const FString ConfigFilePath = OPENPYPE_SETTINGS_FILEPATH; + + // This has to be probably in the future set using the UE Reflection system + FColor Color; + GConfig->GetColor(TEXT("/Script/OpenPype.OpenPypeSettings"), TEXT("FolderColor"), Color, ConfigFilePath); + + FolderColor = Color; +} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypeStyle.cpp rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Private/OpenPypeStyle.cpp diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainer.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainer.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/AssetContainerFactory.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/AssetContainerFactory.h diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h similarity index 87% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h index db3f299354..9cfa60176c 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPype.h +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPype.h @@ -12,10 +12,11 @@ public: virtual void ShutdownModule() override; private: + void RegisterSettings(); + bool HandleSettingsSaved(); void AddMenuEntry(FMenuBuilder& MenuBuilder); void AddToobarEntry(FToolBarBuilder& ToolbarBuilder); void MenuPopup(); void MenuDialog(); - }; diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h similarity index 61% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h index 59e9c8bd76..06425c7c7d 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeLib.h +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeLib.h @@ -5,14 +5,14 @@ UCLASS(Blueprintable) -class OPENPYPE_API UOpenPypeLib : public UObject +class OPENPYPE_API UOpenPypeLib : public UBlueprintFunctionLibrary { GENERATED_BODY() public: UFUNCTION(BlueprintCallable, Category = Python) - static void CSetFolderColor(FString FolderPath, FLinearColor FolderColor, bool bForceAdd); + static bool SetFolderColor(const FString& FolderPath, const FLinearColor& 
FolderColor,const bool& bForceAdd); UFUNCTION(BlueprintCallable, Category = Python) static TArray GetAllProperties(UClass* cls); diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h new file mode 100644 index 0000000000..cd414fe2cc --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstance.h @@ -0,0 +1,101 @@ +#pragma once + +#include "Engine.h" +#include "OpenPypePublishInstance.generated.h" + + +UCLASS(Blueprintable) +class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset +{ + GENERATED_UCLASS_BODY() + +public: + /** + * Retrieves all the assets which are monitored by the Publish Instance (Monitors assets in the directory which is + * placed in) + * + * @return - Set of UObjects. Careful! They are returning raw pointers. Seems like an issue in UE5 + */ + UFUNCTION(BlueprintCallable, BlueprintPure) + TSet GetInternalAssets() const + { + //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. + TSet ResultSet; + + for (const auto& Asset : AssetDataInternal) + ResultSet.Add(Asset.LoadSynchronous()); + + return ResultSet; + } + + /** + * Retrieves all the assets which have been added manually by the Publish Instance + * + * @return - TSet of assets (UObjects). Careful! They are returning raw pointers. Seems like an issue in UE5 + */ + UFUNCTION(BlueprintCallable, BlueprintPure) + TSet GetExternalAssets() const + { + //For some reason it can only return Raw Pointers? Seems like an issue which they haven't fixed. + TSet ResultSet; + + for (const auto& Asset : AssetDataExternal) + ResultSet.Add(Asset.LoadSynchronous()); + + return ResultSet; + } + + /** + * Function for returning all the assets in the container combined. + * + * @return Returns all the internal and externally added assets into one set (TSet of UObjects). Careful! They are + * returning raw pointers. Seems like an issue in UE5 + * + * @attention If the bAddExternalAssets variable is false, external assets won't be included! + */ + UFUNCTION(BlueprintCallable, BlueprintPure) + TSet GetAllAssets() const + { + const TSet>& IteratedSet = bAddExternalAssets + ? AssetDataInternal.Union(AssetDataExternal) + : AssetDataInternal; + + //Create a new TSet only with raw pointers. + TSet ResultSet; + + for (auto& Asset : IteratedSet) + ResultSet.Add(Asset.LoadSynchronous()); + + return ResultSet; + } + +private: + UPROPERTY(VisibleAnywhere, Category="Assets") + TSet> AssetDataInternal; + + /** + * This property allows exposing the array to include other assets from any other directory than what it's currently + * monitoring. NOTE: that these assets have to be added manually! They are not automatically registered or added! 
+ */ + UPROPERTY(EditAnywhere, Category = "Assets") + bool bAddExternalAssets = false; + + UPROPERTY(EditAnywhere, meta=(EditCondition="bAddExternalAssets"), Category="Assets") + TSet> AssetDataExternal; + + + void OnAssetCreated(const FAssetData& InAssetData); + void OnAssetRemoved(const FAssetData& InAssetData); + void OnAssetUpdated(const FAssetData& InAssetData); + + bool IsUnderSameDir(const UObject* InAsset) const; + +#ifdef WITH_EDITOR + + void ColorOpenPypeDirs(); + + void SendNotification(const FString& Text) const; + virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override; + +#endif +}; diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h similarity index 70% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h index a2b3abe13e..7d2c77fe6e 100644 --- a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h @@ -14,6 +14,6 @@ class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory public: UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer); - virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; + virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override; virtual bool ShouldShowInNewMenu() const override; -}; \ No newline at end of file +}; diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypePythonBridge.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypePythonBridge.h diff --git a/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h new file mode 100644 index 0000000000..2df6c887cf --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeSettings.h @@ -0,0 +1,32 @@ +// Fill out your copyright notice in the Description page of Project Settings. 
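+ // UCLASS(Config=OpenPypeSettings, DefaultConfig) maps this class to the
+ // [/Script/OpenPype.OpenPypeSettings] ini section, which is exactly the
+ // section name used by Config/DefaultOpenPypeSettings.ini above.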
+ +#pragma once + +#include "CoreMinimal.h" +#include "Object.h" +#include "OpenPypeSettings.generated.h" + +#define OPENPYPE_SETTINGS_FILEPATH IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Config") / TEXT("DefaultOpenPypeSettings.ini") + +UCLASS(Config=OpenPypeSettings, DefaultConfig) +class OPENPYPE_API UOpenPypeSettings : public UObject +{ + GENERATED_UCLASS_BODY() + + UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) + FColor GetFolderFColor() const + { + return FolderColor; + } + + UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) + FLinearColor GetFolderFLinearColor() const + { + return FLinearColor(FolderColor); + } + +protected: + + UPROPERTY(config, EditAnywhere, Category = Folders) + FColor FolderColor = FColor(25,45,223); +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h similarity index 100% rename from openpype/hosts/unreal/integration/Source/OpenPype/Public/OpenPypeStyle.h rename to openpype/hosts/unreal/integration/UE_4.7/Source/OpenPype/Public/OpenPypeStyle.h diff --git a/openpype/hosts/unreal/integration/UE_5.0/.gitignore b/openpype/hosts/unreal/integration/UE_5.0/.gitignore new file mode 100644 index 0000000000..b32a6f55e5 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/.gitignore @@ -0,0 +1,35 @@ +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +/Binaries +/Intermediate diff --git a/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini b/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini new file mode 100644 index 0000000000..8a883cf1db --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Config/DefaultOpenPypeSettings.ini @@ -0,0 +1,2 @@ +[/Script/OpenPype.OpenPypeSettings] +FolderColor=(R=91,G=197,B=220,A=255) \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py new file mode 100644 index 0000000000..b85f970699 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Content/Python/init_unreal.py @@ -0,0 +1,30 @@ +import unreal + +openpype_detected = True +try: + from openpype.pipeline import install_host + from openpype.hosts.unreal.api import UnrealHost + + openpype_host = UnrealHost() +except ImportError as exc: + openpype_host = None + openpype_detected = False + unreal.log_error("OpenPype: cannot load OpenPype [ {} ]".format(exc)) + +if openpype_detected: + install_host(openpype_host) + + +@unreal.uclass() +class OpenPypeIntegration(unreal.OpenPypePythonBridge): + @unreal.ufunction(override=True) + def RunInPython_Popup(self): + unreal.log_warning("OpenPype: showing tools popup") + if openpype_detected: + openpype_host.show_tools_popup() + + @unreal.ufunction(override=True) + def RunInPython_Dialog(self): + unreal.log_warning("OpenPype: showing tools dialog") + if openpype_detected: + openpype_host.show_tools_dialog() diff --git a/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin b/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin new file mode 100644 index 0000000000..4c7a74403c --- /dev/null +++ 
b/openpype/hosts/unreal/integration/UE_5.0/OpenPype.uplugin @@ -0,0 +1,24 @@ +{ + "FileVersion": 3, + "Version": 1, + "VersionName": "1.0", + "FriendlyName": "OpenPype", + "Description": "OpenPype Integration", + "Category": "OpenPype.Integration", + "CreatedBy": "Ondrej Samohel", + "CreatedByURL": "https://openpype.io", + "DocsURL": "https://openpype.io/docs/artist_hosts_unreal", + "MarketplaceURL": "", + "SupportURL": "https://pype.club/", + "CanContainContent": true, + "IsBetaVersion": true, + "IsExperimentalVersion": false, + "Installed": false, + "Modules": [ + { + "Name": "OpenPype", + "Type": "Editor", + "LoadingPhase": "Default" + } + ] +} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/README.md b/openpype/hosts/unreal/integration/UE_5.0/README.md new file mode 100644 index 0000000000..cf0aa622c2 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/README.md @@ -0,0 +1,11 @@ +# OpenPype Unreal Integration plugin - UE 5.x + +This is plugin for Unreal Editor, creating menu for [OpenPype](https://github.com/getavalon) tools to run. + +## How does this work + +Plugin is creating basic menu items in **Window/OpenPype** section of Unreal Editor main menu and a button +on the main toolbar with associated menu. Clicking on those menu items is calling callbacks that are +declared in C++ but needs to be implemented during Unreal Editor +startup in `Plugins/OpenPype/Content/Python/init_unreal.py` - this should be executed by Unreal Editor +automatically. diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png new file mode 100644 index 0000000000..abe8a807ef Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype128.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png new file mode 100644 index 0000000000..f983e7a1f2 Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype40.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png new file mode 100644 index 0000000000..97c4d4326b Binary files /dev/null and b/openpype/hosts/unreal/integration/UE_5.0/Resources/openpype512.png differ diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs new file mode 100644 index 0000000000..d853ec028f --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/OpenPype.Build.cs @@ -0,0 +1,64 @@ +// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved. + +using UnrealBuildTool; + +public class OpenPype : ModuleRules +{ + public OpenPype(ReadOnlyTargetRules Target) : base(Target) + { + DefaultBuildSettings = BuildSettingsVersion.V2; + bLegacyPublicIncludePaths = false; + ShadowVariableWarningLevel = WarningLevel.Error; + PCHUsage = ModuleRules.PCHUsageMode.UseExplicitOrSharedPCHs; + IncludeOrderVersion = EngineIncludeOrderVersion.Unreal5_0; + + PublicIncludePaths.AddRange( + new string[] { + // ... add public include paths required here ... + } + ); + + + PrivateIncludePaths.AddRange( + new string[] { + // ... add other private include paths required here ... + } + ); + + + PublicDependencyModuleNames.AddRange( + new string[] + { + "Core", + // ... 
+                // ... add other public dependencies that you statically link with here ...
+            }
+        );
+
+
+        PrivateDependencyModuleNames.AddRange(
+            new string[]
+            {
+                "Projects",
+                "InputCore",
+                "EditorFramework",
+                "UnrealEd",
+                "ToolMenus",
+                "LevelEditor",
+                "CoreUObject",
+                "Engine",
+                "Slate",
+                "SlateCore",
+                "AssetTools"
+                // ... add private dependencies that you statically link with here ...
+            }
+        );
+
+
+        DynamicallyLoadedModuleNames.AddRange(
+            new string[]
+            {
+                // ... add any modules that your module loads dynamically here ...
+            }
+        );
+    }
+}
diff --git a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp
similarity index 55%
rename from openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp
rename to openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp
index 4f1e846c0b..61e563f729 100644
--- a/openpype/hosts/unreal/integration/Source/OpenPype/Private/OpenPypePublishInstance.cpp
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainer.cpp
@@ -1,62 +1,66 @@
-#pragma once
+// Fill out your copyright notice in the Description page of Project Settings.
 
-#include "OpenPypePublishInstance.h"
-#include "AssetRegistryModule.h"
+#include "AssetContainer.h"
+#include "AssetRegistry/AssetRegistryModule.h"
+#include "Misc/PackageName.h"
+#include "Engine.h"
+#include "Containers/UnrealString.h"
 
-
-UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer)
-	: UObject(ObjectInitializer)
+UAssetContainer::UAssetContainer(const FObjectInitializer& ObjectInitializer)
+: UAssetUserData(ObjectInitializer)
 {
 	FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<FAssetRegistryModule>("AssetRegistry");
 
-	FString path = UOpenPypePublishInstance::GetPathName();
+	FString path = UAssetContainer::GetPathName();
+	UE_LOG(LogTemp, Warning, TEXT("UAssetContainer %s"), *path);
 
 	FARFilter Filter;
 	Filter.PackagePaths.Add(FName(*path));
-
-	AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetAdded);
-	AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved);
-	AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UOpenPypePublishInstance::OnAssetRenamed);
+
+	AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UAssetContainer::OnAssetAdded);
+	AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UAssetContainer::OnAssetRemoved);
+	AssetRegistryModule.Get().OnAssetRenamed().AddUObject(this, &UAssetContainer::OnAssetRenamed);
 }
 
-void UOpenPypePublishInstance::OnAssetAdded(const FAssetData& AssetData)
+void UAssetContainer::OnAssetAdded(const FAssetData& AssetData)
 {
 	TArray<FString> split;
 
 	// get directory of current container
-	FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+	FString selfFullPath = UAssetContainer::GetPathName();
 	FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
 
 	// get asset path and class
 	FString assetPath = AssetData.GetFullName();
-	FString assetFName = AssetData.AssetClass.ToString();
-
+	FString assetFName = AssetData.AssetClassPath.ToString();
+	UE_LOG(LogTemp, Log, TEXT("asset name %s"), *assetFName);
 
 	// split path
 	assetPath.ParseIntoArray(split, TEXT(" "), true);
 	FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
-
+
 	// take interest only in paths starting with path of current container
 	if (assetDir.StartsWith(*selfDir))
 	{
 		// exclude self
-		if (assetFName != "OpenPypePublishInstance")
+		if (assetFName != "AssetContainer")
 		{
 			assets.Add(assetPath);
+			assetsData.Add(AssetData);
 			UE_LOG(LogTemp, Log, TEXT("%s: asset added to %s"), *selfFullPath, *selfDir);
 		}
 	}
 }
 
-void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData)
+void UAssetContainer::OnAssetRemoved(const FAssetData& AssetData)
 {
 	TArray<FString> split;
 
 	// get directory of current container
-	FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+	FString selfFullPath = UAssetContainer::GetPathName();
 	FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
 
 	// get asset path and class
 	FString assetPath = AssetData.GetFullName();
-	FString assetFName = AssetData.AssetClass.ToString();
+	FString assetFName = AssetData.AssetClassPath.ToString();
 
 	// split path
 	assetPath.ParseIntoArray(split, TEXT(" "), true);
@@ -64,31 +68,32 @@ void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& AssetData)
 	FString assetDir = FPackageName::GetLongPackagePath(*split[1]);
 
 	// take interest only in paths starting with path of current container
-	FString path = UOpenPypePublishInstance::GetPathName();
+	FString path = UAssetContainer::GetPathName();
 	FString lpp = FPackageName::GetLongPackagePath(*path);
 
 	if (assetDir.StartsWith(*selfDir))
 	{
 		// exclude self
-		if (assetFName != "OpenPypePublishInstance")
+		if (assetFName != "AssetContainer")
 		{
 			// UE_LOG(LogTemp, Warning, TEXT("%s: asset removed"), *lpp);
 			assets.Remove(assetPath);
+			assetsData.Remove(AssetData);
 		}
 	}
 }
 
-void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
+void UAssetContainer::OnAssetRenamed(const FAssetData& AssetData, const FString& str)
 {
 	TArray<FString> split;
 
 	// get directory of current container
-	FString selfFullPath = UOpenPypePublishInstance::GetPathName();
+	FString selfFullPath = UAssetContainer::GetPathName();
 	FString selfDir = FPackageName::GetLongPackagePath(*selfFullPath);
 
 	// get asset path and class
 	FString assetPath = AssetData.GetFullName();
-	FString assetFName = AssetData.AssetClass.ToString();
+	FString assetFName = AssetData.AssetClassPath.ToString();
 
 	// split path
 	assetPath.ParseIntoArray(split, TEXT(" "), true);
@@ -102,7 +107,9 @@ void UOpenPypePublishInstance::OnAssetRenamed(const FAssetData& AssetData, const
 		assets.Remove(str);
 		assets.Add(assetPath);
+		assetsData.Remove(AssetData);
 		// UE_LOG(LogTemp, Warning, TEXT("%s: asset renamed %s"), *lpp, *str);
 	}
 }
 }
+
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp
new file mode 100644
index 0000000000..b943150bdd
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/AssetContainerFactory.cpp
@@ -0,0 +1,20 @@
+#include "AssetContainerFactory.h"
+#include "AssetContainer.h"
+
+UAssetContainerFactory::UAssetContainerFactory(const FObjectInitializer& ObjectInitializer)
+    : UFactory(ObjectInitializer)
+{
+    SupportedClass = UAssetContainer::StaticClass();
+    bCreateNew = false;
+    bEditorImport = true;
+}
+
+UObject* UAssetContainerFactory::FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
+{
+    UAssetContainer* AssetContainer = NewObject<UAssetContainer>(InParent, Class, Name, Flags);
+    return AssetContainer;
+}
+
+bool UAssetContainerFactory::ShouldShowInNewMenu() const {
+    return false;
+}
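With `bCreateNew = false`, the factory above never appears in the editor's "Add New" menu; containers are meant to be created from the pipeline's Python side instead. A minimal sketch of that call, assuming the reflected class names match the C++ above (this mirrors what the pipeline's `create_container()` helper is expected to do):

```python
import unreal

def create_asset_container(container_name, path):
    # AssetTools routes the request through UAssetContainerFactory,
    # which simply calls NewObject<UAssetContainer>().
    tools = unreal.AssetToolsHelpers.get_asset_tools()
    return tools.create_asset(
        asset_name=container_name,             # e.g. "lookMain_CON"
        package_path=path,                     # e.g. "/Game/OpenPype/Assets/hero"
        asset_class=unreal.AssetContainer,
        factory=unreal.AssetContainerFactory())
```

diff --git 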
a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp
new file mode 100644
index 0000000000..d23de61102
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPype.cpp
@@ -0,0 +1,139 @@
+#include "OpenPype.h"
+
+#include "ISettingsContainer.h"
+#include "ISettingsModule.h"
+#include "ISettingsSection.h"
+#include "OpenPypeStyle.h"
+#include "OpenPypeCommands.h"
+#include "OpenPypePythonBridge.h"
+#include "OpenPypeSettings.h"
+#include "Misc/MessageDialog.h"
+#include "ToolMenus.h"
+
+
+static const FName OpenPypeTabName("OpenPype");
+
+#define LOCTEXT_NAMESPACE "FOpenPypeModule"
+
+// This function is triggered when the plugin is starting up
+void FOpenPypeModule::StartupModule()
+{
+    FOpenPypeStyle::Initialize();
+    FOpenPypeStyle::ReloadTextures();
+    FOpenPypeCommands::Register();
+
+    PluginCommands = MakeShareable(new FUICommandList);
+
+    PluginCommands->MapAction(
+        FOpenPypeCommands::Get().OpenPypeTools,
+        FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuPopup),
+        FCanExecuteAction());
+    PluginCommands->MapAction(
+        FOpenPypeCommands::Get().OpenPypeToolsDialog,
+        FExecuteAction::CreateRaw(this, &FOpenPypeModule::MenuDialog),
+        FCanExecuteAction());
+
+    UToolMenus::RegisterStartupCallback(
+        FSimpleMulticastDelegate::FDelegate::CreateRaw(this, &FOpenPypeModule::RegisterMenus));
+
+    RegisterSettings();
+}
+
+void FOpenPypeModule::ShutdownModule()
+{
+    UToolMenus::UnRegisterStartupCallback(this);
+
+    UToolMenus::UnregisterOwner(this);
+
+    FOpenPypeStyle::Shutdown();
+
+    FOpenPypeCommands::Unregister();
+}
+
+
+void FOpenPypeModule::RegisterSettings()
+{
+    ISettingsModule& SettingsModule = FModuleManager::LoadModuleChecked<ISettingsModule>("Settings");
+
+    // Create the new category
+    // TODO: After the movement of the plugin from the game to editor, it might be necessary to move this!
+    ISettingsContainerPtr SettingsContainer = SettingsModule.GetContainer("Project");
+
+    UOpenPypeSettings* Settings = GetMutableDefault<UOpenPypeSettings>();
+
+    // Register the settings
+    ISettingsSectionPtr SettingsSection = SettingsModule.RegisterSettings("Project", "OpenPype", "General",
+                                                                          LOCTEXT("RuntimeGeneralSettingsName",
+                                                                                  "General"),
+                                                                          LOCTEXT("RuntimeGeneralSettingsDescription",
+                                                                                  "Base configuration for Open Pype Module"),
+                                                                          Settings
+    );
+
+    // Register the save handler for your settings; you might want to use it to
+    // validate the values or react to settings changes.
+    if (SettingsSection.IsValid())
+    {
+        SettingsSection->OnModified().BindRaw(this, &FOpenPypeModule::HandleSettingsSaved);
+    }
+}
+
+bool FOpenPypeModule::HandleSettingsSaved()
+{
+    UOpenPypeSettings* Settings = GetMutableDefault<UOpenPypeSettings>();
+    bool ResaveSettings = false;
+
+    // You can put any validation code in here and resave the settings in case an invalid
+    // value has been entered
+
+    if (ResaveSettings)
+    {
+        Settings->SaveConfig();
+    }
+
+    return true;
+}
+
+void FOpenPypeModule::RegisterMenus()
+{
+    // Owner will be used for cleanup in call to UToolMenus::UnregisterOwner
+    FToolMenuOwnerScoped OwnerScoped(this);
+
+    {
+        UToolMenu* Menu = UToolMenus::Get()->ExtendMenu("LevelEditor.MainMenu.Tools");
+        {
+            // FToolMenuSection& Section = Menu->FindOrAddSection("OpenPype");
+            FToolMenuSection& Section = Menu->AddSection(
+                "OpenPype",
+                TAttribute<FText>(FText::FromString("OpenPype")),
+                FToolMenuInsert("Programming", EToolMenuInsertType::Before)
+            );
+            Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeTools, PluginCommands);
+            Section.AddMenuEntryWithCommandList(FOpenPypeCommands::Get().OpenPypeToolsDialog, PluginCommands);
+        }
+        UToolMenu* ToolbarMenu = UToolMenus::Get()->ExtendMenu("LevelEditor.LevelEditorToolBar.PlayToolBar");
+        {
+            FToolMenuSection& Section = ToolbarMenu->FindOrAddSection("PluginTools");
+            {
+                FToolMenuEntry& Entry = Section.AddEntry(
+                    FToolMenuEntry::InitToolBarButton(FOpenPypeCommands::Get().OpenPypeTools));
+                Entry.SetCommandList(PluginCommands);
+            }
+        }
+    }
+}
+
+
+void FOpenPypeModule::MenuPopup()
+{
+    UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get();
+    bridge->RunInPython_Popup();
+}
+
+void FOpenPypeModule::MenuDialog()
+{
+    UOpenPypePythonBridge* bridge = UOpenPypePythonBridge::Get();
+    bridge->RunInPython_Dialog();
+}
+
+IMPLEMENT_MODULE(FOpenPypeModule, OpenPype)
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp
new file mode 100644
index 0000000000..6187bd7c7e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeCommands.cpp
@@ -0,0 +1,13 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+#include "OpenPypeCommands.h"
+
+#define LOCTEXT_NAMESPACE "FOpenPypeModule"
+
+void FOpenPypeCommands::RegisterCommands()
+{
+    UI_COMMAND(OpenPypeTools, "OpenPype Tools", "Pipeline tools", EUserInterfaceActionType::Button, FInputChord());
+    UI_COMMAND(OpenPypeToolsDialog, "OpenPype Tools Dialog", "Pipeline tools dialog", EUserInterfaceActionType::Button, FInputChord());
+}
+
+#undef LOCTEXT_NAMESPACE
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp
new file mode 100644
index 0000000000..a58e921288
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeLib.cpp
@@ -0,0 +1,52 @@
+#include "OpenPypeLib.h"
+
+#include "AssetViewUtils.h"
+#include "Misc/Paths.h"
+#include "Misc/ConfigCacheIni.h"
+#include "UObject/UnrealType.h"
+
+/**
+ * Sets the color of the folder icon on the given path
+ * @param FolderPath - path to folder
+ * @param FolderColor - color of the folder
+ * @warning This color will appear only after Editor restart. Is there a better way?
+ */
+bool UOpenPypeLib::SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor, const bool& bForceAdd)
+{
+    if (AssetViewUtils::DoesFolderExist(FolderPath))
+    {
+        const TSharedPtr<FLinearColor> LinearColor = MakeShared<FLinearColor>(FolderColor);
+
+        AssetViewUtils::SaveColor(FolderPath, LinearColor, true);
+        UE_LOG(LogAssetData, Display, TEXT("A color {%s} has been set to folder \"%s\""), *LinearColor->ToString(),
+               *FolderPath)
+        return true;
+    }
+
+    UE_LOG(LogAssetData, Display, TEXT("Setting a color {%s} to folder \"%s\" has failed! Directory doesn't exist!"),
+           *FolderColor.ToString(), *FolderPath)
+    return false;
+}
+
+/**
+ * Returns all properties on the given object
+ * @param cls - class
+ * @return TArray of properties
+ */
+TArray<FString> UOpenPypeLib::GetAllProperties(UClass* cls)
+{
+    TArray<FString> Ret;
+    if (cls != nullptr)
+    {
+        for (TFieldIterator<FProperty> It(cls); It; ++It)
+        {
+            FProperty* Property = *It;
+            if (Property->HasAnyPropertyFlags(EPropertyFlags::CPF_Edit))
+            {
+                Ret.Add(Property->GetName());
+            }
+        }
+    }
+    return Ret;
+}
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp
new file mode 100644
index 0000000000..0b56111a49
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstance.cpp
@@ -0,0 +1,201 @@
+#pragma once
+
+#include "OpenPypePublishInstance.h"
+#include "AssetRegistry/AssetRegistryModule.h"
+#include "AssetToolsModule.h"
+#include "Framework/Notifications/NotificationManager.h"
+#include "OpenPypeLib.h"
+#include "OpenPypeSettings.h"
+#include "Widgets/Notifications/SNotificationList.h"
+
+
+//Moves all the invalid pointers to the end to prepare them for the shrinking
+#define REMOVE_INVALID_ENTRIES(VAR) VAR.CompactStable(); \
+    VAR.Shrink();
+
+UOpenPypePublishInstance::UOpenPypePublishInstance(const FObjectInitializer& ObjectInitializer)
+    : UPrimaryDataAsset(ObjectInitializer)
+{
+    const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<
+        FAssetRegistryModule>("AssetRegistry");
+
+    const FPropertyEditorModule& PropertyEditorModule = FModuleManager::LoadModuleChecked<FPropertyEditorModule>(
+        "PropertyEditor");
+
+    FString Left, Right;
+    GetPathName().Split("/" + GetName(), &Left, &Right);
+
+    FARFilter Filter;
+    Filter.PackagePaths.Emplace(FName(Left));
+
+    TArray<FAssetData> FoundAssets;
+    AssetRegistryModule.GetRegistry().GetAssets(Filter, FoundAssets);
+
+    for (const FAssetData& AssetData : FoundAssets)
+        OnAssetCreated(AssetData);
+
+    REMOVE_INVALID_ENTRIES(AssetDataInternal)
+    REMOVE_INVALID_ENTRIES(AssetDataExternal)
+
+    AssetRegistryModule.Get().OnAssetAdded().AddUObject(this, &UOpenPypePublishInstance::OnAssetCreated);
+    AssetRegistryModule.Get().OnAssetRemoved().AddUObject(this, &UOpenPypePublishInstance::OnAssetRemoved);
+    AssetRegistryModule.Get().OnAssetUpdated().AddUObject(this, &UOpenPypePublishInstance::OnAssetUpdated);
+
+#ifdef WITH_EDITOR
+    ColorOpenPypeDirs();
+#endif
+}
+
+void UOpenPypePublishInstance::OnAssetCreated(const FAssetData& InAssetData)
+{
+    TArray<FString> split;
+
+    UObject* Asset = InAssetData.GetAsset();
+
+    if (!IsValid(Asset))
+    {
+        UE_LOG(LogAssetData, Warning, TEXT("Asset \"%s\" is not valid! Skipping the addition."),
+               *InAssetData.GetObjectPathString());
+        return;
+    }
+
+    const bool result = IsUnderSameDir(Asset) && Cast<UOpenPypePublishInstance>(Asset) == nullptr;
+
+    if (result)
+    {
+        if (AssetDataInternal.Emplace(Asset).IsValidId())
+        {
+            UE_LOG(LogTemp, Log, TEXT("Added an Asset to PublishInstance - Publish Instance: %s, Asset %s"),
+                   *this->GetName(), *Asset->GetName());
+        }
+    }
+}
+
+void UOpenPypePublishInstance::OnAssetRemoved(const FAssetData& InAssetData)
+{
+    if (Cast<UOpenPypePublishInstance>(InAssetData.GetAsset()) == nullptr)
+    {
+        if (AssetDataInternal.Contains(nullptr))
+        {
+            AssetDataInternal.Remove(nullptr);
+            REMOVE_INVALID_ENTRIES(AssetDataInternal)
+        }
+        else
+        {
+            AssetDataExternal.Remove(nullptr);
+            REMOVE_INVALID_ENTRIES(AssetDataExternal)
+        }
+    }
+}
+
+void UOpenPypePublishInstance::OnAssetUpdated(const FAssetData& InAssetData)
+{
+    REMOVE_INVALID_ENTRIES(AssetDataInternal);
+    REMOVE_INVALID_ENTRIES(AssetDataExternal);
+}
+
+bool UOpenPypePublishInstance::IsUnderSameDir(const UObject* InAsset) const
+{
+    FString ThisLeft, ThisRight;
+    this->GetPathName().Split(this->GetName(), &ThisLeft, &ThisRight);
+
+    return InAsset->GetPathName().StartsWith(ThisLeft);
+}
+
+#ifdef WITH_EDITOR
+
+void UOpenPypePublishInstance::ColorOpenPypeDirs()
+{
+    FString PathName = this->GetPathName();
+
+    //Check whether the path contains the defined OpenPype folder
+    if (!PathName.Contains(TEXT("OpenPype"))) return;
+
+    //Get the base path for open pype
+    FString PathLeft, PathRight;
+    PathName.Split(FString("OpenPype"), &PathLeft, &PathRight);
+
+    if (PathLeft.IsEmpty() || PathRight.IsEmpty())
+    {
+        UE_LOG(LogAssetData, Error, TEXT("Failed to retrieve the base OpenPype directory!"))
+        return;
+    }
+
+    PathName.RemoveFromEnd(PathRight, ESearchCase::CaseSensitive);
+
+    //Get the current settings
+    const UOpenPypeSettings* Settings = GetMutableDefault<UOpenPypeSettings>();
+
+    //Color the base folder
+    UOpenPypeLib::SetFolderColor(PathName, Settings->GetFolderFColor(), false);
+
+    //Get Sub paths, iterate through them and color them according to the folder color in UOpenPypeSettings
+    const FAssetRegistryModule& AssetRegistryModule = FModuleManager::LoadModuleChecked<FAssetRegistryModule>(
+        "AssetRegistry");
+
+    TArray<FString> PathList;
+
+    AssetRegistryModule.Get().GetSubPaths(PathName, PathList, true);
+
+    if (PathList.Num() > 0)
+    {
+        for (const FString& Path : PathList)
+        {
+            UOpenPypeLib::SetFolderColor(Path, Settings->GetFolderFColor(), false);
+        }
+    }
+}
+
+void UOpenPypePublishInstance::SendNotification(const FString& Text) const
+{
+    FNotificationInfo Info{FText::FromString(Text)};
+
+    Info.bFireAndForget = true;
+    Info.bUseLargeFont = false;
+    Info.bUseThrobber = false;
+    Info.bUseSuccessFailIcons = false;
+    Info.ExpireDuration = 4.f;
+    Info.FadeOutDuration = 2.f;
+
+    FSlateNotificationManager::Get().AddNotification(Info);
+
+    UE_LOG(LogAssetData, Warning,
+           TEXT(
+               "Removed duplicated asset from the AssetsDataExternal in Container \"%s\", Asset is already included in the AssetDataInternal!"
+           ), *GetName()
+    )
+}
+
+
+void UOpenPypePublishInstance::PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent)
+{
+    Super::PostEditChangeProperty(PropertyChangedEvent);
+
+    if (PropertyChangedEvent.ChangeType == EPropertyChangeType::ValueSet &&
+        PropertyChangedEvent.Property->GetFName() == GET_MEMBER_NAME_CHECKED(
+            UOpenPypePublishInstance, AssetDataExternal))
+    {
+        // Check for duplicated assets
+        for (const auto& Asset : AssetDataInternal)
+        {
+            if (AssetDataExternal.Contains(Asset))
+            {
+                AssetDataExternal.Remove(Asset);
+                return SendNotification(
+                    "You are not allowed to add assets into AssetDataExternal which are already included in AssetDataInternal!");
+            }
+        }
+
+        // Check if no UOpenPypePublishInstance type assets are included
+        for (const auto& Asset : AssetDataExternal)
+        {
+            if (Cast<UOpenPypePublishInstance>(Asset.Get()) != nullptr)
+            {
+                AssetDataExternal.Remove(Asset);
+                return SendNotification("You are not allowed to add publish instances!");
+            }
+        }
+    }
+}
+
+#endif
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp
new file mode 100644
index 0000000000..9b26da7fa4
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePublishInstanceFactory.cpp
@@ -0,0 +1,20 @@
+#include "OpenPypePublishInstanceFactory.h"
+#include "OpenPypePublishInstance.h"
+
+UOpenPypePublishInstanceFactory::UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer)
+    : UFactory(ObjectInitializer)
+{
+    SupportedClass = UOpenPypePublishInstance::StaticClass();
+    bCreateNew = false;
+    bEditorImport = true;
+}
+
+UObject* UOpenPypePublishInstanceFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
+{
+    check(InClass->IsChildOf(UOpenPypePublishInstance::StaticClass()));
+    return NewObject<UOpenPypePublishInstance>(InParent, InClass, InName, Flags);
+}
+
+bool UOpenPypePublishInstanceFactory::ShouldShowInNewMenu() const {
+    return false;
+}
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp
new file mode 100644
index 0000000000..8113231503
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypePythonBridge.cpp
@@ -0,0 +1,13 @@
+#include "OpenPypePythonBridge.h"
+
+UOpenPypePythonBridge* UOpenPypePythonBridge::Get()
+{
+    TArray<UClass*> OpenPypePythonBridgeClasses;
+    GetDerivedClasses(UOpenPypePythonBridge::StaticClass(), OpenPypePythonBridgeClasses);
+    int32 NumClasses = OpenPypePythonBridgeClasses.Num();
+    if (NumClasses > 0)
+    {
+        return Cast<UOpenPypePythonBridge>(OpenPypePythonBridgeClasses[NumClasses - 1]->GetDefaultObject());
+    }
+    return nullptr;
+};
\ No newline at end of file
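`Get()` above walks the derived classes and returns the class default object of the last one, which at editor runtime is the `@unreal.uclass()` subclass registered by `init_unreal.py` - that is how a C++ menu click ends up in Python. A quick way to verify the wiring from the editor's Python console, assuming the usual snake_case reflection of the UFUNCTION names:

```python
import unreal

# Resolves to the CDO of OpenPypeIntegration from init_unreal.py,
# not to the C++ base class itself.
bridge = unreal.OpenPypePythonBridge.get()
bridge.run_in_python_popup()  # should route to the RunInPython_Popup override
```

diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp
new file mode 100644
index 0000000000..a6b9eba749
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeSettings.cpp
@@ -0,0 +1,21 @@
+// Fill out your copyright notice in the Description page of Project Settings.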
+ +#include "OpenPypeSettings.h" + +#include "Interfaces/IPluginManager.h" +#include "UObject/UObjectGlobals.h" + +/** + * Mainly is used for initializing default values if the DefaultOpenPypeSettings.ini file does not exist in the saved config + */ +UOpenPypeSettings::UOpenPypeSettings(const FObjectInitializer& ObjectInitializer) +{ + + const FString ConfigFilePath = OPENPYPE_SETTINGS_FILEPATH; + + // This has to be probably in the future set using the UE Reflection system + FColor Color; + GConfig->GetColor(TEXT("/Script/OpenPype.OpenPypeSettings"), TEXT("FolderColor"), Color, ConfigFilePath); + + FolderColor = Color; +} \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp new file mode 100644 index 0000000000..49e805da4d --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Private/OpenPypeStyle.cpp @@ -0,0 +1,61 @@ +#include "OpenPypeStyle.h" +#include "OpenPype.h" +#include "Framework/Application/SlateApplication.h" +#include "Styling/SlateStyleRegistry.h" +#include "Slate/SlateGameResources.h" +#include "Interfaces/IPluginManager.h" +#include "Styling/SlateStyleMacros.h" + +#define RootToContentDir Style->RootToContentDir + +TSharedPtr FOpenPypeStyle::OpenPypeStyleInstance = nullptr; + +void FOpenPypeStyle::Initialize() +{ + if (!OpenPypeStyleInstance.IsValid()) + { + OpenPypeStyleInstance = Create(); + FSlateStyleRegistry::RegisterSlateStyle(*OpenPypeStyleInstance); + } +} + +void FOpenPypeStyle::Shutdown() +{ + FSlateStyleRegistry::UnRegisterSlateStyle(*OpenPypeStyleInstance); + ensure(OpenPypeStyleInstance.IsUnique()); + OpenPypeStyleInstance.Reset(); +} + +FName FOpenPypeStyle::GetStyleSetName() +{ + static FName StyleSetName(TEXT("OpenPypeStyle")); + return StyleSetName; +} + +const FVector2D Icon16x16(16.0f, 16.0f); +const FVector2D Icon20x20(20.0f, 20.0f); +const FVector2D Icon40x40(40.0f, 40.0f); + +TSharedRef< FSlateStyleSet > FOpenPypeStyle::Create() +{ + TSharedRef< FSlateStyleSet > Style = MakeShareable(new FSlateStyleSet("OpenPypeStyle")); + Style->SetContentRoot(IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Resources")); + + Style->Set("OpenPype.OpenPypeTools", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); + Style->Set("OpenPype.OpenPypeToolsDialog", new IMAGE_BRUSH(TEXT("openpype40"), Icon40x40)); + + return Style; +} + +void FOpenPypeStyle::ReloadTextures() +{ + if (FSlateApplication::IsInitialized()) + { + FSlateApplication::Get().GetRenderer()->ReloadTextureResources(); + } +} + +const ISlateStyle& FOpenPypeStyle::Get() +{ + return *OpenPypeStyleInstance; +} diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h new file mode 100644 index 0000000000..2c06e59d6f --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainer.h @@ -0,0 +1,39 @@ +// Fill out your copyright notice in the Description page of Project Settings. 
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "UObject/NoExportTypes.h"
+#include "Engine/AssetUserData.h"
+#include "AssetRegistry/AssetData.h"
+#include "AssetContainer.generated.h"
+
+/**
+ *
+ */
+UCLASS(Blueprintable)
+class OPENPYPE_API UAssetContainer : public UAssetUserData
+{
+    GENERATED_BODY()
+
+public:
+
+    UAssetContainer(const FObjectInitializer& ObjectInitializer);
+    // ~UAssetContainer();
+
+    UPROPERTY(EditAnywhere, BlueprintReadOnly)
+    TArray<FString> assets;
+
+    // There seems to be no reflection option to expose an array of FAssetData
+    /*
+    UPROPERTY(Transient, BlueprintReadOnly, Category = "Python", meta=(DisplayName="Assets Data"))
+    TArray<FAssetData> assetsData;
+    */
+private:
+    TArray<FAssetData> assetsData;
+    void OnAssetAdded(const FAssetData& AssetData);
+    void OnAssetRemoved(const FAssetData& AssetData);
+    void OnAssetRenamed(const FAssetData& AssetData, const FString& str);
+};
+
+
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h
new file mode 100644
index 0000000000..331ce6bb50
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/AssetContainerFactory.h
@@ -0,0 +1,21 @@
+// Fill out your copyright notice in the Description page of Project Settings.
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Factories/Factory.h"
+#include "AssetContainerFactory.generated.h"
+
+/**
+ *
+ */
+UCLASS()
+class OPENPYPE_API UAssetContainerFactory : public UFactory
+{
+    GENERATED_BODY()
+
+public:
+    UAssetContainerFactory(const FObjectInitializer& ObjectInitializer);
+    virtual UObject* FactoryCreateNew(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
+    virtual bool ShouldShowInNewMenu() const override;
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h
new file mode 100644
index 0000000000..4261476da8
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPype.h
@@ -0,0 +1,25 @@
+// Copyright 1998-2019 Epic Games, Inc. All Rights Reserved.
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Modules/ModuleManager.h"
+
+
+class FOpenPypeModule : public IModuleInterface
+{
+public:
+    virtual void StartupModule() override;
+    virtual void ShutdownModule() override;
+
+private:
+    void RegisterMenus();
+    void RegisterSettings();
+    bool HandleSettingsSaved();
+
+    void MenuPopup();
+    void MenuDialog();
+
+private:
+    TSharedPtr<FUICommandList> PluginCommands;
+};
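`RegisterMenus()` declared above extends `LevelEditor.MainMenu.Tools` through `UToolMenus`. The same extension point is scriptable, so a menu layout can be prototyped from Python before committing to C++; a sketch with illustrative entry names (the `ToolMenuEntry` construction follows the commonly documented Python pattern and is an assumption here):

```python
import unreal

menus = unreal.ToolMenus.get()
menu = menus.extend_menu("LevelEditor.MainMenu.Tools")
entry = unreal.ToolMenuEntry(
    name="OpenPypeToolsPrototype",
    type=unreal.MultiBlockType.MENU_ENTRY)
entry.set_label("OpenPype Tools (prototype)")
menu.add_menu_entry("OpenPype", entry)  # section name matches the C++ one
menus.refresh_all_widgets()
```

diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h
new file mode 100644
index 0000000000..62ffb8de33
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeCommands.h
@@ -0,0 +1,24 @@
+// Copyright Epic Games, Inc. All Rights Reserved.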
+
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Framework/Commands/Commands.h"
+#include "OpenPypeStyle.h"
+
+class FOpenPypeCommands : public TCommands<FOpenPypeCommands>
+{
+public:
+
+    FOpenPypeCommands()
+        : TCommands<FOpenPypeCommands>(TEXT("OpenPype"), NSLOCTEXT("Contexts", "OpenPype", "OpenPype Tools"), NAME_None, FOpenPypeStyle::GetStyleSetName())
+    {
+    }
+
+    // TCommands<> interface
+    virtual void RegisterCommands() override;
+
+public:
+    TSharedPtr< FUICommandInfo > OpenPypeTools;
+    TSharedPtr< FUICommandInfo > OpenPypeToolsDialog;
+};
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h
new file mode 100644
index 0000000000..06425c7c7d
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeLib.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "Engine.h"
+#include "OpenPypeLib.generated.h"
+
+
+UCLASS(Blueprintable)
+class OPENPYPE_API UOpenPypeLib : public UBlueprintFunctionLibrary
+{
+
+    GENERATED_BODY()
+
+public:
+    UFUNCTION(BlueprintCallable, Category = Python)
+    static bool SetFolderColor(const FString& FolderPath, const FLinearColor& FolderColor, const bool& bForceAdd);
+
+    UFUNCTION(BlueprintCallable, Category = Python)
+    static TArray<FString> GetAllProperties(UClass* cls);
+};
\ No newline at end of file
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h
new file mode 100644
index 0000000000..146025bd6d
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstance.h
@@ -0,0 +1,102 @@
+#pragma once
+
+#include "Engine.h"
+#include "OpenPypePublishInstance.generated.h"
+
+
+UCLASS(Blueprintable)
+class OPENPYPE_API UOpenPypePublishInstance : public UPrimaryDataAsset
+{
+    GENERATED_UCLASS_BODY()
+
+public:
+    /**
+     * Retrieves all the assets monitored by the Publish Instance (it monitors assets in the directory in which it
+     * is placed)
+     *
+     * @return - Set of UObjects. Careful! These are raw pointers. Seems like an issue in UE5
+     */
+    UFUNCTION(BlueprintCallable, BlueprintPure)
+    TSet<UObject*> GetInternalAssets() const
+    {
+        //For some reason it can only return raw pointers? Seems like an issue which they haven't fixed.
+        TSet<UObject*> ResultSet;
+
+        for (const auto& Asset : AssetDataInternal)
+            ResultSet.Add(Asset.LoadSynchronous());
+
+        return ResultSet;
+    }
+
+    /**
+     * Retrieves all the assets which have been added manually to the Publish Instance
+     *
+     * @return - TSet of assets (UObjects). Careful! These are raw pointers. Seems like an issue in UE5
+     */
+    UFUNCTION(BlueprintCallable, BlueprintPure)
+    TSet<UObject*> GetExternalAssets() const
+    {
+        //For some reason it can only return raw pointers? Seems like an issue which they haven't fixed.
+        TSet<UObject*> ResultSet;
+
+        for (const auto& Asset : AssetDataExternal)
+            ResultSet.Add(Asset.LoadSynchronous());
+
+        return ResultSet;
+    }
+
+    /**
+     * Function for returning all the assets in the container combined.
+     *
+     * @return Returns all the internal and externally added assets in one set (TSet of UObjects). Careful! These
+     * are raw pointers. Seems like an issue in UE5
+     *
+     * @attention If the bAddExternalAssets variable is false, external assets won't be included!
+     */
+    UFUNCTION(BlueprintCallable, BlueprintPure)
+    TSet<UObject*> GetAllAssets() const
+    {
+        const TSet<TSoftObjectPtr<UObject>>& IteratedSet = bAddExternalAssets
+                                                               ? AssetDataInternal.Union(AssetDataExternal)
+                                                               : AssetDataInternal;
+
+        //Create a new TSet only with raw pointers.
+        TSet<UObject*> ResultSet;
+
+        for (auto& Asset : IteratedSet)
+            ResultSet.Add(Asset.LoadSynchronous());
+
+        return ResultSet;
+    }
+
+private:
+    UPROPERTY(VisibleAnywhere, Category="Assets")
+    TSet<TSoftObjectPtr<UObject>> AssetDataInternal;
+
+    /**
+     * This property allows the set to include assets from directories other than the one the instance is currently
+     * monitoring. NOTE: these assets have to be added manually! They are not automatically registered or added!
+     */
+    UPROPERTY(EditAnywhere, Category = "Assets")
+    bool bAddExternalAssets = false;
+
+    UPROPERTY(EditAnywhere, meta=(EditCondition="bAddExternalAssets"), Category="Assets")
+    TSet<TSoftObjectPtr<UObject>> AssetDataExternal;
+
+
+    void OnAssetCreated(const FAssetData& InAssetData);
+    void OnAssetRemoved(const FAssetData& InAssetData);
+    void OnAssetUpdated(const FAssetData& InAssetData);
+
+    bool IsUnderSameDir(const UObject* InAsset) const;
+
+#ifdef WITH_EDITOR
+
+    void ColorOpenPypeDirs();
+
+    void SendNotification(const FString& Text) const;
+    virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override;
+
+#endif
+};
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h
new file mode 100644
index 0000000000..7d2c77fe6e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePublishInstanceFactory.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "CoreMinimal.h"
+#include "Factories/Factory.h"
+#include "OpenPypePublishInstanceFactory.generated.h"
+
+/**
+ *
+ */
+UCLASS()
+class OPENPYPE_API UOpenPypePublishInstanceFactory : public UFactory
+{
+    GENERATED_BODY()
+
+public:
+    UOpenPypePublishInstanceFactory(const FObjectInitializer& ObjectInitializer);
+    virtual UObject* FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn) override;
+    virtual bool ShouldShowInNewMenu() const override;
+};
diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h
new file mode 100644
index 0000000000..692aab2e5e
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypePythonBridge.h
@@ -0,0 +1,20 @@
+#pragma once
+#include "Engine.h"
+#include "OpenPypePythonBridge.generated.h"
+
+UCLASS(Blueprintable)
+class UOpenPypePythonBridge : public UObject
+{
+    GENERATED_BODY()
+
+public:
+    UFUNCTION(BlueprintCallable, Category = Python)
+    static UOpenPypePythonBridge* Get();
+
+    UFUNCTION(BlueprintImplementableEvent, Category = Python)
+    void RunInPython_Popup() const;
+
+    UFUNCTION(BlueprintImplementableEvent, Category = Python)
+    void RunInPython_Dialog() const;
+
+};
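Because `GetInternalAssets()`, `GetExternalAssets()` and `GetAllAssets()` are `BlueprintPure`, a publish instance can also be inspected from Python once loaded; a short sketch (the instance path is hypothetical, and the method name assumes snake_case reflection):

```python
import unreal

instance = unreal.EditorAssetLibrary.load_asset(
    "/Game/OpenPype/PublishInstances/unrealRenderMain_INS")  # hypothetical path
for obj in instance.get_all_assets():
    unreal.log(obj.get_path_name())
```

diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h
new file mode 100644
index 0000000000..aca80946bb
--- /dev/null
+++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeSettings.h
@@ -0,0 +1,32 @@
+// Fill out your copyright notice in the Description page of Project Settings.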
+ +#pragma once + +#include "CoreMinimal.h" +#include "UObject/Object.h" +#include "OpenPypeSettings.generated.h" + +#define OPENPYPE_SETTINGS_FILEPATH IPluginManager::Get().FindPlugin("OpenPype")->GetBaseDir() / TEXT("Config") / TEXT("DefaultOpenPypeSettings.ini") + +UCLASS(Config=OpenPypeSettings, DefaultConfig) +class OPENPYPE_API UOpenPypeSettings : public UObject +{ + GENERATED_UCLASS_BODY() + + UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) + FColor GetFolderFColor() const + { + return FolderColor; + } + + UFUNCTION(BlueprintCallable, BlueprintPure, Category = Settings) + FLinearColor GetFolderFLinearColor() const + { + return FLinearColor(FolderColor); + } + +protected: + + UPROPERTY(config, EditAnywhere, Category = Folders) + FColor FolderColor = FColor(25,45,223); +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h new file mode 100644 index 0000000000..ae704251e1 --- /dev/null +++ b/openpype/hosts/unreal/integration/UE_5.0/Source/OpenPype/Public/OpenPypeStyle.h @@ -0,0 +1,18 @@ +#pragma once +#include "CoreMinimal.h" +#include "Styling/SlateStyle.h" + +class FOpenPypeStyle +{ +public: + static void Initialize(); + static void Shutdown(); + static void ReloadTextures(); + static const ISlateStyle& Get(); + static FName GetStyleSetName(); + + +private: + static TSharedRef< class FSlateStyleSet > Create(); + static TSharedPtr< class FSlateStyleSet > OpenPypeStyleInstance; +}; \ No newline at end of file diff --git a/openpype/hosts/unreal/lib.py b/openpype/hosts/unreal/lib.py index d4a776e892..095f5e414b 100644 --- a/openpype/hosts/unreal/lib.py +++ b/openpype/hosts/unreal/lib.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """Unreal launching and project tools.""" -import sys + import os import platform import json @@ -9,7 +9,7 @@ import subprocess import re from pathlib import Path from collections import OrderedDict -from openpype.api import get_project_settings +from openpype.settings import get_project_settings def get_engine_versions(env=None): @@ -50,7 +50,10 @@ def get_engine_versions(env=None): # environment variable not set pass except OSError: - # specified directory doesn't exists + # specified directory doesn't exist + pass + except StopIteration: + # specified directory doesn't exist pass # if we've got something, terminate auto-detection process @@ -70,19 +73,22 @@ def get_engine_versions(env=None): return OrderedDict() -def get_editor_executable_path(engine_path: Path) -> Path: - """Get UE4 Editor executable path.""" - ue4_path = engine_path / "Engine/Binaries" +def get_editor_executable_path(engine_path: Path, engine_version: str) -> Path: + """Get UE Editor executable path.""" + ue_path = engine_path / "Engine/Binaries" if platform.system().lower() == "windows": - ue4_path /= "Win64/UE4Editor.exe" + if engine_version.split(".")[0] == "4": + ue_path /= "Win64/UE4Editor.exe" + elif engine_version.split(".")[0] == "5": + ue_path /= "Win64/UnrealEditor.exe" elif platform.system().lower() == "linux": - ue4_path /= "Linux/UE4Editor" + ue_path /= "Linux/UE4Editor" elif platform.system().lower() == "darwin": - ue4_path /= "Mac/UE4Editor" + ue_path /= "Mac/UE4Editor" - return ue4_path + return ue_path def _win_get_engine_versions(): @@ -208,22 +214,26 @@ def create_unreal_project(project_name: str, # created in different UE4 version. 
When user convert such project # to his UE4 version, Engine ID is replaced in uproject file. If some # other user tries to open it, it will present him with similar error. - ue4_modules = Path() + ue_modules = Path() if platform.system().lower() == "windows": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", - "Win64", "UE4Editor.modules")) + ue_modules_path = engine_path / "Engine/Binaries/Win64" + if ue_version.split(".")[0] == "4": + ue_modules_path /= "UE4Editor.modules" + elif ue_version.split(".")[0] == "5": + ue_modules_path /= "UnrealEditor.modules" + ue_modules = Path(ue_modules_path) if platform.system().lower() == "linux": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", "Linux", "UE4Editor.modules")) if platform.system().lower() == "darwin": - ue4_modules = Path(os.path.join(engine_path, "Engine", "Binaries", + ue_modules = Path(os.path.join(engine_path, "Engine", "Binaries", "Mac", "UE4Editor.modules")) - if ue4_modules.exists(): + if ue_modules.exists(): print("--- Loading Engine ID from modules file ...") - with open(ue4_modules, "r") as mp: + with open(ue_modules, "r") as mp: loaded_modules = json.load(mp) if loaded_modules.get("BuildId"): @@ -254,6 +264,7 @@ def create_unreal_project(project_name: str, {"Name": "PythonScriptPlugin", "Enabled": True}, {"Name": "EditorScriptingUtilities", "Enabled": True}, {"Name": "SequencerScripting", "Enabled": True}, + {"Name": "MovieRenderPipeline", "Enabled": True}, {"Name": "OpenPype", "Enabled": True} ] } @@ -279,7 +290,7 @@ def create_unreal_project(project_name: str, python_path = None if platform.system().lower() == "windows": python_path = engine_path / ("Engine/Binaries/ThirdParty/" - "Python3/Win64/pythonw.exe") + "Python3/Win64/python.exe") if platform.system().lower() == "linux": python_path = engine_path / ("Engine/Binaries/ThirdParty/" @@ -293,14 +304,15 @@ def create_unreal_project(project_name: str, raise NotImplementedError("Unsupported platform") if not python_path.exists(): raise RuntimeError(f"Unreal Python not found at {python_path}") - subprocess.run( + subprocess.check_call( [python_path.as_posix(), "-m", "pip", "install", "pyside2"]) if dev_mode or preset["dev_mode"]: - _prepare_cpp_project(project_file, engine_path) + _prepare_cpp_project(project_file, engine_path, ue_version) -def _prepare_cpp_project(project_file: Path, engine_path: Path) -> None: +def _prepare_cpp_project( + project_file: Path, engine_path: Path, ue_version: str) -> None: """Prepare CPP Unreal Project. 
This function will add source files needed for project to be @@ -419,8 +431,12 @@ class {1}_API A{0}GameModeBase : public AGameModeBase with open(sources_dir / f"{project_name}GameModeBase.h", mode="w") as f: f.write(game_mode_h) - u_build_tool = Path( - engine_path / "Engine/Binaries/DotNET/UnrealBuildTool.exe") + u_build_tool_path = engine_path / "Engine/Binaries/DotNET" + if ue_version.split(".")[0] == "4": + u_build_tool_path /= "UnrealBuildTool.exe" + elif ue_version.split(".")[0] == "5": + u_build_tool_path /= "UnrealBuildTool/UnrealBuildTool.exe" + u_build_tool = Path(u_build_tool_path) u_header_tool = None arch = "Win64" diff --git a/openpype/hosts/unreal/plugins/create/create_camera.py b/openpype/hosts/unreal/plugins/create/create_camera.py index c2905fb6dd..bf1489d688 100644 --- a/openpype/hosts/unreal/plugins/create/create_camera.py +++ b/openpype/hosts/unreal/plugins/create/create_camera.py @@ -2,13 +2,11 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from avalon.unreal import ( - instantiate, -) +from openpype.hosts.unreal.api.pipeline import instantiate +from openpype.pipeline import LegacyCreator -class CreateCamera(Creator): +class CreateCamera(LegacyCreator): """Layout output for character rigs""" name = "layoutMain" diff --git a/openpype/hosts/unreal/plugins/create/create_layout.py b/openpype/hosts/unreal/plugins/create/create_layout.py index 00e83cf433..c1067b00d9 100644 --- a/openpype/hosts/unreal/plugins/create/create_layout.py +++ b/openpype/hosts/unreal/plugins/create/create_layout.py @@ -1,12 +1,11 @@ # -*- coding: utf-8 -*- -from unreal import EditorLevelLibrary as ell -from openpype.hosts.unreal.api.plugin import Creator -from avalon.unreal import ( - instantiate, -) +from unreal import EditorLevelLibrary + +from openpype.pipeline import LegacyCreator +from openpype.hosts.unreal.api.pipeline import instantiate -class CreateLayout(Creator): +class CreateLayout(LegacyCreator): """Layout output for character rigs.""" name = "layoutMain" @@ -30,13 +29,13 @@ class CreateLayout(Creator): # sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() # selection = [a.get_path_name() for a in sel_objects] - data["level"] = ell.get_editor_world().get_path_name() + data["level"] = EditorLevelLibrary.get_editor_world().get_path_name() data["members"] = [] if (self.options or {}).get("useSelection"): # Set as members the selected actors - for actor in ell.get_selected_level_actors(): + for actor in EditorLevelLibrary.get_selected_level_actors(): data["members"].append("{}.{}".format( actor.get_outer().get_name(), actor.get_name())) diff --git a/openpype/hosts/unreal/plugins/create/create_look.py b/openpype/hosts/unreal/plugins/create/create_look.py index 59c40d3e74..4abf3f6095 100644 --- a/openpype/hosts/unreal/plugins/create/create_look.py +++ b/openpype/hosts/unreal/plugins/create/create_look.py @@ -1,11 +1,11 @@ # -*- coding: utf-8 -*- """Create look in Unreal.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator -from openpype.hosts.unreal.api import pipeline +from openpype.hosts.unreal.api import pipeline, plugin +from openpype.pipeline import LegacyCreator -class CreateLook(Creator): +class CreateLook(LegacyCreator): """Shader connections defining shape look.""" name = "unrealLook" diff --git a/openpype/hosts/unreal/plugins/create/create_render.py b/openpype/hosts/unreal/plugins/create/create_render.py new file mode 100644 index 
0000000000..a85d17421b
--- /dev/null
+++ b/openpype/hosts/unreal/plugins/create/create_render.py
@@ -0,0 +1,117 @@
+import unreal
+
+from openpype.hosts.unreal.api import pipeline
+from openpype.pipeline import LegacyCreator
+
+
+class CreateRender(LegacyCreator):
+    """Create instance for sequence for rendering"""
+
+    name = "unrealRender"
+    label = "Unreal - Render"
+    family = "render"
+    icon = "cube"
+    asset_types = ["LevelSequence"]
+
+    root = "/Game/OpenPype/PublishInstances"
+    suffix = "_INS"
+
+    def process(self):
+        subset = self.data["subset"]
+
+        ar = unreal.AssetRegistryHelpers.get_asset_registry()
+
+        # The asset name is the third element of the path which contains
+        # the map.
+        # The index of the split path is 3 because the first element is an
+        # empty string, as the path begins with "/Content".
+        a = unreal.EditorUtilityLibrary.get_selected_assets()[0]
+        asset_name = a.get_path_name().split("/")[3]
+
+        # Get the master sequence and the master level.
+        # There should be only one sequence and one level in the directory.
+        filter = unreal.ARFilter(
+            class_names=["LevelSequence"],
+            package_paths=[f"/Game/OpenPype/{asset_name}"],
+            recursive_paths=False)
+        sequences = ar.get_assets(filter)
+        ms = sequences[0].get_editor_property('object_path')
+        filter = unreal.ARFilter(
+            class_names=["World"],
+            package_paths=[f"/Game/OpenPype/{asset_name}"],
+            recursive_paths=False)
+        levels = ar.get_assets(filter)
+        ml = levels[0].get_editor_property('object_path')
+
+        selection = []
+        if (self.options or {}).get("useSelection"):
+            sel_objects = unreal.EditorUtilityLibrary.get_selected_assets()
+            selection = [
+                a.get_path_name() for a in sel_objects
+                if a.get_class().get_name() in self.asset_types]
+        else:
+            selection.append(self.data['sequence'])
+
+        unreal.log(f"selection: {selection}")
+
+        path = f"{self.root}"
+        unreal.EditorAssetLibrary.make_directory(path)
+
+        ar = unreal.AssetRegistryHelpers.get_asset_registry()
+
+        for a in selection:
+            ms_obj = ar.get_asset_by_object_path(ms).get_asset()
+
+            seq_data = None
+
+            if a == ms:
+                seq_data = {
+                    "sequence": ms_obj,
+                    "output": f"{ms_obj.get_name()}",
+                    "frame_range": (
+                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
+                }
+            else:
+                seq_data_list = [{
+                    "sequence": ms_obj,
+                    "output": f"{ms_obj.get_name()}",
+                    "frame_range": (
+                        ms_obj.get_playback_start(), ms_obj.get_playback_end())
+                }]
+
+                for s in seq_data_list:
+                    subscenes = pipeline.get_subsequences(s.get('sequence'))
+
+                    for ss in subscenes:
+                        curr_data = {
+                            "sequence": ss.get_sequence(),
+                            "output": (f"{s.get('output')}/"
+                                       f"{ss.get_sequence().get_name()}"),
+                            "frame_range": (
+                                ss.get_start_frame(), ss.get_end_frame() - 1)
+                        }
+
+                        if ss.get_sequence().get_path_name() == a:
+                            seq_data = curr_data
+                            break
+                        seq_data_list.append(curr_data)
+
+                    if seq_data is not None:
+                        break
+
+            if not seq_data:
+                continue
+
+            d = self.data.copy()
+            d["members"] = [a]
+            d["sequence"] = a
+            d["master_sequence"] = ms
+            d["master_level"] = ml
+            d["output"] = seq_data.get('output')
+            d["frameStart"] = seq_data.get('frame_range')[0]
+            d["frameEnd"] = seq_data.get('frame_range')[1]
+
+            container_name = f"{subset}{self.suffix}"
+            pipeline.create_publish_instance(
+                instance=container_name, path=path)
+            pipeline.imprint(f"{path}/{container_name}", d)
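The `seq_data_list` loop above breadth-first walks the master sequence down to the selected shot via `pipeline.get_subsequences()`. The same traversal can be written as a standalone generator; a sketch assuming `get_subsequences()` returns the `MovieSceneSubSection` objects of a sequence (as its use above implies):

```python
import unreal

def iter_subsequences(sequence, prefix=""):
    """Recursively yield (sub_sequence, output_name, start, end)."""
    for track in sequence.find_master_tracks_by_type(
            unreal.MovieSceneSubTrack):
        for section in track.get_sections():
            sub = section.get_sequence()
            output = f"{prefix}{sub.get_name()}"
            # Mirror the "- 1" used above: the section end frame is exclusive.
            yield (sub, output,
                   section.get_start_frame(), section.get_end_frame() - 1)
            yield from iter_subsequences(sub, prefix=f"{output}/")
```

diff --git a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py
index 700eac7366..45d517d27d 100644
--- a/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py
+++ 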
b/openpype/hosts/unreal/plugins/create/create_staticmeshfbx.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- """Create Static Meshes as FBX geometry.""" import unreal # noqa -from openpype.hosts.unreal.api.plugin import Creator from openpype.hosts.unreal.api.pipeline import ( instantiate, ) +from openpype.pipeline import LegacyCreator -class CreateStaticMeshFBX(Creator): +class CreateStaticMeshFBX(LegacyCreator): """Static FBX geometry.""" name = "unrealStaticMeshMain" diff --git a/openpype/hosts/unreal/plugins/create/create_uasset.py b/openpype/hosts/unreal/plugins/create/create_uasset.py new file mode 100644 index 0000000000..ee584ac00c --- /dev/null +++ b/openpype/hosts/unreal/plugins/create/create_uasset.py @@ -0,0 +1,61 @@ +"""Create UAsset.""" +from pathlib import Path + +import unreal + +from openpype.hosts.unreal.api import pipeline +from openpype.pipeline import LegacyCreator + + +class CreateUAsset(LegacyCreator): + """UAsset.""" + + name = "UAsset" + label = "UAsset" + family = "uasset" + icon = "cube" + + root = "/Game/OpenPype" + suffix = "_INS" + + def __init__(self, *args, **kwargs): + super(CreateUAsset, self).__init__(*args, **kwargs) + + def process(self): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + subset = self.data["subset"] + path = f"{self.root}/PublishInstances/" + + unreal.EditorAssetLibrary.make_directory(path) + + selection = [] + if (self.options or {}).get("useSelection"): + sel_objects = unreal.EditorUtilityLibrary.get_selected_assets() + selection = [a.get_path_name() for a in sel_objects] + + if len(selection) != 1: + raise RuntimeError("Please select only one object.") + + obj = selection[0] + + asset = ar.get_asset_by_object_path(obj).get_asset() + sys_path = unreal.SystemLibrary.get_system_path(asset) + + if not sys_path: + raise RuntimeError( + f"{Path(obj).name} is not on the disk. 
Likely it needs to "
+                "be saved first.")
+
+        if Path(sys_path).suffix != ".uasset":
+            raise RuntimeError(f"{Path(sys_path).name} is not a UAsset.")
+
+        unreal.log("selection: {}".format(selection))
+        container_name = f"{subset}{self.suffix}"
+        pipeline.create_publish_instance(
+            instance=container_name, path=path)
+
+        data = self.data.copy()
+        data["members"] = selection
+
+        pipeline.imprint(f"{path}/{container_name}", data)
diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py b/openpype/hosts/unreal/plugins/load/load_alembic_animation.py
similarity index 72%
rename from openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
rename to openpype/hosts/unreal/plugins/load/load_alembic_animation.py
index b2c3889f68..496b6056ea 100644
--- a/openpype/hosts/unreal/plugins/load/load_alembic_skeletalmesh.py
+++ b/openpype/hosts/unreal/plugins/load/load_alembic_animation.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-"""Load Skeletal Mesh alembics."""
+"""Load Alembic Animation."""
 import os
 
 from openpype.pipeline import (
@@ -11,15 +11,41 @@ from openpype.hosts.unreal.api import pipeline as unreal_pipeline
 import unreal  # noqa
 
 
-class SkeletalMeshAlembicLoader(plugin.Loader):
+class AnimationAlembicLoader(plugin.Loader):
     """Load Unreal SkeletalMesh from Alembic"""
 
-    families = ["pointcache"]
-    label = "Import Alembic Skeletal Mesh"
+    families = ["animation"]
+    label = "Import Alembic Animation"
     representations = ["abc"]
     icon = "cube"
     color = "orange"
 
+    def get_task(self, filename, asset_dir, asset_name, replace):
+        task = unreal.AssetImportTask()
+        options = unreal.AbcImportSettings()
+        sm_settings = unreal.AbcStaticMeshSettings()
+        conversion_settings = unreal.AbcConversionSettings(
+            preset=unreal.AbcConversionPreset.CUSTOM,
+            flip_u=False, flip_v=False,
+            rotation=[0.0, 0.0, 0.0],
+            scale=[1.0, 1.0, -1.0])
+
+        task.set_editor_property('filename', filename)
+        task.set_editor_property('destination_path', asset_dir)
+        task.set_editor_property('destination_name', asset_name)
+        task.set_editor_property('replace_existing', replace)
+        task.set_editor_property('automated', True)
+        task.set_editor_property('save', True)
+
+        options.set_editor_property(
+            'import_type', unreal.AlembicImportType.SKELETAL)
+
+        options.static_mesh_settings = sm_settings
+        options.conversion_settings = conversion_settings
+        task.options = options
+
+        return task
+
     def load(self, context, name, namespace, data):
         """Load and containerise representation into Content Browser.
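`get_task()` above centralises the Alembic import settings: `AbcConversionPreset.CUSTOM` with `scale=[1.0, 1.0, -1.0]` flips handedness so DCC-exported caches land correctly in Unreal. The resulting task runs like any other import; a minimal one-off sketch of the same configuration outside the loader (file and destination paths are hypothetical):

```python
import unreal

task = unreal.AssetImportTask()
task.set_editor_property('filename', "C:/publish/heroAnimation.abc")  # hypothetical
task.set_editor_property(
    'destination_path', "/Game/OpenPype/hero/animationMain_v001")     # hypothetical
task.set_editor_property('automated', True)
task.set_editor_property('save', True)

options = unreal.AbcImportSettings()
options.set_editor_property('import_type', unreal.AlembicImportType.SKELETAL)
options.conversion_settings = unreal.AbcConversionSettings(
    preset=unreal.AbcConversionPreset.CUSTOM,
    flip_u=False, flip_v=False,
    rotation=[0.0, 0.0, 0.0], scale=[1.0, 1.0, -1.0])
task.options = options

unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
```
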
@@ -50,36 +76,25 @@ class SkeletalMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = self.get_task(self.fname, asset_dir, asset_name, False) - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) + asset_tools = unreal.AssetToolsHelpers.get_asset_tools() + asset_tools.import_asset_tasks([task]) - # set import options here - # Unreal 4.24 ignores the settings. It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", @@ -110,27 +125,14 @@ class SkeletalMeshAlembicLoader(plugin.Loader): source_path = get_representation_path(representation) destination_path = container["namespace"] - task = unreal.AssetImportTask() + task = self.get_task(source_path, destination_path, name, True) - task.set_editor_property('filename', source_path) - task.set_editor_property('destination_path', destination_path) - # strip suffix - task.set_editor_property('destination_name', name) - task.set_editor_property('replace_existing', True) - task.set_editor_property('automated', True) - task.set_editor_property('save', True) - - # set import options here - # Unreal 4.24 ignores the settings. 
It works with Unreal 4.26 - options = unreal.AbcImportSettings() - options.set_editor_property( - 'import_type', unreal.AlembicImportType.SKELETAL) - - task.options = options # do import fbx and replace existing data - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) + asset_tools = unreal.AssetToolsHelpers.get_asset_tools() + asset_tools.import_asset_tasks([task]) + + container_path = f"{container['namespace']}/{container['objectName']}" + # update metadata unreal_pipeline.imprint( container_path, diff --git a/openpype/hosts/unreal/plugins/load/load_animation.py b/openpype/hosts/unreal/plugins/load/load_animation.py index c9a1633031..1fe0bef462 100644 --- a/openpype/hosts/unreal/plugins/load/load_animation.py +++ b/openpype/hosts/unreal/plugins/load/load_animation.py @@ -3,13 +3,18 @@ import os import json +import unreal +from unreal import EditorAssetLibrary +from unreal import MovieSceneSkeletalAnimationTrack +from unreal import MovieSceneSkeletalAnimationSection + +from openpype.pipeline.context_tools import get_current_project_asset from openpype.pipeline import ( get_representation_path, AVALON_CONTAINER_ID ) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline -import unreal # noqa class AnimationFBXLoader(plugin.Loader): @@ -21,59 +26,13 @@ class AnimationFBXLoader(plugin.Loader): icon = "cube" color = "orange" - def load(self, context, name, namespace, options=None): - """ - Load and containerise representation into Content Browser. - - This is two step process. First, import FBX to temporary path and - then call `containerise()` on it - this moves all content to new - directory and then it will create AssetContainer there and imprint it - with metadata. This will mark this path as container. - - Args: - context (dict): application context - name (str): subset name - namespace (str): in Unreal this is basically path to container. - This is not passed here, so namespace is set - by `containerise()` because only then we know - real path. - data (dict): Those would be data to be imprinted. This is not used - now, data are imprinted by `containerise()`. 
- - Returns: - list(str): list of container content - - """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" - asset = context.get('asset').get('name') - suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) - - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") - - container_name += suffix - - unreal.EditorAssetLibrary.make_directory(asset_dir) - + def _process(self, asset_dir, asset_name, instance_name): automated = False actor = None task = unreal.AssetImportTask() task.options = unreal.FbxImportUI() - lib_path = self.fname.replace("fbx", "json") - - with open(lib_path, "r") as fp: - data = json.load(fp) - - instance_name = data.get("instance_name") - if instance_name: automated = True # Old method to get the actor @@ -94,6 +53,8 @@ class AnimationFBXLoader(plugin.Loader): if not actor: return None + asset_doc = get_current_project_asset(fields=["data.fps"]) + task.set_editor_property('filename', self.fname) task.set_editor_property('destination_path', asset_dir) task.set_editor_property('destination_name', asset_name) @@ -119,18 +80,144 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + animation = None + + for a in asset_content: + imported_asset_data = EditorAssetLibrary.find_asset_data(a) + imported_asset = unreal.AssetRegistryHelpers.get_asset( + imported_asset_data) + if imported_asset.__class__ == unreal.AnimSequence: + animation = imported_asset + break + + if animation: + animation.set_editor_property('enable_root_motion', True) + actor.skeletal_mesh_component.set_editor_property( + 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) + actor.skeletal_mesh_component.animation_data.set_editor_property( + 'anim_to_play', animation) + + return animation + + def load(self, context, name, namespace, options=None): + """ + Load and containerise representation into Content Browser. + + This is two step process. First, import FBX to temporary path and + then call `containerise()` on it - this moves all content to new + directory and then it will create AssetContainer there and imprint it + with metadata. This will mark this path as container. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + data (dict): Those would be data to be imprinted. 
This is not used + now, data are imprinted by `containerise()`. + + Returns: + list(str): list of container content + """ + # Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/OpenPype" + asset = context.get('asset').get('name') + suffix = "_CON" + asset_name = f"{asset}_{name}" if asset else f"{name}" + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + f"{root}/Animations/{asset}/{name}", suffix="") + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{hierarchy[0]}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_editor_property('object_path') + + hierarchy_dir = root + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir = f"{hierarchy_dir}/{asset}" + + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{hierarchy_dir}/"], + recursive_paths=True) + levels = ar.get_assets(_filter) + level = levels[0].get_editor_property('object_path') + + unreal.EditorLevelLibrary.save_all_dirty_levels() + unreal.EditorLevelLibrary.load_level(level) + + container_name += suffix + + EditorAssetLibrary.make_directory(asset_dir) + + libpath = self.fname.replace("fbx", "json") + + with open(libpath, "r") as fp: + data = json.load(fp) + + instance_name = data.get("instance_name") + + animation = self._process(asset_dir, asset_name, instance_name) + + asset_content = EditorAssetLibrary.list_assets( + hierarchy_dir, recursive=True, include_folder=False) + + # Get the sequence for the layout, excluding the camera one. + sequences = [a for a in asset_content + if (EditorAssetLibrary.find_asset_data(a).get_class() == + unreal.LevelSequence.static_class() and + "_camera" not in a.split("/")[-1])] + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for s in sequences: + sequence = ar.get_asset_by_object_path(s).get_asset() + possessables = [ + p for p in sequence.get_possessables() + if p.get_display_name() == instance_name] + + for p in possessables: + tracks = [ + t for t in p.get_tracks() + if (t.get_class() == + MovieSceneSkeletalAnimationTrack.static_class())] + + for t in tracks: + sections = [ + s for s in t.get_sections() + if (s.get_class() == + MovieSceneSkeletalAnimationSection.static_class())] + + for s in sections: + s.params.set_editor_property('animation', animation) + # Create Asset Container unreal_pipeline.create_container( container=container_name, path=asset_dir) @@ -147,36 +234,21 @@ class AnimationFBXLoader(plugin.Loader): "parent": context["representation"]["parent"], "family": context["representation"]["context"]["family"] } - unreal_pipeline.imprint( - "{}/{}".format(asset_dir, container_name), data) + unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data) - asset_content = unreal.EditorAssetLibrary.list_assets( - asset_dir, recursive=True, include_folder=True - ) + imported_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) - animation = None + for a in imported_content: + EditorAssetLibrary.save_asset(a) - for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) - imported_asset_data = unreal.EditorAssetLibrary.find_asset_data(a) - imported_asset = unreal.AssetRegistryHelpers.get_asset( - imported_asset_data) - if imported_asset.__class__ == unreal.AnimSequence: - animation = imported_asset - break - - if 
animation: - animation.set_editor_property('enable_root_motion', True) - actor.skeletal_mesh_component.set_editor_property( - 'animation_mode', unreal.AnimationMode.ANIMATION_SINGLE_NODE) - actor.skeletal_mesh_component.animation_data.set_editor_property( - 'anim_to_play', animation) - - return asset_content + unreal.EditorLevelLibrary.save_current_level() + unreal.EditorLevelLibrary.load_level(master_level) def update(self, container, representation): name = container["asset_name"] source_path = get_representation_path(representation) + asset_doc = get_current_project_asset(fields=["data.fps"]) destination_path = container["namespace"] task = unreal.AssetImportTask() @@ -208,25 +280,26 @@ class AnimationFBXLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) - skeletal_mesh = unreal.EditorAssetLibrary.load_asset( + skeletal_mesh = EditorAssetLibrary.load_asset( container.get('namespace') + "/" + container.get('asset_name')) skeleton = skeletal_mesh.get_editor_property('skeleton') task.options.set_editor_property('skeleton', skeleton) # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) + container_path = f'{container["namespace"]}/{container["objectName"]}' # update metadata unreal_pipeline.imprint( container_path, @@ -235,22 +308,22 @@ class AnimationFBXLoader(plugin.Loader): "parent": str(representation["parent"]) }) - asset_content = unreal.EditorAssetLibrary.list_assets( + asset_content = EditorAssetLibrary.list_assets( destination_path, recursive=True, include_folder=True ) for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) + EditorAssetLibrary.save_asset(a) def remove(self, container): path = container["namespace"] parent_path = os.path.dirname(path) - unreal.EditorAssetLibrary.delete_directory(path) + EditorAssetLibrary.delete_directory(path) - asset_content = unreal.EditorAssetLibrary.list_assets( + asset_content = EditorAssetLibrary.list_assets( parent_path, recursive=False, include_folder=True ) if len(asset_content) == 0: - unreal.EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_camera.py b/openpype/hosts/unreal/plugins/load/load_camera.py index 40bca0b0c7..ca6b0ce736 100644 --- a/openpype/hosts/unreal/plugins/load/load_camera.py +++ b/openpype/hosts/unreal/plugins/load/load_camera.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- """Load camera from FBX.""" -import os +from pathlib import Path -from avalon import io -from openpype.pipeline import AVALON_CONTAINER_ID +import unreal +from unreal import EditorAssetLibrary +from unreal import EditorLevelLibrary +from unreal import EditorLevelUtils +from openpype.client 
import get_assets, get_asset_by_name +from openpype.pipeline import ( + AVALON_CONTAINER_ID, + legacy_io, +) from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline -import unreal # noqa class CameraLoader(plugin.Loader): @@ -18,6 +24,59 @@ class CameraLoader(plugin.Loader): icon = "cube" color = "orange" + def _set_sequence_hierarchy( + self, seq_i, seq_j, min_frame_j, max_frame_j + ): + tracks = seq_i.get_master_tracks() + track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + track = t + break + if not track: + track = seq_i.add_master_track(unreal.MovieSceneSubTrack) + + subscenes = track.get_sections() + subscene = None + for s in subscenes: + if s.get_editor_property('sub_sequence') == seq_j: + subscene = s + break + if not subscene: + subscene = track.add_section() + subscene.set_row_index(len(track.get_sections())) + subscene.set_editor_property('sub_sequence', seq_j) + subscene.set_range( + min_frame_j, + max_frame_j + 1) + + def _import_camera( + self, world, sequence, bindings, import_fbx_settings, import_filename + ): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = int(ue_version[0]) + ue_minor = int(ue_version[1]) + + if ue_major == 4 and ue_minor <= 26: + unreal.SequencerTools.import_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5: + unreal.SequencerTools.import_level_sequence_fbx( + world, + sequence, + bindings, + import_fbx_settings, + import_filename + ) + else: + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + def load(self, context, name, namespace, data): """ Load and containerise representation into Content Browser. @@ -41,8 +100,14 @@ class CameraLoader(plugin.Loader): list(str): list of container content """ - # Create directory for asset and OpenPype container - root = "/Game/OpenPype/Assets" + # Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') + root = "/Game/OpenPype" + hierarchy_dir = root + hierarchy_dir_list = [] + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" if asset: @@ -52,10 +117,10 @@ class CameraLoader(plugin.Loader): tools = unreal.AssetToolsHelpers().get_asset_tools() + # Create a unique name for the camera directory unique_number = 1 - - if unreal.EditorAssetLibrary.does_directory_exist(f"{root}/{asset}"): - asset_content = unreal.EditorAssetLibrary.list_assets( + if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"): + asset_content = EditorAssetLibrary.list_assets( f"{root}/{asset}", recursive=False, include_folder=True ) @@ -74,42 +139,152 @@ class CameraLoader(plugin.Loader): unique_number = f_numbers[-1] + 1 asset_dir, container_name = tools.create_unique_asset_name( - f"{root}/{asset}/{name}_{unique_number:02d}", suffix="") + f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="") + + asset_path = Path(asset_dir) + asset_path_parent = str(asset_path.parent.as_posix()) container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + EditorAssetLibrary.make_directory(asset_dir) - sequence = tools.create_asset( - asset_name=asset_name, + # Create map for the shot, and create hierarchy of map. If the maps + # already exist, we will use them. 
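
The hierarchy handling above turns the asset's `parents` list into nested content-browser folders. A minimal sketch of that accumulation, with an illustrative `parents` value (the real one comes from the asset document's data):

```python
# Illustrative values; the real list comes from
# context['asset']['data']['parents'].
root = "/Game/OpenPype"
hierarchy = ["episodes", "ep01"]

hierarchy_dir = root
hierarchy_dir_list = []
for h in hierarchy:
    hierarchy_dir = f"{hierarchy_dir}/{h}"
    hierarchy_dir_list.append(hierarchy_dir)

# hierarchy_dir_list == ["/Game/OpenPype/episodes",
#                        "/Game/OpenPype/episodes/ep01"]
# The shot's own folders and maps are then created one level deeper,
# under f"{hierarchy_dir}/{asset}".
```
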
+        h_dir = hierarchy_dir_list[0]
+        h_asset = hierarchy[0]
+        master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
+        if not EditorAssetLibrary.does_asset_exist(master_level):
+            EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map")
+
+        level = f"{asset_path_parent}/{asset}_map.{asset}_map"
+        if not EditorAssetLibrary.does_asset_exist(level):
+            EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map")
+
+        EditorLevelLibrary.load_level(master_level)
+        EditorLevelUtils.add_level_to_world(
+            EditorLevelLibrary.get_editor_world(),
+            level,
+            unreal.LevelStreamingDynamic
+        )
+        EditorLevelLibrary.save_all_dirty_levels()
+        EditorLevelLibrary.load_level(level)
+
+        project_name = legacy_io.active_project()
+        # TODO refactor
+        #   - creation of the hierarchy should be a function in the Unreal
+        #       integration; it is used in multiple loaders but must not be
+        #       loader logic
+        #   - it is hard to say what the purpose of the loop is
+        #   - variable names do not match their meaning
+        #       - why is the scene stored in 'sequences'?
+        #       - asset documents vs. elements
+        #   - clean up variable names in the whole function
+        #       - e.g. 'asset', 'asset_name', 'asset_data', 'asset_doc'
+        #   - really inefficient queries of asset documents
+        #   - an existing asset in the scene is assumed to have correct values
+        #   - variable 'elements' is modified during its own loop
+        # Get all the sequences in the hierarchy. It will create them, if
+        # they don't exist.
+        sequences = []
+        frame_ranges = []
+        i = 0
+        for h in hierarchy_dir_list:
+            root_content = EditorAssetLibrary.list_assets(
+                h, recursive=False, include_folder=False)
+
+            existing_sequences = [
+                EditorAssetLibrary.find_asset_data(asset)
+                for asset in root_content
+                if EditorAssetLibrary.find_asset_data(
+                    asset).get_class().get_name() == 'LevelSequence'
+            ]
+
+            if not existing_sequences:
+                scene = tools.create_asset(
+                    asset_name=hierarchy[i],
+                    package_path=h,
+                    asset_class=unreal.LevelSequence,
+                    factory=unreal.LevelSequenceFactoryNew()
+                )
+
+                asset_data = get_asset_by_name(
+                    project_name,
+                    h.split('/')[-1],
+                    fields=["_id", "data.fps"]
+                )
+
+                start_frames = []
+                end_frames = []
+
+                elements = list(get_assets(
+                    project_name,
+                    parent_ids=[asset_data["_id"]],
+                    fields=["_id", "data.clipIn", "data.clipOut"]
+                ))
+
+                for e in elements:
+                    start_frames.append(e.get('data').get('clipIn'))
+                    end_frames.append(e.get('data').get('clipOut'))
+
+                    elements.extend(get_assets(
+                        project_name,
+                        parent_ids=[e["_id"]],
+                        fields=["_id", "data.clipIn", "data.clipOut"]
+                    ))
+
+                min_frame = min(start_frames)
+                max_frame = max(end_frames)
+
+                scene.set_display_rate(
+                    unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
+                scene.set_playback_start(min_frame)
+                scene.set_playback_end(max_frame)
+
+                sequences.append(scene)
+                frame_ranges.append((min_frame, max_frame))
+            else:
+                for e in existing_sequences:
+                    sequences.append(e.get_asset())
+                    frame_ranges.append((
+                        e.get_asset().get_playback_start(),
+                        e.get_asset().get_playback_end()))
+
+            i += 1
+
+        EditorAssetLibrary.make_directory(asset_dir)
+
+        cam_seq = tools.create_asset(
+            asset_name=f"{asset}_camera",
+            package_path=asset_dir,
+            asset_class=unreal.LevelSequence,
+            factory=unreal.LevelSequenceFactoryNew()
+        )
 
-        io_asset = io.Session["AVALON_ASSET"]
-        asset_doc = io.find_one({
-            "type": "asset",
-            "name": io_asset
-        })
+        # Add sequences data to hierarchy
+        for i in range(0, len(sequences) - 1):
+            self._set_sequence_hierarchy(
+                sequences[i], sequences[i + 1],
+                frame_ranges[i + 1][0], frame_ranges[i + 1][1])
 
-        data = asset_doc.get("data")
-
-        if data:
-
sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0)) - sequence.set_playback_start(data.get("frameStart")) - sequence.set_playback_end(data.get("frameEnd")) + data = get_asset_by_name(project_name, asset)["data"] + cam_seq.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + cam_seq.set_playback_start(0) + cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + self._set_sequence_hierarchy( + sequences[-1], cam_seq, + data.get('clipIn'), data.get('clipOut')) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) - unreal.SequencerTools.import_fbx( - unreal.EditorLevelLibrary.get_editor_world(), - sequence, - sequence.get_bindings(), - settings, - self.fname - ) + if cam_seq: + self._import_camera( + EditorLevelLibrary.get_editor_world(), + cam_seq, + cam_seq.get_bindings(), + settings, + self.fname + ) # Create Asset Container unreal_pipeline.create_container( @@ -130,81 +305,258 @@ class CameraLoader(plugin.Loader): unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) - asset_content = unreal.EditorAssetLibrary.list_assets( + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(master_level) + + asset_content = EditorAssetLibrary.list_assets( asset_dir, recursive=True, include_folder=True ) for a in asset_content: - unreal.EditorAssetLibrary.save_asset(a) + EditorAssetLibrary.save_asset(a) return asset_content def update(self, container, representation): - path = container["namespace"] - ar = unreal.AssetRegistryHelpers.get_asset_registry() - tools = unreal.AssetToolsHelpers().get_asset_tools() - asset_content = unreal.EditorAssetLibrary.list_assets( - path, recursive=False, include_folder=False - ) - asset_name = "" - for a in asset_content: - asset = ar.get_asset_by_object_path(a) - if a.endswith("_CON"): - loaded_asset = unreal.EditorAssetLibrary.load_asset(a) - unreal.EditorAssetLibrary.set_metadata_tag( - loaded_asset, "representation", str(representation["_id"]) - ) - unreal.EditorAssetLibrary.set_metadata_tag( - loaded_asset, "parent", str(representation["parent"]) - ) - asset_name = unreal.EditorAssetLibrary.get_metadata_tag( - loaded_asset, "asset_name" - ) - elif asset.asset_class == "LevelSequence": - unreal.EditorAssetLibrary.delete_asset(a) + root = "/Game/OpenPype" - sequence = tools.create_asset( - asset_name=asset_name, - package_path=path, - asset_class=unreal.LevelSequence, - factory=unreal.LevelSequenceFactoryNew() + asset_dir = container.get('namespace') + + context = representation.get("context") + + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + EditorLevelLibrary.save_current_level() + + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[str(Path(asset_dir).parent.as_posix())], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + EditorLevelLibrary.load_level(maps[0].get_full_name()) + + level_sequence = sequences[0].get_asset() + + display_rate = level_sequence.get_display_rate() + playback_start = level_sequence.get_playback_start() + playback_end = level_sequence.get_playback_end() + + sequence_name = f"{container.get('asset')}_camera" + + # Get the actors in the level sequence. 
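
The update and remove paths here lean on the same asset-registry query pattern several times. A small helper, illustrative only, that condenses it; naming the variable `_filter` also avoids shadowing the `filter` builtin, which the code above currently does:

```python
import unreal

def find_assets(class_name, package_path, recursive=False):
    """Return asset-registry entries of one class under a package path."""
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    _filter = unreal.ARFilter(
        class_names=[class_name],
        package_paths=[package_path],
        recursive_paths=recursive)
    return ar.get_assets(_filter)

# e.g. the level sequence stored in the camera container's folder:
#   level_sequence = find_assets("LevelSequence", asset_dir)[0].get_asset()
```
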
+ objs = unreal.SequencerTools.get_bound_objects( + unreal.EditorLevelLibrary.get_editor_world(), + level_sequence, + level_sequence.get_bindings(), + unreal.SequencerScriptingRange( + has_start_value=True, + has_end_value=True, + inclusive_start=level_sequence.get_playback_start(), + exclusive_end=level_sequence.get_playback_end() + ) ) - io_asset = io.Session["AVALON_ASSET"] - asset_doc = io.find_one({ - "type": "asset", - "name": io_asset - }) + # Delete actors from the map + for o in objs: + if o.bound_objects[0].get_class().get_name() == "CineCameraActor": + actor_path = o.bound_objects[0].get_path_name().split(":")[-1] + actor = EditorLevelLibrary.get_actor_reference(actor_path) + EditorLevelLibrary.destroy_actor(actor) - data = asset_doc.get("data") + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. + root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() - if data: - sequence.set_display_rate(unreal.FrameRate(data.get("fps"), 1.0)) - sequence.set_playback_start(data.get("frameStart")) - sequence.set_playback_end(data.get("frameEnd")) + sequences = [master_sequence] + + parent = None + sub_scene = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + sub_scene = ss + # subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. 
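
Before the camera is re-imported, every actor bound to the old sequence has to go, or the level accumulates duplicate cameras. A sketch of that cleanup as a standalone helper, built from the same `SequencerTools.get_bound_objects` calls used above (the function name is mine):

```python
import unreal
from unreal import EditorLevelLibrary

def destroy_bound_camera_actors(level_sequence):
    """Destroy every CineCameraActor bound to a level sequence."""
    playback_range = unreal.SequencerScriptingRange(
        has_start_value=True,
        has_end_value=True,
        inclusive_start=level_sequence.get_playback_start(),
        exclusive_end=level_sequence.get_playback_end())
    objs = unreal.SequencerTools.get_bound_objects(
        EditorLevelLibrary.get_editor_world(),
        level_sequence,
        level_sequence.get_bindings(),
        playback_range)
    for o in objs:
        # Defensive: skip bindings that resolve to nothing.
        if not o.bound_objects:
            continue
        bound = o.bound_objects[0]
        if bound.get_class().get_name() == "CineCameraActor":
            actor_path = bound.get_path_name().split(":")[-1]
            actor = EditorLevelLibrary.get_actor_reference(actor_path)
            EditorLevelLibrary.destroy_actor(actor)
```
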
+ i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_asset(level_sequence.get_path_name()) settings = unreal.MovieSceneUserImportFBXSettings() settings.set_editor_property('reduce_keys', False) - unreal.SequencerTools.import_fbx( - unreal.EditorLevelLibrary.get_editor_world(), - sequence, - sequence.get_bindings(), + tools = unreal.AssetToolsHelpers().get_asset_tools() + new_sequence = tools.create_asset( + asset_name=sequence_name, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + new_sequence.set_display_rate(display_rate) + new_sequence.set_playback_start(playback_start) + new_sequence.set_playback_end(playback_end) + + sub_scene.set_sequence(new_sequence) + + self._import_camera( + EditorLevelLibrary.get_editor_world(), + new_sequence, + new_sequence.get_bindings(), settings, str(representation["data"]["path"]) ) + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() + + asset_content = EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=False) + + for a in asset_content: + EditorAssetLibrary.save_asset(a) + + EditorLevelLibrary.load_level(master_level) + def remove(self, container): - path = container["namespace"] - parent_path = os.path.dirname(path) + path = Path(container.get("namespace")) + parent_path = str(path.parent.as_posix()) - unreal.EditorAssetLibrary.delete_directory(path) + ar = unreal.AssetRegistryHelpers.get_asset_registry() + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{str(path.as_posix())}"], + recursive_paths=False) + sequences = ar.get_assets(filter) - asset_content = unreal.EditorAssetLibrary.list_assets( + if not sequences: + raise Exception("Could not find sequence.") + + world = ar.get_asset_by_object_path( + EditorLevelLibrary.get_editor_world().get_path_name()) + + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{parent_path}"], + recursive_paths=True) + maps = ar.get_assets(filter) + + # There should be only one map in the list + if not maps: + raise Exception("Could not find map.") + + map = maps[0] + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(map.get_full_name()) + + # Remove the camera from the level. + actors = EditorLevelLibrary.get_all_level_actors() + + for a in actors: + if a.__class__ == unreal.CineCameraActor: + EditorLevelLibrary.destroy_actor(a) + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(world.get_full_name()) + + # There should be only one sequence in the path. + sequence_name = sequences[0].asset_name + + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to find + # the level sequence. 
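
Both this `update()` and the `remove()` that follows walk the subscene hierarchy from the master sequence to locate one named sequence. A condensed sketch of that traversal as a reusable function (breadth-first over nested `MovieSceneSubTrack` sections; the name `find_subscene_section` is mine):

```python
import unreal

def find_subscene_section(master_sequence, sequence_name):
    """Return (parent_sequence, section) for the subscene section whose
    sub-sequence has the given name, or (None, None) if absent."""
    pending = [master_sequence]
    while pending:
        seq = pending.pop(0)
        for track in seq.get_master_tracks():
            if track.get_class() != unreal.MovieSceneSubTrack.static_class():
                continue
            for section in track.get_sections():
                if section.get_sequence().get_name() == sequence_name:
                    return seq, section
                # Not a match: descend into the nested sequence.
                pending.append(section.get_sequence())
    return None, None
```
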
+ root = "/Game/OpenPype" + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(filter) + master_sequence = sequences[0].get_asset() + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + break + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if ss.get_sequence().get_name() == sequence_name: + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if parent: + break + + assert parent, "Could not find the parent sequence" + + EditorAssetLibrary.delete_directory(str(path.as_posix())) + + # Check if there isn't any more assets in the parent folder, and + # delete it if not. + asset_content = EditorAssetLibrary.list_assets( parent_path, recursive=False, include_folder=True ) if len(asset_content) == 0: - unreal.EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py b/openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py similarity index 100% rename from openpype/hosts/unreal/plugins/load/load_alembic_geometrycache.py rename to openpype/hosts/unreal/plugins/load/load_geometrycache_abc.py diff --git a/openpype/hosts/unreal/plugins/load/load_layout.py b/openpype/hosts/unreal/plugins/load/load_layout.py index 7f6ce7d822..c1d66ddf2a 100644 --- a/openpype/hosts/unreal/plugins/load/load_layout.py +++ b/openpype/hosts/unreal/plugins/load/load_layout.py @@ -1,23 +1,30 @@ # -*- coding: utf-8 -*- """Loader for layouts.""" -import os import json from pathlib import Path import unreal from unreal import EditorAssetLibrary from unreal import EditorLevelLibrary +from unreal import EditorLevelUtils from unreal import AssetToolsHelpers from unreal import FBXImportType -from unreal import MathLibrary as umath +from unreal import MovieSceneLevelVisibilityTrack +from unreal import MovieSceneSubTrack +from bson.objectid import ObjectId + +from openpype.client import get_asset_by_name, get_assets from openpype.pipeline import ( discover_loader_plugins, loaders_from_representation, load_container, get_representation_path, AVALON_CONTAINER_ID, + legacy_io, ) +from openpype.pipeline.context_tools import get_current_project_asset +from openpype.settings import get_current_project_settings from openpype.hosts.unreal.api import plugin from openpype.hosts.unreal.api import pipeline as unreal_pipeline @@ -31,7 +38,7 @@ class LayoutLoader(plugin.Loader): label = "Load Layout" icon = "code-fork" color = "orange" - ASSET_ROOT = "/Game/OpenPype/Assets" + ASSET_ROOT = "/Game/OpenPype" def _get_asset_containers(self, path): ar = unreal.AssetRegistryHelpers.get_asset_registry() @@ -86,52 +93,146 @@ class LayoutLoader(plugin.Loader): return None @staticmethod - def _process_family(assets, class_name, transform, inst_name=None): + def _set_sequence_hierarchy( + seq_i, seq_j, max_frame_i, min_frame_j, max_frame_j, map_paths + ): + # Get existing sequencer tracks or create them if they don't exist + tracks = seq_i.get_master_tracks() + subscene_track = None + 
visibility_track = None + for t in tracks: + if t.get_class() == unreal.MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + unreal.MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if not subscene_track: + subscene_track = seq_i.add_master_track(unreal.MovieSceneSubTrack) + if not visibility_track: + visibility_track = seq_i.add_master_track( + unreal.MovieSceneLevelVisibilityTrack) + + # Create the sub-scene section + subscenes = subscene_track.get_sections() + subscene = None + for s in subscenes: + if s.get_editor_property('sub_sequence') == seq_j: + subscene = s + break + if not subscene: + subscene = subscene_track.add_section() + subscene.set_row_index(len(subscene_track.get_sections())) + subscene.set_editor_property('sub_sequence', seq_j) + subscene.set_range( + min_frame_j, + max_frame_j + 1) + + # Create the visibility section + ar = unreal.AssetRegistryHelpers.get_asset_registry() + maps = [] + for m in map_paths: + # Unreal requires to load the level to get the map name + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(m) + maps.append(str(ar.get_asset_by_object_path(m).asset_name)) + + vis_section = visibility_track.add_section() + index = len(visibility_track.get_sections()) + + vis_section.set_range( + min_frame_j, + max_frame_j + 1) + vis_section.set_visibility(unreal.LevelVisibility.VISIBLE) + vis_section.set_row_index(index) + vis_section.set_level_names(maps) + + if min_frame_j > 1: + hid_section = visibility_track.add_section() + hid_section.set_range( + 1, + min_frame_j) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + if max_frame_j < max_frame_i: + hid_section = visibility_track.add_section() + hid_section.set_range( + max_frame_j + 1, + max_frame_i + 1) + hid_section.set_visibility(unreal.LevelVisibility.HIDDEN) + hid_section.set_row_index(index) + hid_section.set_level_names(maps) + + def _transform_from_basis(self, transform, basis): + """Transform a transform from a basis to a new basis.""" + # Get the basis matrix + basis_matrix = unreal.Matrix( + basis[0], + basis[1], + basis[2], + basis[3] + ) + transform_matrix = unreal.Matrix( + transform[0], + transform[1], + transform[2], + transform[3] + ) + + new_transform = ( + basis_matrix.get_inverse() * transform_matrix * basis_matrix) + + return new_transform.transform() + + def _process_family( + self, assets, class_name, transform, basis, sequence, inst_name=None + ): ar = unreal.AssetRegistryHelpers.get_asset_registry() actors = [] + bindings = [] for asset in assets: obj = ar.get_asset_by_object_path(asset).get_asset() if obj.get_class().get_name() == class_name: + t = self._transform_from_basis(transform, basis) actor = EditorLevelLibrary.spawn_actor_from_object( - obj, - transform.get('translation') + obj, t.translation ) - if inst_name: - try: - # Rename method leads to crash - # actor.rename(name=inst_name) + actor.set_actor_rotation(t.rotation.rotator(), False) + actor.set_actor_scale3d(t.scale3d) - # The label works, although it make it slightly more - # complicated to check for the names, as we need to - # loop through all the actors in the level - actor.set_actor_label(inst_name) - except Exception as e: - print(e) - actor.set_actor_rotation(unreal.Rotator( - umath.radians_to_degrees( - transform.get('rotation').get('x')), - -umath.radians_to_degrees( - transform.get('rotation').get('y')), - umath.radians_to_degrees( - 
transform.get('rotation').get('z')), - ), False) - actor.set_actor_scale3d(transform.get('scale')) + if class_name == 'SkeletalMesh': + skm_comp = actor.get_editor_property( + 'skeletal_mesh_component') + skm_comp.set_bounds_scale(10.0) actors.append(actor) - return actors + if sequence: + binding = None + for p in sequence.get_possessables(): + if p.get_name() == actor.get_name(): + binding = p + break + + if not binding: + binding = sequence.add_possessable(actor) + + bindings.append(binding) + + return actors, bindings - @staticmethod def _import_animation( - asset_dir, path, instance_name, skeleton, actors_dict, - animation_file): + self, asset_dir, path, instance_name, skeleton, actors_dict, + animation_file, bindings_dict, sequence + ): anim_file = Path(animation_file) anim_file_name = anim_file.with_suffix('') anim_path = f"{asset_dir}/animations/{anim_file_name}" + asset_doc = get_current_project_asset() # Import animation task = unreal.AssetImportTask() task.options = unreal.FbxImportUI() @@ -164,13 +265,15 @@ class LayoutLoader(plugin.Loader): task.options.anim_sequence_import_data.set_editor_property( 'import_meshes_in_bone_hierarchy', False) task.options.anim_sequence_import_data.set_editor_property( - 'use_default_sample_rate', True) + 'use_default_sample_rate', False) + task.options.anim_sequence_import_data.set_editor_property( + 'custom_sample_rate', asset_doc.get("data", {}).get("fps")) task.options.anim_sequence_import_data.set_editor_property( 'import_custom_attribute', True) task.options.anim_sequence_import_data.set_editor_property( 'import_bone_tracks', True) task.options.anim_sequence_import_data.set_editor_property( - 'remove_redundant_keys', True) + 'remove_redundant_keys', False) task.options.anim_sequence_import_data.set_editor_property( 'convert_scene', True) @@ -205,7 +308,109 @@ class LayoutLoader(plugin.Loader): actor.skeletal_mesh_component.animation_data.set_editor_property( 'anim_to_play', animation) - def _process(self, lib_path, asset_dir, loaded=None): + if sequence: + # Add animation to the sequencer + bindings = bindings_dict.get(instance_name) + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + for binding in bindings: + tracks = binding.get_tracks() + track = None + track = tracks[0] if tracks else binding.add_track( + unreal.MovieSceneSkeletalAnimationTrack) + + sections = track.get_sections() + section = None + if not sections: + section = track.add_section() + else: + section = sections[0] + + sec_params = section.get_editor_property('params') + curr_anim = sec_params.get_editor_property('animation') + + if curr_anim: + # Checks if the animation path has a container. + # If it does, it means that the animation is + # already in the sequencer. 
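
`_transform_from_basis` is a change of basis: the layout JSON stores a row-major 4x4 matrix in the DCC's axis convention together with the basis it was authored in, and conjugating by that basis (B⁻¹·M·B) re-expresses the transform in Unreal's convention. A sketch under those assumptions (helper name is mine):

```python
import unreal

def transform_from_basis(transform_rows, basis_rows):
    """Conjugate a 4x4 transform by its authoring basis: B^-1 * M * B.

    Each *_rows argument is a list of four row vectors, as stored in
    the layout JSON.
    """
    basis = unreal.Matrix(*basis_rows)
    matrix = unreal.Matrix(*transform_rows)
    new_matrix = basis.get_inverse() * matrix * basis
    # .transform() splits the matrix into translation/rotation/scale3d,
    # which _process_family feeds to the spawned actor.
    return new_matrix.transform()
```

With an identity basis the transform passes through unchanged, which is the expected behavior when the layout was authored in Unreal's own convention.
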
+ anim_path = str(Path( + curr_anim.get_path_name()).parent + ).replace('\\', '/') + + _filter = unreal.ARFilter( + class_names=["AssetContainer"], + package_paths=[anim_path], + recursive_paths=False) + containers = ar.get_assets(_filter) + + if len(containers) > 0: + return + + section.set_range( + sequence.get_playback_start(), + sequence.get_playback_end()) + sec_params = section.get_editor_property('params') + sec_params.set_editor_property('animation', animation) + + @staticmethod + def _generate_sequence(h, h_dir): + tools = unreal.AssetToolsHelpers().get_asset_tools() + + sequence = tools.create_asset( + asset_name=h, + package_path=h_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + project_name = legacy_io.active_project() + asset_data = get_asset_by_name( + project_name, + h_dir.split('/')[-1], + fields=["_id", "data.fps"] + ) + + start_frames = [] + end_frames = [] + + elements = list(get_assets( + project_name, + parent_ids=[asset_data["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + for e in elements: + start_frames.append(e.get('data').get('clipIn')) + end_frames.append(e.get('data').get('clipOut')) + + elements.extend(get_assets( + project_name, + parent_ids=[e["_id"]], + fields=["_id", "data.clipIn", "data.clipOut"] + )) + + min_frame = min(start_frames) + max_frame = max(end_frames) + + sequence.set_display_rate( + unreal.FrameRate(asset_data.get('data').get("fps"), 1.0)) + sequence.set_playback_start(min_frame) + sequence.set_playback_end(max_frame) + + tracks = sequence.get_master_tracks() + track = None + for t in tracks: + if (t.get_class() == + unreal.MovieSceneCameraCutTrack.static_class()): + track = t + break + if not track: + track = sequence.add_master_track( + unreal.MovieSceneCameraCutTrack) + + return sequence, (min_frame, max_frame) + + def _process(self, lib_path, asset_dir, sequence, repr_loaded=None): ar = unreal.AssetRegistryHelpers.get_asset_registry() with open(lib_path, "r") as fp: @@ -213,97 +418,142 @@ class LayoutLoader(plugin.Loader): all_loaders = discover_loader_plugins() - if not loaded: - loaded = [] + if not repr_loaded: + repr_loaded = [] path = Path(lib_path) skeleton_dict = {} actors_dict = {} + bindings_dict = {} + + loaded_assets = [] for element in data: - reference = None - if element.get('reference_fbx'): - reference = element.get('reference_fbx') + representation = None + repr_format = None + if element.get('representation'): + # representation = element.get('representation') + + self.log.info(element.get("version")) + + valid_formats = ['fbx', 'abc'] + + repr_data = legacy_io.find_one({ + "type": "representation", + "parent": ObjectId(element.get("version")), + "name": {"$in": valid_formats} + }) + repr_format = repr_data.get('name') + + if not repr_data: + self.log.error( + f"No valid representation found for version " + f"{element.get('version')}") + continue + + representation = str(repr_data.get('_id')) + print(representation) + # This is to keep compatibility with old versions of the + # json format. 
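
The representation lookup above calls `repr_data.get('name')` before checking whether `repr_data` is None, and leaves a debug `print()` in place. A hardened sketch of the same lookup with the guard reordered (the helper name is mine; the query itself matches the one above):

```python
from bson.objectid import ObjectId

from openpype.pipeline import legacy_io

def find_loadable_representation(version_id, log):
    """Resolve a version id from the layout JSON to an fbx/abc
    representation document; returns (repre_id, repre_format)."""
    repr_data = legacy_io.find_one({
        "type": "representation",
        "parent": ObjectId(version_id),
        "name": {"$in": ['fbx', 'abc']}
    })
    # Guard first: find_one returns None when nothing matches.
    if not repr_data:
        log.error(
            f"No valid representation found for version {version_id}")
        return None, None
    return str(repr_data['_id']), repr_data['name']
```
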
+ elif element.get('reference_fbx'): + representation = element.get('reference_fbx') + repr_format = 'fbx' elif element.get('reference_abc'): - reference = element.get('reference_abc') + representation = element.get('reference_abc') + repr_format = 'abc' # If reference is None, this element is skipped, as it cannot be # imported in Unreal - if not reference: + if not representation: continue instance_name = element.get('instance_name') skeleton = None - if reference not in loaded: - loaded.append(reference) + if representation not in repr_loaded: + repr_loaded.append(representation) family = element.get('family') loaders = loaders_from_representation( - all_loaders, reference) + all_loaders, representation) loader = None - if reference == element.get('reference_fbx'): + if repr_format == 'fbx': loader = self._get_fbx_loader(loaders, family) - elif reference == element.get('reference_abc'): + elif repr_format == 'abc': loader = self._get_abc_loader(loaders, family) if not loader: + self.log.error( + f"No valid loader found for {representation}") continue options = { - "asset_dir": asset_dir + # "asset_dir": asset_dir } assets = load_container( loader, - reference, + representation, namespace=instance_name, options=options ) + container = None + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if obj.get_class().get_name() == 'AssetContainer': + container = obj + if obj.get_class().get_name() == 'Skeleton': + skeleton = obj + + loaded_assets.append(container.get_path_name()) + instances = [ item for item in data - if (item.get('reference_fbx') == reference or - item.get('reference_abc') == reference)] + if ((item.get('version') and + item.get('version') == element.get('version')) or + item.get('reference_fbx') == representation or + item.get('reference_abc') == representation)] for instance in instances: - transform = instance.get('transform') + # transform = instance.get('transform') + transform = instance.get('transform_matrix') + basis = instance.get('basis') inst = instance.get('instance_name') actors = [] if family == 'model': - actors = self._process_family( - assets, 'StaticMesh', transform, inst) + actors, _ = self._process_family( + assets, 'StaticMesh', transform, basis, + sequence, inst + ) elif family == 'rig': - actors = self._process_family( - assets, 'SkeletalMesh', transform, inst) + actors, bindings = self._process_family( + assets, 'SkeletalMesh', transform, basis, + sequence, inst + ) actors_dict[inst] = actors - - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path(asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break + bindings_dict[inst] = bindings if skeleton: - skeleton_dict[reference] = skeleton + skeleton_dict[representation] = skeleton else: - skeleton = skeleton_dict.get(reference) + skeleton = skeleton_dict.get(representation) animation_file = element.get('animation') if animation_file and skeleton: self._import_animation( - asset_dir, path, instance_name, skeleton, - actors_dict, animation_file) + asset_dir, path, instance_name, skeleton, actors_dict, + animation_file, bindings_dict, sequence) + + return loaded_assets @staticmethod def _remove_family(assets, components, class_name, prop_name): @@ -368,24 +618,118 @@ class LayoutLoader(plugin.Loader): Returns: list(str): list of container content """ + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + # 
Create directory for asset and avalon container + hierarchy = context.get('asset').get('data').get('parents') root = self.ASSET_ROOT + hierarchy_dir = root + hierarchy_dir_list = [] + for h in hierarchy: + hierarchy_dir = f"{hierarchy_dir}/{h}" + hierarchy_dir_list.append(hierarchy_dir) asset = context.get('asset').get('name') suffix = "_CON" - if asset: - asset_name = "{}_{}".format(asset, name) - else: - asset_name = "{}".format(name) + asset_name = f"{asset}_{name}" if asset else name tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + "{}/{}/{}".format(hierarchy_dir, asset, name), suffix="") container_name += suffix EditorAssetLibrary.make_directory(asset_dir) - self._process(self.fname, asset_dir) + master_level = None + shot = None + sequences = [] + + level = f"{asset_dir}/{asset}_map.{asset}_map" + EditorLevelLibrary.new_level(f"{asset_dir}/{asset}_map") + + if create_sequences: + # Create map for the shot, and create hierarchy of map. If the + # maps already exist, we will use them. + if hierarchy: + h_dir = hierarchy_dir_list[0] + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + if not EditorAssetLibrary.does_asset_exist(master_level): + EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map") + + if master_level: + EditorLevelLibrary.load_level(master_level) + EditorLevelUtils.add_level_to_world( + EditorLevelLibrary.get_editor_world(), + level, + unreal.LevelStreamingDynamic + ) + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(level) + + # Get all the sequences in the hierarchy. It will create them, if + # they don't exist. + frame_ranges = [] + for (h_dir, h) in zip(hierarchy_dir_list, hierarchy): + root_content = EditorAssetLibrary.list_assets( + h_dir, recursive=False, include_folder=False) + + existing_sequences = [ + EditorAssetLibrary.find_asset_data(asset) + for asset in root_content + if EditorAssetLibrary.find_asset_data( + asset).get_class().get_name() == 'LevelSequence' + ] + + if not existing_sequences: + sequence, frame_range = self._generate_sequence(h, h_dir) + + sequences.append(sequence) + frame_ranges.append(frame_range) + else: + for e in existing_sequences: + sequences.append(e.get_asset()) + frame_ranges.append(( + e.get_asset().get_playback_start(), + e.get_asset().get_playback_end())) + + shot = tools.create_asset( + asset_name=asset, + package_path=asset_dir, + asset_class=unreal.LevelSequence, + factory=unreal.LevelSequenceFactoryNew() + ) + + # sequences and frame_ranges have the same length + for i in range(0, len(sequences) - 1): + self._set_sequence_hierarchy( + sequences[i], sequences[i + 1], + frame_ranges[i][1], + frame_ranges[i + 1][0], frame_ranges[i + 1][1], + [level]) + + project_name = legacy_io.active_project() + data = get_asset_by_name(project_name, asset)["data"] + shot.set_display_rate( + unreal.FrameRate(data.get("fps"), 1.0)) + shot.set_playback_start(0) + shot.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1) + if sequences: + self._set_sequence_hierarchy( + sequences[-1], shot, + frame_ranges[-1][1], + data.get('clipIn'), data.get('clipOut'), + [level]) + + EditorLevelLibrary.load_level(level) + + loaded_assets = self._process(self.fname, asset_dir, shot) + + for s in sequences: + EditorAssetLibrary.save_asset(s.get_full_name()) + + EditorLevelLibrary.save_current_level() # Create Asset Container unreal_pipeline.create_container( @@ -401,7 +745,8 @@ 
class LayoutLoader(plugin.Loader): "loader": str(self.__class__.__name__), "representation": context["representation"]["_id"], "parent": context["representation"]["parent"], - "family": context["representation"]["context"]["family"] + "family": context["representation"]["context"]["family"], + "loaded_assets": loaded_assets } unreal_pipeline.imprint( "{}/{}".format(asset_dir, container_name), data) @@ -412,146 +757,224 @@ class LayoutLoader(plugin.Loader): for a in asset_content: EditorAssetLibrary.save_asset(a) + if master_level: + EditorLevelLibrary.load_level(master_level) + return asset_content def update(self, container, representation): + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + root = "/Game/OpenPype" + + asset_dir = container.get('namespace') + context = representation.get("context") + + sequence = None + master_level = None + + if create_sequences: + hierarchy = context.get('hierarchy').split("/") + h_dir = f"{root}/{hierarchy[0]}" + h_asset = hierarchy[0] + master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map" + + filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[asset_dir], + recursive_paths=False) + sequences = ar.get_assets(filter) + sequence = sequences[0].get_asset() + + prev_level = None + + if not master_level: + curr_level = unreal.LevelEditorSubsystem().get_current_level() + curr_level_path = curr_level.get_outer().get_path_name() + # If the level path does not start with "/Game/", the current + # level is a temporary, unsaved level. + if curr_level_path.startswith("/Game/"): + prev_level = curr_level_path + + # Get layout level + filter = unreal.ARFilter( + class_names=["World"], + package_paths=[asset_dir], + recursive_paths=False) + levels = ar.get_assets(filter) + + layout_level = levels[0].get_editor_property('object_path') + + EditorLevelLibrary.save_all_dirty_levels() + EditorLevelLibrary.load_level(layout_level) + + # Delete all the actors in the level + actors = unreal.EditorLevelLibrary.get_all_level_actors() + for actor in actors: + unreal.EditorLevelLibrary.destroy_actor(actor) + + if create_sequences: + EditorLevelLibrary.save_current_level() + + EditorAssetLibrary.delete_directory(f"{asset_dir}/animations/") + source_path = get_representation_path(representation) - destination_path = container["namespace"] - lib_path = Path(get_representation_path(representation)) - self._remove_actors(destination_path) + loaded_assets = self._process(source_path, asset_dir, sequence) - # Delete old animations - anim_path = f"{destination_path}/animations/" - EditorAssetLibrary.delete_directory(anim_path) - - with open(source_path, "r") as fp: - data = json.load(fp) - - references = [e.get('reference_fbx') for e in data] - asset_containers = self._get_asset_containers(destination_path) - loaded = [] - - # Delete all the assets imported with the previous version of the - # layout, if they're not in the new layout. - for asset_container in asset_containers: - if asset_container.get_editor_property( - 'asset_name') == container["objectName"]: - continue - ref = EditorAssetLibrary.get_metadata_tag( - asset_container.get_asset(), 'representation') - ppath = asset_container.get_editor_property('package_path') - - if ref not in references: - # If the asset is not in the new layout, delete it. - # Also check if the parent directory is empty, and delete that - # as well, if it is. 
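
When no master level exists, `update()` remembers the currently open level so it can be restored afterwards, but only if that level has actually been saved under `/Game/`. A small sketch of that guard, using the same subsystem calls as above:

```python
import unreal

def get_saved_level_path():
    """Return the current level's package path, or None when the editor
    is on a temporary, unsaved level."""
    curr_level = unreal.LevelEditorSubsystem().get_current_level()
    curr_level_path = curr_level.get_outer().get_path_name()
    # Saved levels live under /Game/; untitled temp levels do not.
    return curr_level_path if curr_level_path.startswith("/Game/") else None
```
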
- EditorAssetLibrary.delete_directory(ppath) - - parent = os.path.dirname(str(ppath)) - parent_content = EditorAssetLibrary.list_assets( - parent, recursive=False, include_folder=True - ) - - if len(parent_content) == 0: - EditorAssetLibrary.delete_directory(parent) - else: - # If the asset is in the new layout, search the instances in - # the JSON file, and create actors for them. - - actors_dict = {} - skeleton_dict = {} - - for element in data: - reference = element.get('reference_fbx') - instance_name = element.get('instance_name') - - skeleton = None - - if reference == ref and ref not in loaded: - loaded.append(ref) - - family = element.get('family') - - assets = EditorAssetLibrary.list_assets( - ppath, recursive=True, include_folder=False) - - instances = [ - item for item in data - if item.get('reference_fbx') == reference] - - for instance in instances: - transform = instance.get('transform') - inst = instance.get('instance_name') - - actors = [] - - if family == 'model': - actors = self._process_family( - assets, 'StaticMesh', transform, inst) - elif family == 'rig': - actors = self._process_family( - assets, 'SkeletalMesh', transform, inst) - actors_dict[inst] = actors - - if family == 'rig': - # Finds skeleton among the imported assets - for asset in assets: - obj = ar.get_asset_by_object_path( - asset).get_asset() - if obj.get_class().get_name() == 'Skeleton': - skeleton = obj - if skeleton: - break - - if skeleton: - skeleton_dict[reference] = skeleton - else: - skeleton = skeleton_dict.get(reference) - - animation_file = element.get('animation') - - if animation_file and skeleton: - self._import_animation( - destination_path, lib_path, - instance_name, skeleton, - actors_dict, animation_file) - - self._process(source_path, destination_path, loaded) - - container_path = "{}/{}".format(container["namespace"], - container["objectName"]) - # update metadata + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": loaded_assets + } unreal_pipeline.imprint( - container_path, - { - "representation": str(representation["_id"]), - "parent": str(representation["parent"]) - }) + "{}/{}".format(asset_dir, container.get('container_name')), data) + + EditorLevelLibrary.save_current_level() asset_content = EditorAssetLibrary.list_assets( - destination_path, recursive=True, include_folder=False) + asset_dir, recursive=True, include_folder=False) for a in asset_content: EditorAssetLibrary.save_asset(a) + if master_level: + EditorLevelLibrary.load_level(master_level) + elif prev_level: + EditorLevelLibrary.load_level(prev_level) + def remove(self, container): """ - First, destroy all actors of the assets to be removed. Then, deletes - the asset's directory. + Delete the layout. First, check if the assets loaded with the layout + are used by other layouts. If not, delete the assets. """ - path = container["namespace"] - parent_path = os.path.dirname(path) + data = get_current_project_settings() + create_sequences = data["unreal"]["level_sequences_for_layouts"] - self._remove_actors(path) + root = "/Game/OpenPype" + path = Path(container.get("namespace")) - EditorAssetLibrary.delete_directory(path) + containers = unreal_pipeline.ls() + layout_containers = [ + c for c in containers + if (c.get('asset_name') != container.get('asset_name') and + c.get('family') == "layout")] + # Check if the assets have been loaded by other layouts, and deletes + # them if they haven't. 
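
`loaded_assets` is imprinted as the string representation of a list, so it must be parsed back before the reference check; the code below does this with `eval()`. A sketch of the same check using `ast.literal_eval`, which only accepts Python literals and is the safer parse (the helper name is mine):

```python
import ast

def assets_safe_to_delete(container, layout_containers):
    """Yield loaded assets of `container` that no other layout uses."""
    loaded = ast.literal_eval(container.get('loaded_assets') or '[]')
    for asset in loaded:
        # Other containers still hold the raw imprinted string, so a
        # substring test mirrors the check in the code below.
        used_elsewhere = any(
            asset in lc.get('loaded_assets')
            for lc in layout_containers)
        if not used_elsewhere:
            yield asset
```

This is a suggested hardening only; the patch itself parses the value with `eval()`.
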
+ for asset in eval(container.get('loaded_assets')): + layouts = [ + lc for lc in layout_containers + if asset in lc.get('loaded_assets')] + + if not layouts: + EditorAssetLibrary.delete_directory(str(Path(asset).parent)) + + # Delete the parent folder if there aren't any more + # layouts in it. + asset_content = EditorAssetLibrary.list_assets( + str(Path(asset).parent.parent), recursive=False, + include_folder=True + ) + + if len(asset_content) == 0: + EditorAssetLibrary.delete_directory( + str(Path(asset).parent.parent)) + + master_sequence = None + master_level = None + sequences = [] + + if create_sequences: + # Remove the Level Sequence from the parent. + # We need to traverse the hierarchy from the master sequence to + # find the level sequence. + namespace = container.get('namespace').replace(f"{root}/", "") + ms_asset = namespace.split('/')[0] + ar = unreal.AssetRegistryHelpers.get_asset_registry() + _filter = unreal.ARFilter( + class_names=["LevelSequence"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + sequences = ar.get_assets(_filter) + master_sequence = sequences[0].get_asset() + _filter = unreal.ARFilter( + class_names=["World"], + package_paths=[f"{root}/{ms_asset}"], + recursive_paths=False) + levels = ar.get_assets(_filter) + master_level = levels[0].get_editor_property('object_path') + + sequences = [master_sequence] + + parent = None + for s in sequences: + tracks = s.get_master_tracks() + subscene_track = None + visibility_track = None + for t in tracks: + if t.get_class() == MovieSceneSubTrack.static_class(): + subscene_track = t + if (t.get_class() == + MovieSceneLevelVisibilityTrack.static_class()): + visibility_track = t + if subscene_track: + sections = subscene_track.get_sections() + for ss in sections: + if (ss.get_sequence().get_name() == + container.get('asset')): + parent = s + subscene_track.remove_section(ss) + break + sequences.append(ss.get_sequence()) + # Update subscenes indexes. + i = 0 + for ss in sections: + ss.set_row_index(i) + i += 1 + + if visibility_track: + sections = visibility_track.get_sections() + for ss in sections: + if (unreal.Name(f"{container.get('asset')}_map") + in ss.get_level_names()): + visibility_track.remove_section(ss) + # Update visibility sections indexes. + i = -1 + prev_name = [] + for ss in sections: + if prev_name != ss.get_level_names(): + i += 1 + ss.set_row_index(i) + prev_name = ss.get_level_names() + if parent: + break + + assert parent, "Could not find the parent sequence" + + # Create a temporary level to delete the layout level. + EditorLevelLibrary.save_all_dirty_levels() + EditorAssetLibrary.make_directory(f"{root}/tmp") + tmp_level = f"{root}/tmp/temp_map" + if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"): + EditorLevelLibrary.new_level(tmp_level) + else: + EditorLevelLibrary.load_level(tmp_level) + + # Delete the layout directory. + EditorAssetLibrary.delete_directory(str(path)) + + if create_sequences: + EditorLevelLibrary.load_level(master_level) + EditorAssetLibrary.delete_directory(f"{root}/tmp") + + # Delete the parent folder if there aren't any more layouts in it. 
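
Unreal will not delete the level that is currently loaded, hence the temporary map below. The pattern as a standalone helper, assuming (as the code above does) that `new_level` also makes the new map current:

```python
from unreal import EditorAssetLibrary, EditorLevelLibrary

def load_scratch_level(root="/Game/OpenPype"):
    """Load (creating if needed) a throwaway level so the level that is
    currently open can be deleted."""
    EditorLevelLibrary.save_all_dirty_levels()
    EditorAssetLibrary.make_directory(f"{root}/tmp")
    tmp_level = f"{root}/tmp/temp_map"
    if not EditorAssetLibrary.does_asset_exist(f"{tmp_level}.temp_map"):
        EditorLevelLibrary.new_level(tmp_level)
    else:
        EditorLevelLibrary.load_level(tmp_level)
    return tmp_level
```
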
asset_content = EditorAssetLibrary.list_assets( - parent_path, recursive=False, include_folder=True + str(path.parent), recursive=False, include_folder=True ) if len(asset_content) == 0: - EditorAssetLibrary.delete_directory(parent_path) + EditorAssetLibrary.delete_directory(str(path.parent)) diff --git a/openpype/hosts/unreal/plugins/load/load_layout_existing.py b/openpype/hosts/unreal/plugins/load/load_layout_existing.py new file mode 100644 index 0000000000..092b273ded --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_layout_existing.py @@ -0,0 +1,446 @@ +import json +from pathlib import Path + +import unreal +from unreal import EditorLevelLibrary + +from openpype.client import get_representations +from openpype.pipeline import ( + discover_loader_plugins, + loaders_from_representation, + load_container, + get_representation_path, + AVALON_CONTAINER_ID, + legacy_io, +) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api import pipeline as upipeline + + +class ExistingLayoutLoader(plugin.Loader): + """ + Load Layout for an existing scene, and match the existing assets. + """ + + families = ["layout"] + representations = ["json"] + + label = "Load Layout on Existing Scene" + icon = "code-fork" + color = "orange" + ASSET_ROOT = "/Game/OpenPype" + + delete_unmatched_assets = True + + @classmethod + def apply_settings(cls, project_settings, *args, **kwargs): + super(ExistingLayoutLoader, cls).apply_settings( + project_settings, *args, **kwargs + ) + cls.delete_unmatched_assets = ( + project_settings["unreal"]["delete_unmatched_assets"] + ) + + @staticmethod + def _create_container( + asset_name, asset_dir, asset, representation, parent, family + ): + container_name = f"{asset_name}_CON" + + container = None + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{asset_dir}/{container_name}" + ): + container = upipeline.create_container(container_name, asset_dir) + else: + ar = unreal.AssetRegistryHelpers.get_asset_registry() + obj = ar.get_asset_by_object_path( + f"{asset_dir}/{container_name}.{container_name}") + container = obj.get_asset() + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + # "loader": str(self.__class__.__name__), + "representation": representation, + "parent": parent, + "family": family + } + + upipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + return container.get_path_name() + + @staticmethod + def _get_current_level(): + ue_version = unreal.SystemLibrary.get_engine_version().split('.') + ue_major = ue_version[0] + + if ue_major == '4': + return EditorLevelLibrary.get_editor_world() + elif ue_major == '5': + return unreal.LevelEditorSubsystem().get_current_level() + + raise NotImplementedError( + f"Unreal version {ue_major} not supported") + + def _get_transform(self, ext, import_data, lasset): + conversion = unreal.Matrix.IDENTITY.transform() + fbx_tuning = unreal.Matrix.IDENTITY.transform() + + basis = unreal.Matrix( + lasset.get('basis')[0], + lasset.get('basis')[1], + lasset.get('basis')[2], + lasset.get('basis')[3] + ).transform() + transform = unreal.Matrix( + lasset.get('transform_matrix')[0], + lasset.get('transform_matrix')[1], + lasset.get('transform_matrix')[2], + lasset.get('transform_matrix')[3] + ).transform() + + # Check for the conversion settings. We cannot access + # the alembic conversion settings, so we assume that + # the maya ones have been applied. 
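
`_create_container` above is written to be idempotent: re-running the loader reuses an existing AssetContainer instead of creating a duplicate. The guard condensed into a sketch (helper name is mine):

```python
import unreal
from openpype.hosts.unreal.api import pipeline as upipeline

def get_or_create_container(asset_dir, container_name):
    """Return the AssetContainer at asset_dir, creating it if missing."""
    container_path = f"{asset_dir}/{container_name}"
    if not unreal.EditorAssetLibrary.does_asset_exist(container_path):
        return upipeline.create_container(container_name, asset_dir)
    # Already there: fetch it through the asset registry.
    ar = unreal.AssetRegistryHelpers.get_asset_registry()
    obj = ar.get_asset_by_object_path(
        f"{container_path}.{container_name}")
    return obj.get_asset()
```
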
+ if ext == '.fbx': + loc = import_data.import_translation + rot = import_data.import_rotation.to_vector() + scale = import_data.import_uniform_scale + conversion = unreal.Transform( + location=[loc.x, loc.y, loc.z], + rotation=[rot.x, rot.y, rot.z], + scale=[-scale, scale, scale] + ) + fbx_tuning = unreal.Transform( + rotation=[180.0, 0.0, 90.0], + scale=[1.0, 1.0, 1.0] + ) + elif ext == '.abc': + # This is the standard conversion settings for + # alembic files from Maya. + conversion = unreal.Transform( + location=[0.0, 0.0, 0.0], + rotation=[0.0, 0.0, 0.0], + scale=[1.0, -1.0, 1.0] + ) + + new_transform = (basis.inverse() * transform * basis) + return fbx_tuning * conversion.inverse() * new_transform + + def _spawn_actor(self, obj, lasset): + actor = EditorLevelLibrary.spawn_actor_from_object( + obj, unreal.Vector(0.0, 0.0, 0.0) + ) + + actor.set_actor_label(lasset.get('instance_name')) + smc = actor.get_editor_property('static_mesh_component') + mesh = smc.get_editor_property('static_mesh') + import_data = mesh.get_editor_property('asset_import_data') + filename = import_data.get_first_filename() + path = Path(filename) + + transform = self._get_transform( + path.suffix, import_data, lasset) + + actor.set_actor_transform(transform, False, True) + + @staticmethod + def _get_fbx_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshFBXLoader" + elif family == 'model' or family == 'staticMesh': + name = "StaticMeshFBXLoader" + elif family == 'camera': + name = "CameraLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + @staticmethod + def _get_abc_loader(loaders, family): + name = "" + if family == 'rig': + name = "SkeletalMeshAlembicLoader" + elif family == 'model': + name = "StaticMeshAlembicLoader" + + if name == "": + return None + + for loader in loaders: + if loader.__name__ == name: + return loader + + return None + + def _load_asset(self, repr_data, representation, instance_name, family): + repr_format = repr_data.get('name') + + all_loaders = discover_loader_plugins() + loaders = loaders_from_representation( + all_loaders, representation) + + loader = None + + if repr_format == 'fbx': + loader = self._get_fbx_loader(loaders, family) + elif repr_format == 'abc': + loader = self._get_abc_loader(loaders, family) + + if not loader: + self.log.error(f"No valid loader found for {representation}") + return [] + + # This option is necessary to avoid importing the assets with a + # different conversion compared to the other assets. For ABC files, + # it is in fact impossible to access the conversion settings. So, + # we must assume that the Maya conversion settings have been applied. 
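+        # The consuming loaders read this flag when building their import
+        # task, roughly (sketch of the consuming side, see
+        # StaticMeshAlembicLoader further down):
+        #
+        #     default_conversion = options.get("default_conversion", False)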
+        options = {
+            "default_conversion": True
+        }
+
+        assets = load_container(
+            loader,
+            representation,
+            namespace=instance_name,
+            options=options
+        )
+
+        return assets
+
+    def _get_valid_repre_docs(self, project_name, version_ids):
+        valid_formats = ['fbx', 'abc']
+
+        repre_docs = list(get_representations(
+            project_name,
+            representation_names=valid_formats,
+            version_ids=version_ids
+        ))
+        repre_doc_by_version_id = {}
+        for repre_doc in repre_docs:
+            version_id = str(repre_doc["parent"])
+            repre_doc_by_version_id[version_id] = repre_doc
+        return repre_doc_by_version_id
+
+    def _process(self, lib_path, project_name):
+        ar = unreal.AssetRegistryHelpers.get_asset_registry()
+
+        actors = EditorLevelLibrary.get_all_level_actors()
+
+        with open(lib_path, "r") as fp:
+            data = json.load(fp)
+
+        elements = []
+        repre_ids = set()
+        # Get all the representations in the JSON from the database.
+        for element in data:
+            repre_id = element.get('representation')
+            if repre_id:
+                repre_ids.add(repre_id)
+                elements.append(element)
+
+        repre_docs = get_representations(
+            project_name, representation_ids=repre_ids
+        )
+        repre_docs_by_id = {
+            str(repre_doc["_id"]): repre_doc
+            for repre_doc in repre_docs
+        }
+        layout_data = []
+        version_ids = set()
+        for element in elements:
+            repre_id = element.get("representation")
+            repre_doc = repre_docs_by_id.get(repre_id)
+            if not repre_doc:
+                raise AssertionError("Representation not found")
+            if not repre_doc.get('data') or not repre_doc['data'].get('path'):
+                raise AssertionError("Representation does not have path")
+            if not repre_doc.get('context'):
+                raise AssertionError("Representation does not have context")
+
+            layout_data.append((repre_doc, element))
+            version_ids.add(repre_doc["parent"])
+
+        # Pre-query valid representation documents for all elements at once.
+        valid_repre_doc_by_version_id = self._get_valid_repre_docs(
+            project_name, version_ids)
+        containers = []
+        actors_matched = []
+
+        for (repr_data, lasset) in layout_data:
+            # For every actor in the scene, check if it has a representation
+            # in those we got from the JSON. If so, create a container for
+            # it. Otherwise, remove it from the scene.
+            found = False
+
+            for actor in actors:
+                if not actor.get_class().get_name() == 'StaticMeshActor':
+                    continue
+                if actor in actors_matched:
+                    continue
+
+                # Get the original path of the file from which the asset has
+                # been imported.
+                smc = actor.get_editor_property('static_mesh_component')
+                mesh = smc.get_editor_property('static_mesh')
+                import_data = mesh.get_editor_property('asset_import_data')
+                filename = import_data.get_first_filename()
+                path = Path(filename)
+
+                if (not path.name or
+                        path.name not in repr_data.get('data').get('path')):
+                    continue
+
+                actor.set_actor_label(lasset.get('instance_name'))
+
+                mesh_path = Path(mesh.get_path_name()).parent.as_posix()
+
+                # Create the container for the asset.
+                asset = repr_data.get('context').get('asset')
+                subset = repr_data.get('context').get('subset')
+                container = self._create_container(
+                    f"{asset}_{subset}", mesh_path, asset,
+                    repr_data.get('_id'), repr_data.get('parent'),
+                    repr_data.get('context').get('family')
+                )
+                containers.append(container)
+
+                # Set the transform for the actor.
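+                # set_actor_transform(transform, sweep, teleport):
+                # sweep=False moves the actor without collision checks;
+                # teleport=True updates the physics state directly
+                # (assumed semantics of the unreal API).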
+ transform = self._get_transform( + path.suffix, import_data, lasset) + actor.set_actor_transform(transform, False, True) + + actors_matched.append(actor) + found = True + break + + # If an actor has not been found for this representation, + # we check if it has been loaded already by checking all the + # loaded containers. If so, we add it to the scene. Otherwise, + # we load it. + if found: + continue + + all_containers = upipeline.ls() + + loaded = False + + for container in all_containers: + repr = container.get('representation') + + if not repr == str(repr_data.get('_id')): + continue + + asset_dir = container.get('namespace') + + filter = unreal.ARFilter( + class_names=["StaticMesh"], + package_paths=[asset_dir], + recursive_paths=False) + assets = ar.get_assets(filter) + + for asset in assets: + obj = asset.get_asset() + self._spawn_actor(obj, lasset) + + loaded = True + break + + # If the asset has not been loaded yet, we load it. + if loaded: + continue + + assets = self._load_asset( + valid_repre_doc_by_version_id.get(lasset.get('version')), + lasset.get('representation'), + lasset.get('instance_name'), + lasset.get('family') + ) + + for asset in assets: + obj = ar.get_asset_by_object_path(asset).get_asset() + if not obj.get_class().get_name() == 'StaticMesh': + continue + self._spawn_actor(obj, lasset) + + break + + # Check if an actor was not matched to a representation. + # If so, remove it from the scene. + for actor in actors: + if not actor.get_class().get_name() == 'StaticMeshActor': + continue + if actor not in actors_matched: + self.log.warning(f"Actor {actor.get_name()} not matched.") + if self.delete_unmatched_assets: + EditorLevelLibrary.destroy_actor(actor) + + return containers + + def load(self, context, name, namespace, options): + print("Loading Layout and Match Assets") + + asset = context.get('asset').get('name') + asset_name = f"{asset}_{name}" if asset else name + container_name = f"{asset}_{name}_CON" + + curr_level = self._get_current_level() + + if not curr_level: + raise AssertionError("Current level not saved") + + project_name = context["project"]["name"] + containers = self._process(self.fname, project_name) + + curr_level_path = Path( + curr_level.get_outer().get_path_name()).parent.as_posix() + + if not unreal.EditorAssetLibrary.does_asset_exist( + f"{curr_level_path}/{container_name}" + ): + upipeline.create_container( + container=container_name, path=curr_level_path) + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": curr_level_path, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"], + "loaded_assets": containers + } + upipeline.imprint(f"{curr_level_path}/{container_name}", data) + + def update(self, container, representation): + asset_dir = container.get('namespace') + + source_path = get_representation_path(representation) + project_name = legacy_io.active_project() + containers = self._process(source_path, project_name) + + data = { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]), + "loaded_assets": containers + } + upipeline.imprint( + "{}/{}".format(asset_dir, container.get('container_name')), data) diff --git a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py 
b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py similarity index 79% rename from openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py rename to openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py index 5a73c72c64..e316d255e9 100644 --- a/openpype/hosts/unreal/plugins/load/load_alembic_staticmesh.py +++ b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_abc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -"""Loader for Static Mesh alembics.""" +"""Load Skeletal Mesh alembics.""" import os from openpype.pipeline import ( @@ -11,11 +11,11 @@ from openpype.hosts.unreal.api import pipeline as unreal_pipeline import unreal # noqa -class StaticMeshAlembicLoader(plugin.Loader): - """Load Unreal StaticMesh from Alembic""" +class SkeletalMeshAlembicLoader(plugin.Loader): + """Load Unreal SkeletalMesh from Alembic""" - families = ["model"] - label = "Import Alembic Static Mesh" + families = ["pointcache", "skeletalMesh"] + label = "Import Alembic Skeletal Mesh" representations = ["abc"] icon = "cube" color = "orange" @@ -24,7 +24,11 @@ class StaticMeshAlembicLoader(plugin.Loader): task = unreal.AssetImportTask() options = unreal.AbcImportSettings() sm_settings = unreal.AbcStaticMeshSettings() - conversion_settings = unreal.AbcConversionSettings() + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) task.set_editor_property('filename', filename) task.set_editor_property('destination_path', asset_dir) @@ -36,16 +40,7 @@ class StaticMeshAlembicLoader(plugin.Loader): # set import options here # Unreal 4.24 ignores the settings. It works with Unreal 4.26 options.set_editor_property( - 'import_type', unreal.AlembicImportType.STATIC_MESH) - - sm_settings.set_editor_property('merge_meshes', True) - - conversion_settings.set_editor_property('flip_u', False) - conversion_settings.set_editor_property('flip_v', True) - conversion_settings.set_editor_property( - 'scale', unreal.Vector(x=100.0, y=100.0, z=100.0)) - conversion_settings.set_editor_property( - 'rotation', unreal.Vector(x=-90.0, y=0.0, z=180.0)) + 'import_type', unreal.AlembicImportType.SKELETAL) options.static_mesh_settings = sm_settings options.conversion_settings = conversion_settings @@ -73,9 +68,9 @@ class StaticMeshAlembicLoader(plugin.Loader): Returns: list(str): list of container content - """ - # Create directory for asset and OpenPype container + + # Create directory for asset and openpype container root = "/Game/OpenPype/Assets" asset = context.get('asset').get('name') suffix = "_CON" @@ -83,22 +78,24 @@ class StaticMeshAlembicLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = self.get_task(self.fname, asset_dir, asset_name, False) + task = self.get_task(self.fname, asset_dir, asset_name, False) - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # 
Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", @@ -133,7 +130,6 @@ class StaticMeshAlembicLoader(plugin.Loader): # do import fbx and replace existing data unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) - container_path = "{}/{}".format(container["namespace"], container["objectName"]) # update metadata diff --git a/openpype/hosts/unreal/plugins/load/load_rig.py b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py similarity index 72% rename from openpype/hosts/unreal/plugins/load/load_rig.py rename to openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py index ff844a5e94..227c5c9292 100644 --- a/openpype/hosts/unreal/plugins/load/load_rig.py +++ b/openpype/hosts/unreal/plugins/load/load_skeletalmesh_fbx.py @@ -14,7 +14,7 @@ import unreal # noqa class SkeletalMeshFBXLoader(plugin.Loader): """Load Unreal SkeletalMesh from FBX.""" - families = ["rig"] + families = ["rig", "skeletalMesh"] label = "Import FBX Skeletal Mesh" representations = ["fbx"] icon = "cube" @@ -52,54 +52,55 @@ class SkeletalMeshFBXLoader(plugin.Loader): asset_name = "{}_{}".format(asset, name) else: asset_name = "{}".format(name) + version = context.get('version').get('name') tools = unreal.AssetToolsHelpers().get_asset_tools() asset_dir, container_name = tools.create_unique_asset_name( - "{}/{}/{}".format(root, asset, name), suffix="") + f"{root}/{asset}/{name}_v{version:03d}", suffix="") container_name += suffix - unreal.EditorAssetLibrary.make_directory(asset_dir) + if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir): + unreal.EditorAssetLibrary.make_directory(asset_dir) - task = unreal.AssetImportTask() + task = unreal.AssetImportTask() - task.set_editor_property('filename', self.fname) - task.set_editor_property('destination_path', asset_dir) - task.set_editor_property('destination_name', asset_name) - task.set_editor_property('replace_existing', False) - task.set_editor_property('automated', True) - task.set_editor_property('save', False) + task.set_editor_property('filename', self.fname) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', False) + task.set_editor_property('automated', True) + task.set_editor_property('save', False) - # set import options here - options = unreal.FbxImportUI() - options.set_editor_property('import_as_skeletal', True) - options.set_editor_property('import_animations', False) - options.set_editor_property('import_mesh', True) - options.set_editor_property('import_materials', True) - options.set_editor_property('import_textures', True) - options.set_editor_property('skeleton', None) - options.set_editor_property('create_physics_asset', False) + # set import options here + options = unreal.FbxImportUI() + options.set_editor_property('import_as_skeletal', True) + options.set_editor_property('import_animations', False) + options.set_editor_property('import_mesh', True) + options.set_editor_property('import_materials', False) + options.set_editor_property('import_textures', False) + options.set_editor_property('skeleton', None) + options.set_editor_property('create_physics_asset', False) - options.set_editor_property('mesh_type_to_import', - unreal.FBXImportType.FBXIT_SKELETAL_MESH) + options.set_editor_property( + 
'mesh_type_to_import', + unreal.FBXImportType.FBXIT_SKELETAL_MESH) - options.skeletal_mesh_import_data.set_editor_property( - 'import_content_type', - unreal.FBXImportContentType.FBXICT_ALL - ) - # set to import normals, otherwise Unreal will compute them - # and it will take a long time, depending on the size of the mesh - options.skeletal_mesh_import_data.set_editor_property( - 'normal_import_method', - unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS - ) + options.skeletal_mesh_import_data.set_editor_property( + 'import_content_type', + unreal.FBXImportContentType.FBXICT_ALL) + # set to import normals, otherwise Unreal will compute them + # and it will take a long time, depending on the size of the mesh + options.skeletal_mesh_import_data.set_editor_property( + 'normal_import_method', + unreal.FBXNormalImportMethod.FBXNIM_IMPORT_NORMALS) - task.options = options - unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 + task.options = options + unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501 - # Create Asset Container - unreal_pipeline.create_container( - container=container_name, path=asset_dir) + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) data = { "schema": "openpype:container-2.0", diff --git a/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py b/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py new file mode 100644 index 0000000000..c7841cef53 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_staticmesh_abc.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +"""Loader for Static Mesh alembics.""" +import os + +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api import pipeline as unreal_pipeline +import unreal # noqa + + +class StaticMeshAlembicLoader(plugin.Loader): + """Load Unreal StaticMesh from Alembic""" + + families = ["model", "staticMesh"] + label = "Import Alembic Static Mesh" + representations = ["abc"] + icon = "cube" + color = "orange" + + @staticmethod + def get_task(filename, asset_dir, asset_name, replace, default_conversion): + task = unreal.AssetImportTask() + options = unreal.AbcImportSettings() + sm_settings = unreal.AbcStaticMeshSettings() + + task.set_editor_property('filename', filename) + task.set_editor_property('destination_path', asset_dir) + task.set_editor_property('destination_name', asset_name) + task.set_editor_property('replace_existing', replace) + task.set_editor_property('automated', True) + task.set_editor_property('save', True) + + # set import options here + # Unreal 4.24 ignores the settings. It works with Unreal 4.26 + options.set_editor_property( + 'import_type', unreal.AlembicImportType.STATIC_MESH) + + sm_settings.set_editor_property('merge_meshes', True) + + if not default_conversion: + conversion_settings = unreal.AbcConversionSettings( + preset=unreal.AbcConversionPreset.CUSTOM, + flip_u=False, flip_v=False, + rotation=[0.0, 0.0, 0.0], + scale=[1.0, 1.0, 1.0]) + options.conversion_settings = conversion_settings + + options.static_mesh_settings = sm_settings + task.options = options + + return task + + def load(self, context, name, namespace, options): + """Load and containerise representation into Content Browser. + + This is two step process. 
First, the asset is imported to a temporary
+        path, and `containerise()` is then called on it - this moves all
+        content to a new directory, creates an AssetContainer there and
+        imprints it with metadata. This marks the path as a container.
+
+        Args:
+            context (dict): application context
+            name (str): subset name
+            namespace (str): in Unreal this is basically path to container.
+                             This is not passed here, so namespace is set
+                             by `containerise()` because only then we know
+                             real path.
+            options (dict): Those would be data to be imprinted. This is not
+                            used now, data are imprinted by `containerise()`.
+
+        Returns:
+            list(str): list of container content
+
+        """
+        # Create directory for asset and OpenPype container
+        root = "/Game/OpenPype/Assets"
+        asset = context.get('asset').get('name')
+        suffix = "_CON"
+        if asset:
+            asset_name = "{}_{}".format(asset, name)
+        else:
+            asset_name = "{}".format(name)
+        version = context.get('version').get('name')
+
+        default_conversion = False
+        if options.get("default_conversion"):
+            default_conversion = options.get("default_conversion")
+
+        tools = unreal.AssetToolsHelpers().get_asset_tools()
+        asset_dir, container_name = tools.create_unique_asset_name(
+            f"{root}/{asset}/{name}_v{version:03d}", suffix="")
+
+        container_name += suffix
+
+        if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
+            unreal.EditorAssetLibrary.make_directory(asset_dir)
+
+        task = self.get_task(
+            self.fname, asset_dir, asset_name, False, default_conversion)
+
+        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])  # noqa: E501
+
+        # Create Asset Container
+        unreal_pipeline.create_container(
+            container=container_name, path=asset_dir)
+
+        data = {
+            "schema": "openpype:container-2.0",
+            "id": AVALON_CONTAINER_ID,
+            "asset": asset,
+            "namespace": asset_dir,
+            "container_name": container_name,
+            "asset_name": asset_name,
+            "loader": str(self.__class__.__name__),
+            "representation": context["representation"]["_id"],
+            "parent": context["representation"]["parent"],
+            "family": context["representation"]["context"]["family"]
+        }
+        unreal_pipeline.imprint(
+            "{}/{}".format(asset_dir, container_name), data)
+
+        asset_content = unreal.EditorAssetLibrary.list_assets(
+            asset_dir, recursive=True, include_folder=True
+        )
+
+        for a in asset_content:
+            unreal.EditorAssetLibrary.save_asset(a)
+
+        return asset_content
+
+    def update(self, container, representation):
+        name = container["asset_name"]
+        source_path = get_representation_path(representation)
+        destination_path = container["namespace"]
+
+        # Keep the custom (identity) conversion on update as well; the last
+        # argument is the 'default_conversion' flag taken by 'get_task'.
+        task = self.get_task(
+            source_path, destination_path, name, True, False)
+
+        # Import the alembic again and replace the existing data.
+        unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
+
+        container_path = "{}/{}".format(container["namespace"],
+                                        container["objectName"])
+        # update metadata
+        unreal_pipeline.imprint(
+            container_path,
+            {
+                "representation": str(representation["_id"]),
+                "parent": str(representation["parent"])
+            })
+
+        asset_content = unreal.EditorAssetLibrary.list_assets(
+            destination_path, recursive=True, include_folder=True
+        )
+
+        for a in asset_content:
+            unreal.EditorAssetLibrary.save_asset(a)
+
+    def remove(self, container):
+        path = container["namespace"]
+        parent_path = os.path.dirname(path)
+
+        unreal.EditorAssetLibrary.delete_directory(path)
+
+        asset_content = unreal.EditorAssetLibrary.list_assets(
+            parent_path, recursive=False
+        )
+
+        if len(asset_content) == 0:
+            unreal.EditorAssetLibrary.delete_directory(parent_path)
diff --git
a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py b/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py similarity index 99% rename from openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py rename to openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py index 282d249947..351c686095 100644 --- a/openpype/hosts/unreal/plugins/load/load_staticmeshfbx.py +++ b/openpype/hosts/unreal/plugins/load/load_staticmesh_fbx.py @@ -14,7 +14,7 @@ import unreal # noqa class StaticMeshFBXLoader(plugin.Loader): """Load Unreal StaticMesh from FBX.""" - families = ["model", "unrealStaticMesh"] + families = ["model", "staticMesh"] label = "Import FBX Static Mesh" representations = ["fbx"] icon = "cube" diff --git a/openpype/hosts/unreal/plugins/load/load_uasset.py b/openpype/hosts/unreal/plugins/load/load_uasset.py new file mode 100644 index 0000000000..eccfc7b445 --- /dev/null +++ b/openpype/hosts/unreal/plugins/load/load_uasset.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +"""Load UAsset.""" +from pathlib import Path +import shutil + +from openpype.pipeline import ( + get_representation_path, + AVALON_CONTAINER_ID +) +from openpype.hosts.unreal.api import plugin +from openpype.hosts.unreal.api import pipeline as unreal_pipeline +import unreal # noqa + + +class UAssetLoader(plugin.Loader): + """Load UAsset.""" + + families = ["uasset"] + label = "Load UAsset" + representations = ["uasset"] + icon = "cube" + color = "orange" + + def load(self, context, name, namespace, options): + """Load and containerise representation into Content Browser. + + Args: + context (dict): application context + name (str): subset name + namespace (str): in Unreal this is basically path to container. + This is not passed here, so namespace is set + by `containerise()` because only then we know + real path. + options (dict): Those would be data to be imprinted. This is not + used now, data are imprinted by `containerise()`. 
+ + Returns: + list(str): list of container content + """ + + # Create directory for asset and OpenPype container + root = "/Game/OpenPype/Assets" + asset = context.get('asset').get('name') + suffix = "_CON" + if asset: + asset_name = "{}_{}".format(asset, name) + else: + asset_name = "{}".format(name) + + tools = unreal.AssetToolsHelpers().get_asset_tools() + asset_dir, container_name = tools.create_unique_asset_name( + "{}/{}/{}".format(root, asset, name), suffix="") + + container_name += suffix + + unreal.EditorAssetLibrary.make_directory(asset_dir) + + destination_path = asset_dir.replace( + "/Game", + Path(unreal.Paths.project_content_dir()).as_posix(), + 1) + + shutil.copy(self.fname, f"{destination_path}/{name}.uasset") + + # Create Asset Container + unreal_pipeline.create_container( + container=container_name, path=asset_dir) + + data = { + "schema": "openpype:container-2.0", + "id": AVALON_CONTAINER_ID, + "asset": asset, + "namespace": asset_dir, + "container_name": container_name, + "asset_name": asset_name, + "loader": str(self.__class__.__name__), + "representation": context["representation"]["_id"], + "parent": context["representation"]["parent"], + "family": context["representation"]["context"]["family"] + } + unreal_pipeline.imprint( + "{}/{}".format(asset_dir, container_name), data) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + return asset_content + + def update(self, container, representation): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + asset_dir = container["namespace"] + name = representation["context"]["subset"] + + destination_path = asset_dir.replace( + "/Game", + Path(unreal.Paths.project_content_dir()).as_posix(), + 1) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=False, include_folder=True + ) + + for asset in asset_content: + obj = ar.get_asset_by_object_path(asset).get_asset() + if not obj.get_class().get_name() == 'AssetContainer': + unreal.EditorAssetLibrary.delete_asset(asset) + + update_filepath = get_representation_path(representation) + + shutil.copy(update_filepath, f"{destination_path}/{name}.uasset") + + container_path = "{}/{}".format(container["namespace"], + container["objectName"]) + # update metadata + unreal_pipeline.imprint( + container_path, + { + "representation": str(representation["_id"]), + "parent": str(representation["parent"]) + }) + + asset_content = unreal.EditorAssetLibrary.list_assets( + asset_dir, recursive=True, include_folder=True + ) + + for a in asset_content: + unreal.EditorAssetLibrary.save_asset(a) + + def remove(self, container): + path = container["namespace"] + parent_path = Path(path).parent.as_posix() + + unreal.EditorAssetLibrary.delete_directory(path) + + asset_content = unreal.EditorAssetLibrary.list_assets( + parent_path, recursive=False + ) + + if len(asset_content) == 0: + unreal.EditorAssetLibrary.delete_directory(parent_path) diff --git a/openpype/hosts/unreal/plugins/publish/collect_instances.py b/openpype/hosts/unreal/plugins/publish/collect_instances.py index 94e732d728..27b711cad6 100644 --- a/openpype/hosts/unreal/plugins/publish/collect_instances.py +++ b/openpype/hosts/unreal/plugins/publish/collect_instances.py @@ -3,6 +3,8 @@ import ast import unreal # noqa import pyblish.api +from openpype.hosts.unreal.api.pipeline import UNREAL_VERSION +from openpype.pipeline.publish import KnownPublishError class 
CollectInstances(pyblish.api.ContextPlugin): @@ -17,14 +19,20 @@ class CollectInstances(pyblish.api.ContextPlugin): """ label = "Collect Instances" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.1 hosts = ["unreal"] def process(self, context): ar = unreal.AssetRegistryHelpers.get_asset_registry() - instance_containers = ar.get_assets_by_class( - "OpenPypePublishInstance", True) + class_name = [ + "/Script/OpenPype", + "OpenPypePublishInstance" + ] if ( + UNREAL_VERSION.major == 5 + and UNREAL_VERSION.minor > 0 + ) else "OpenPypePublishInstance" # noqa + instance_containers = ar.get_assets_by_class(class_name, True) for container_data in instance_containers: asset = container_data.get_asset() @@ -32,9 +40,8 @@ class CollectInstances(pyblish.api.ContextPlugin): data["objectName"] = container_data.asset_name # convert to strings data = {str(key): str(value) for (key, value) in data.items()} - assert data.get("family"), ( - "instance has no family" - ) + if not data.get("family"): + raise KnownPublishError("instance has no family") # content of container members = ast.literal_eval(data.get("members")) diff --git a/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py b/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py new file mode 100644 index 0000000000..69e69f6630 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/collect_remove_marked.py @@ -0,0 +1,24 @@ +import pyblish.api + + +class CollectRemoveMarked(pyblish.api.ContextPlugin): + """Remove marked data + + Remove instances that have 'remove' in their instance.data + + """ + + order = pyblish.api.CollectorOrder + 0.499 + label = 'Remove Marked Instances' + + def process(self, context): + + self.log.debug(context) + # make ftrack publishable + instances_to_remove = [] + for instance in context: + if instance.data.get('remove'): + instances_to_remove.append(instance) + + for instance in instances_to_remove: + context.remove(instance) diff --git a/openpype/hosts/unreal/plugins/publish/collect_render_instances.py b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py new file mode 100644 index 0000000000..cb28f4bf60 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/collect_render_instances.py @@ -0,0 +1,112 @@ +import os +from pathlib import Path + +import unreal + +from openpype.pipeline import Anatomy +from openpype.hosts.unreal.api import pipeline +import pyblish.api + + +class CollectRenderInstances(pyblish.api.InstancePlugin): + """ This collector will try to find all the rendered frames. 
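+
+    It walks the instance's level sequence and its subsequences and creates
+    one publishable instance per leaf sequence (camera sequences are
+    skipped), collecting the rendered PNG frames from the render output
+    folder configured for that sequence.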
+ + """ + order = pyblish.api.CollectorOrder + hosts = ["unreal"] + families = ["render"] + label = "Collect Render Instances" + + def process(self, instance): + self.log.debug("Preparing Rendering Instances") + + context = instance.context + + data = instance.data + data['remove'] = True + + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + sequence = ar.get_asset_by_object_path( + data.get('sequence')).get_asset() + + sequences = [{ + "sequence": sequence, + "output": data.get('output'), + "frame_range": ( + data.get('frameStart'), data.get('frameEnd')) + }] + + for s in sequences: + self.log.debug(f"Processing: {s.get('sequence').get_name()}") + subscenes = pipeline.get_subsequences(s.get('sequence')) + + if subscenes: + for ss in subscenes: + sequences.append({ + "sequence": ss.get_sequence(), + "output": (f"{s.get('output')}/" + f"{ss.get_sequence().get_name()}"), + "frame_range": ( + ss.get_start_frame(), ss.get_end_frame() - 1) + }) + else: + # Avoid creating instances for camera sequences + if "_camera" not in s.get('sequence').get_name(): + seq = s.get('sequence') + seq_name = seq.get_name() + + new_instance = context.create_instance( + f"{data.get('subset')}_" + f"{seq_name}") + new_instance[:] = seq_name + + new_data = new_instance.data + + new_data["asset"] = seq_name + new_data["setMembers"] = seq_name + new_data["family"] = "render" + new_data["families"] = ["render", "review"] + new_data["parent"] = data.get("parent") + new_data["subset"] = f"{data.get('subset')}_{seq_name}" + new_data["level"] = data.get("level") + new_data["output"] = s.get('output') + new_data["fps"] = seq.get_display_rate().numerator + new_data["frameStart"] = s.get('frame_range')[0] + new_data["frameEnd"] = s.get('frame_range')[1] + new_data["sequence"] = seq.get_path_name() + new_data["master_sequence"] = data["master_sequence"] + new_data["master_level"] = data["master_level"] + + self.log.debug(f"new instance data: {new_data}") + + try: + project = os.environ.get("AVALON_PROJECT") + anatomy = Anatomy(project) + root = anatomy.roots['renders'] + except Exception: + raise Exception( + "Could not find render root in anatomy settings.") + + render_dir = f"{root}/{project}/{s.get('output')}" + render_path = Path(render_dir) + + frames = [] + + for x in render_path.iterdir(): + if x.is_file() and x.suffix == '.png': + frames.append(str(x.name)) + + if "representations" not in new_instance.data: + new_instance.data["representations"] = [] + + repr = { + 'frameStart': s.get('frame_range')[0], + 'frameEnd': s.get('frame_range')[1], + 'name': 'png', + 'ext': 'png', + 'files': frames, + 'stagingDir': render_dir, + 'tags': ['review'] + } + new_instance.data["representations"].append(repr) diff --git a/openpype/hosts/unreal/plugins/publish/extract_camera.py b/openpype/hosts/unreal/plugins/publish/extract_camera.py index ce53824563..4e37cc6a86 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_camera.py +++ b/openpype/hosts/unreal/plugins/publish/extract_camera.py @@ -6,10 +6,10 @@ import unreal from unreal import EditorAssetLibrary as eal from unreal import EditorLevelLibrary as ell -import openpype.api +from openpype.pipeline import publish -class ExtractCamera(openpype.api.Extractor): +class ExtractCamera(publish.Extractor): """Extract a camera.""" label = "Extract Camera" diff --git a/openpype/hosts/unreal/plugins/publish/extract_layout.py b/openpype/hosts/unreal/plugins/publish/extract_layout.py index f34a47b89f..cac7991f00 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_layout.py 
+++ b/openpype/hosts/unreal/plugins/publish/extract_layout.py @@ -3,17 +3,15 @@ import os import json import math -from bson.objectid import ObjectId - import unreal from unreal import EditorLevelLibrary as ell from unreal import EditorAssetLibrary as eal -import openpype.api -from avalon import io +from openpype.client import get_representation_by_name +from openpype.pipeline import legacy_io, publish -class ExtractLayout(openpype.api.Extractor): +class ExtractLayout(publish.Extractor): """Extract a layout.""" label = "Extract Layout" @@ -34,6 +32,7 @@ class ExtractLayout(openpype.api.Extractor): "Wrong level loaded" json_data = [] + project_name = legacy_io.active_project() for member in instance[:]: actor = ell.get_actor_reference(member) @@ -57,17 +56,13 @@ class ExtractLayout(openpype.api.Extractor): self.log.error("AssetContainer not found.") return - parent = eal.get_metadata_tag(asset_container, "parent") + parent_id = eal.get_metadata_tag(asset_container, "parent") family = eal.get_metadata_tag(asset_container, "family") - self.log.info("Parent: {}".format(parent)) - blend = io.find_one( - { - "type": "representation", - "parent": ObjectId(parent), - "name": "blend" - }, - projection={"_id": True}) + self.log.info("Parent: {}".format(parent_id)) + blend = get_representation_by_name( + project_name, "blend", parent_id, fields=["_id"] + ) blend_id = blend["_id"] json_element = {} diff --git a/openpype/hosts/unreal/plugins/publish/extract_look.py b/openpype/hosts/unreal/plugins/publish/extract_look.py index ea39949417..f999ad8651 100644 --- a/openpype/hosts/unreal/plugins/publish/extract_look.py +++ b/openpype/hosts/unreal/plugins/publish/extract_look.py @@ -5,10 +5,10 @@ import os import unreal from unreal import MaterialEditingLibrary as mat_lib -import openpype.api +from openpype.pipeline import publish -class ExtractLook(openpype.api.Extractor): +class ExtractLook(publish.Extractor): """Extract look.""" label = "Extract Look" diff --git a/openpype/hosts/unreal/plugins/publish/extract_render.py b/openpype/hosts/unreal/plugins/publish/extract_render.py new file mode 100644 index 0000000000..8ff38fbee0 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/extract_render.py @@ -0,0 +1,48 @@ +from pathlib import Path + +import unreal + +from openpype.pipeline import publish + + +class ExtractRender(publish.Extractor): + """Extract render.""" + + label = "Extract Render" + hosts = ["unreal"] + families = ["render"] + optional = True + + def process(self, instance): + # Define extract output file path + stagingdir = self.staging_dir(instance) + + # Perform extraction + self.log.info("Performing extraction..") + + # Get the render output directory + project_dir = unreal.Paths.project_dir() + render_dir = (f"{project_dir}/Saved/MovieRenders/" + f"{instance.data['subset']}") + + assert unreal.Paths.directory_exists(render_dir), \ + "Render directory does not exist" + + render_path = Path(render_dir) + + frames = [] + + for x in render_path.iterdir(): + if x.is_file() and x.suffix == '.png': + frames.append(str(x)) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + render_representation = { + 'name': 'png', + 'ext': 'png', + 'files': frames, + "stagingDir": stagingdir, + } + instance.data["representations"].append(render_representation) diff --git a/openpype/hosts/unreal/plugins/publish/extract_uasset.py b/openpype/hosts/unreal/plugins/publish/extract_uasset.py new file mode 100644 index 0000000000..89d779d368 --- /dev/null +++ 
b/openpype/hosts/unreal/plugins/publish/extract_uasset.py @@ -0,0 +1,42 @@ +from pathlib import Path +import shutil + +import unreal + +from openpype.pipeline import publish + + +class ExtractUAsset(publish.Extractor): + """Extract a UAsset.""" + + label = "Extract UAsset" + hosts = ["unreal"] + families = ["uasset"] + optional = True + + def process(self, instance): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + + self.log.info("Performing extraction..") + + staging_dir = self.staging_dir(instance) + filename = "{}.uasset".format(instance.name) + + obj = instance[0] + + asset = ar.get_asset_by_object_path(obj).get_asset() + sys_path = unreal.SystemLibrary.get_system_path(asset) + filename = Path(sys_path).name + + shutil.copy(sys_path, staging_dir) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'uasset', + 'ext': 'uasset', + 'files': filename, + "stagingDir": staging_dir, + } + instance.data["representations"].append(representation) diff --git a/openpype/hosts/unreal/plugins/publish/validate_no_dependencies.py b/openpype/hosts/unreal/plugins/publish/validate_no_dependencies.py new file mode 100644 index 0000000000..c760129550 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/validate_no_dependencies.py @@ -0,0 +1,41 @@ +import unreal + +import pyblish.api + + +class ValidateNoDependencies(pyblish.api.InstancePlugin): + """Ensure that the uasset has no dependencies + + The uasset is checked for dependencies. If there are any, the instance + cannot be published. + """ + + order = pyblish.api.ValidatorOrder + label = "Check no dependencies" + families = ["uasset"] + hosts = ["unreal"] + optional = True + + def process(self, instance): + ar = unreal.AssetRegistryHelpers.get_asset_registry() + all_dependencies = [] + + for obj in instance[:]: + asset = ar.get_asset_by_object_path(obj) + dependencies = ar.get_dependencies( + asset.package_name, + unreal.AssetRegistryDependencyOptions( + include_soft_package_references=False, + include_hard_package_references=True, + include_searchable_names=False, + include_soft_management_references=False, + include_hard_management_references=False + )) + if dependencies: + for dep in dependencies: + if str(dep).startswith("/Game/"): + all_dependencies.append(str(dep)) + + if all_dependencies: + raise RuntimeError( + f"Dependencies found: {all_dependencies}") diff --git a/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py new file mode 100644 index 0000000000..87f1338ee8 --- /dev/null +++ b/openpype/hosts/unreal/plugins/publish/validate_sequence_frames.py @@ -0,0 +1,41 @@ +import clique + +import pyblish.api + + +class ValidateSequenceFrames(pyblish.api.InstancePlugin): + """Ensure the sequence of frames is complete + + The files found in the folder are checked against the frameStart and + frameEnd of the instance. If the first or last file is not + corresponding with the first or last frame it is flagged as invalid. 
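+
+    For example, with frameStart 1001 and frameEnd 1003, a representation
+    holding files only for frames 1001 and 1003 passes the range check but
+    fails the validation with frame 1002 reported as missing.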
+ """ + + order = pyblish.api.ValidatorOrder + label = "Validate Sequence Frames" + families = ["render"] + hosts = ["unreal"] + optional = True + + def process(self, instance): + representations = instance.data.get("representations") + for repr in representations: + patterns = [clique.PATTERNS["frames"]] + collections, remainder = clique.assemble( + repr["files"], minimum_items=1, patterns=patterns) + + assert not remainder, "Must not have remainder" + assert len(collections) == 1, "Must detect single collection" + collection = collections[0] + frames = list(collection.indexes) + + current_range = (frames[0], frames[-1]) + required_range = (instance.data["frameStart"], + instance.data["frameEnd"]) + + if current_range != required_range: + raise ValueError(f"Invalid frame range: {current_range} - " + f"expected: {required_range}") + + missing = collection.holes().indexes + assert not missing, "Missing frames: %s" % (missing,) diff --git a/openpype/hosts/webpublisher/__init__.py b/openpype/hosts/webpublisher/__init__.py index e69de29bb2..4e918c5d7d 100644 --- a/openpype/hosts/webpublisher/__init__.py +++ b/openpype/hosts/webpublisher/__init__.py @@ -0,0 +1,10 @@ +from .addon import ( + WebpublisherAddon, + WEBPUBLISHER_ROOT_DIR, +) + + +__all__ = ( + "WebpublisherAddon", + "WEBPUBLISHER_ROOT_DIR", +) diff --git a/openpype/hosts/webpublisher/addon.py b/openpype/hosts/webpublisher/addon.py new file mode 100644 index 0000000000..eb7fced2e6 --- /dev/null +++ b/openpype/hosts/webpublisher/addon.py @@ -0,0 +1,105 @@ +import os + +import click + +from openpype.modules import OpenPypeModule, IHostAddon + +WEBPUBLISHER_ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class WebpublisherAddon(OpenPypeModule, IHostAddon): + name = "webpublisher" + host_name = "webpublisher" + + def initialize(self, module_settings): + self.enabled = True + + def headless_publish(self, log, close_plugin_name=None, is_test=False): + """Runs publish in a opened host with a context. + + Close Python process at the end. + """ + + from openpype.pipeline.publish.lib import remote_publish + from .lib import get_webpublish_conn, publish_and_log + + if is_test: + remote_publish(log, close_plugin_name) + return + + dbcon = get_webpublish_conn() + _id = os.environ.get("BATCH_LOG_ID") + if not _id: + log.warning("Unable to store log records, " + "batch will be unfinished!") + return + + publish_and_log( + dbcon, _id, log, close_plugin_name=close_plugin_name + ) + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group( + WebpublisherAddon.name, + help="Webpublisher related commands.") +def cli_main(): + pass + + +@cli_main.command() +@click.argument("path") +@click.option("-u", "--user", help="User email address") +@click.option("-p", "--project", help="Project") +@click.option("-t", "--targets", help="Targets", default=None, + multiple=True) +def publish(project, path, user=None, targets=None): + """Start publishing (Inner command). + + Publish collects json from paths provided as an argument. + More than one path is allowed. 
+ """ + + from .publish_functions import cli_publish + + cli_publish(project, path, user, targets) + + +@cli_main.command() +@click.argument("path") +@click.option("-p", "--project", help="Project") +@click.option("-h", "--host", help="Host") +@click.option("-u", "--user", help="User email address") +@click.option("-t", "--targets", help="Targets", default=None, + multiple=True) +def publishfromapp(project, path, host, user=None, targets=None): + """Start publishing through application (Inner command). + + Publish collects json from paths provided as an argument. + More than one path is allowed. + """ + + from .publish_functions import cli_publish_from_app + + cli_publish_from_app(project, path, host, user, targets) + + +@cli_main.command() +@click.option("-e", "--executable", help="Executable") +@click.option("-u", "--upload_dir", help="Upload dir") +@click.option("-h", "--host", help="Host", default=None) +@click.option("-p", "--port", help="Port", default=None) +def webserver(executable, upload_dir, host=None, port=None): + """Start service for communication with Webpublish Front end. + + OP must be congigured on a machine, eg. OPENPYPE_MONGO filled AND + FTRACK_BOT_API_KEY provided with api key from Ftrack. + + Expect "pype.club" user created on Ftrack. + """ + + from .webserver_service import run_webserver + + run_webserver(executable, upload_dir, host, port) diff --git a/openpype/hosts/webpublisher/api/__init__.py b/openpype/hosts/webpublisher/api/__init__.py index dbeb628073..afea838e2c 100644 --- a/openpype/hosts/webpublisher/api/__init__.py +++ b/openpype/hosts/webpublisher/api/__init__.py @@ -1,32 +1,23 @@ import os import logging -from avalon import api as avalon -from avalon import io -from pyblish import api as pyblish -import openpype.hosts.webpublisher +import pyblish.api + +from openpype.host import HostBase +from openpype.hosts.webpublisher import WEBPUBLISHER_ROOT_DIR log = logging.getLogger("openpype.hosts.webpublisher") -HOST_DIR = os.path.dirname(os.path.abspath( - openpype.hosts.webpublisher.__file__)) -PLUGINS_DIR = os.path.join(HOST_DIR, "plugins") -PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +class WebpublisherHost(HostBase): + name = "webpublisher" -def install(): - print("Installing Pype config...") + def install(self): + print("Installing Pype config...") + pyblish.api.register_host(self.name) - pyblish.register_plugin_path(PUBLISH_PATH) - log.info(PUBLISH_PATH) - - io.install() - - -def uninstall(): - pyblish.deregister_plugin_path(PUBLISH_PATH) - - -# to have required methods for interface -def ls(): - pass + publish_plugin_dir = os.path.join( + WEBPUBLISHER_ROOT_DIR, "plugins", "publish" + ) + pyblish.api.register_plugin_path(publish_plugin_dir) + self.log.info(publish_plugin_dir) diff --git a/openpype/lib/remote_publish.py b/openpype/hosts/webpublisher/lib.py similarity index 70% rename from openpype/lib/remote_publish.py rename to openpype/hosts/webpublisher/lib.py index 9d97671a61..4bc3f1db80 100644 --- a/openpype/lib/remote_publish.py +++ b/openpype/hosts/webpublisher/lib.py @@ -1,15 +1,18 @@ import os from datetime import datetime -import sys -from bson.objectid import ObjectId import collections +import json + +from bson.objectid import ObjectId import pyblish.util import pyblish.api -from openpype import uninstall -from openpype.lib.mongo import OpenPypeMongoConnection -from openpype.lib.plugin_tools import parse_json +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.settings import get_project_settings +from 
+from openpype.lib.profiles_filtering import filter_profiles
+from openpype.pipeline.publish.lib import find_close_plugin
 
 ERROR_STATUS = "error"
 IN_PROGRESS_STATUS = "in_progress"
@@ -18,21 +21,51 @@
 SENT_REPROCESSING_STATUS = "sent_for_reprocessing"
 FINISHED_REPROCESS_STATUS = "republishing_finished"
 FINISHED_OK_STATUS = "finished_ok"
 
+log = Logger.get_logger(__name__)
+
-def headless_publish(log, close_plugin_name=None, is_test=False):
-    """Runs publish in a opened host with a context and closes Python process.
+
+def parse_json(path):
+    """Parses json file at 'path' location.
+
+    Returns:
+        (dict) or None if unparsable
+    Raises:
+        AssertionError if 'path' doesn't exist
     """
-    if not is_test:
-        dbcon = get_webpublish_conn()
-        _id = os.environ.get("BATCH_LOG_ID")
-        if not _id:
-            log.warning("Unable to store log records, "
-                        "batch will be unfinished!")
-            return
+    path = path.strip('\"')
+    assert os.path.isfile(path), (
+        "Path to json file doesn't exist. \"{}\"".format(path)
+    )
+    data = None
+    with open(path, "r") as json_file:
+        try:
+            data = json.load(json_file)
+        except Exception as exc:
+            log.error(
+                "Error loading json: {} - Exception: {}".format(path, exc)
+            )
+    return data
 
-        publish_and_log(dbcon, _id, log, close_plugin_name=close_plugin_name)
+
+def get_batch_asset_task_info(ctx):
+    """Parses context data from webpublisher's batch metadata.
+
+    Returns:
+        (tuple): asset, task_name (Optional), task_type
+    """
+    task_type = "default_task_type"
+    task_name = None
+    asset = None
+
+    if ctx["type"] == "task":
+        items = ctx["path"].split('/')
+        asset = items[-2]
+        task_name = ctx["name"]
+        task_type = ctx["attributes"]["type"]
     else:
-        publish(log, close_plugin_name)
+        asset = ctx["name"]
+
+    return asset, task_name, task_type
 
 
 def get_webpublish_conn():
@@ -61,39 +94,13 @@
     }).inserted_id
 
 
-def publish(log, close_plugin_name=None):
-    """Loops through all plugins, logs to console. Used for tests.
-
-    Args:
-        log (OpenPypeLogger)
-        close_plugin_name (str): name of plugin with responsibility to
-            close host app
-    """
-    # Error exit as soon as any error occurs.
-    error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"
-
-    close_plugin = _get_close_plugin(close_plugin_name, log)
-
-    for result in pyblish.util.publish_iter():
-        for record in result["records"]:
-            log.info("{}: {}".format(
-                result["plugin"].label, record.msg))
-
-        if result["error"]:
-            log.error(error_format.format(**result))
-            uninstall()
-            if close_plugin:  # close host app explicitly after error
-                context = pyblish.api.Context()
-                close_plugin().process(context)
-
-
 def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None):
     """Loops through all plugins, logs ok and fails into OP DB.
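+
+    Each plugin's log records, and the error traceback on failure, are
+    appended to the job document identified by '_id'.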
Args: dbcon (OpenPypeMongoConnection) _id (str) - id of current job in DB - log (OpenPypeLogger) + log (openpype.lib.Logger) batch_id (str) - id sent from frontend close_plugin_name (str): name of plugin with responsibility to close host app @@ -102,7 +109,7 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}\n" error_format += "-" * 80 + "\n" - close_plugin = _get_close_plugin(close_plugin_name, log) + close_plugin = find_close_plugin(close_plugin_name, log) if isinstance(_id, str): _id = ObjectId(_id) @@ -118,7 +125,6 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): if result["error"]: log.error(error_format.format(**result)) - uninstall() log_lines = [error_format.format(**result)] + log_lines dbcon.update_one( {"_id": _id}, @@ -173,14 +179,12 @@ def publish_and_log(dbcon, _id, log, close_plugin_name=None, batch_id=None): ) -def fail_batch(_id, batches_in_progress, dbcon): - """Set current batch as failed as there are some stuck batches.""" - running_batches = [str(batch["_id"]) - for batch in batches_in_progress - if batch["_id"] != _id] - msg = "There are still running batches {}\n". \ - format("\n".join(running_batches)) - msg += "Ask admin to check them and reprocess current batch" +def fail_batch(_id, dbcon, msg): + """Set current batch as failed as there is some problem. + + Raises: + ValueError + """ dbcon.update_one( {"_id": _id}, {"$set": @@ -224,16 +228,6 @@ def find_variant_key(application_manager, host): return found_variant_key -def _get_close_plugin(close_plugin_name, log): - if close_plugin_name: - plugins = pyblish.api.discover() - for plugin in plugins: - if plugin.__name__ == close_plugin_name: - return plugin - - log.warning("Close plugin not found, app might not close.") - - def get_task_data(batch_dir): """Return parsed data from first task manifest.json @@ -257,3 +251,19 @@ def get_task_data(batch_dir): "Cannot parse batch meta in {} folder".format(task_data)) return task_data + + +def get_timeout(project_name, host_name, task_type): + """Returns timeout(seconds) from Setting profile.""" + filter_data = { + "task_types": task_type, + "hosts": host_name + } + timeout_profiles = (get_project_settings(project_name)["webpublisher"] + ["timeout_profiles"]) + matching_item = filter_profiles(timeout_profiles, filter_data) + timeout = 3600 + if matching_item: + timeout = matching_item["timeout"] + + return timeout diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py index ca14538d7d..eb2737b276 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_batch_data.py @@ -1,18 +1,25 @@ -"""Loads batch context from json and continues in publish process. +"""Parses batch context from json and continues in publish process. Provides: context -> Loaded batch file. 
+ - asset + - task (task name) + - taskType + - project_name + - variant """ import os import pyblish.api -from avalon import io -from openpype.lib.plugin_tools import ( + +from openpype.pipeline import legacy_io +from openpype_modules.webpublisher.lib import ( parse_json, - get_batch_asset_task_info + get_batch_asset_task_info, + get_webpublish_conn, + IN_PROGRESS_STATUS ) -from openpype.lib.remote_publish import get_webpublish_conn, IN_PROGRESS_STATUS class CollectBatchData(pyblish.api.ContextPlugin): @@ -24,7 +31,7 @@ class CollectBatchData(pyblish.api.ContextPlugin): # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.495 label = "Collect batch data" - host = ["webpublisher"] + hosts = ["webpublisher"] def process(self, context): batch_dir = os.environ.get("OPENPYPE_PUBLISH_DATA") @@ -52,14 +59,15 @@ class CollectBatchData(pyblish.api.ContextPlugin): ) os.environ["AVALON_ASSET"] = asset_name - io.Session["AVALON_ASSET"] = asset_name + legacy_io.Session["AVALON_ASSET"] = asset_name os.environ["AVALON_TASK"] = task_name - io.Session["AVALON_TASK"] = task_name + legacy_io.Session["AVALON_TASK"] = task_name context.data["asset"] = asset_name context.data["task"] = task_name context.data["taskType"] = task_type context.data["project_name"] = project_name + context.data["variant"] = batch_data["variant"] self._set_ctx_path(batch_data) diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py index 65cef14703..79ed499a20 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_published_files.py @@ -12,18 +12,19 @@ import clique import tempfile import math -from avalon import io import pyblish.api + +from openpype.client import ( + get_asset_by_name, + get_last_version_by_subset_name +) from openpype.lib import ( prepare_template_data, - get_asset, get_ffprobe_streams, convert_ffprobe_fps_value, ) -from openpype.lib.plugin_tools import ( - parse_json, - get_subset_name_with_asset_doc -) +from openpype.pipeline.create import get_subset_name +from openpype_modules.webpublisher.lib import parse_json class CollectPublishedFiles(pyblish.api.ContextPlugin): @@ -36,15 +37,25 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): This is not applicable for 'studio' processing where host application is called to process uploaded workfile and render frames itself. + + For each task configure what properties should resulting instance have + based on uploaded files: + - uploading sequence of 'png' >> create instance of 'render' family, + by adding 'review' to 'Families' and 'Create review' to Tags it will + produce review. + + There might be difference between single(>>image) and sequence(>>render) + uploaded files. 
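+
+    An example task profile from Settings (hypothetical values, keys as
+    consumed by '_get_family' below):
+        {"is_sequence": true, "extensions": ["png"],
+         "result_family": "render", "families": ["review"],
+         "tags": ["review"]}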
""" # must be really early, context values are only in json file order = pyblish.api.CollectorOrder - 0.490 label = "Collect rendered frames" - host = ["webpublisher"] + hosts = ["webpublisher"] targets = ["filespublish"] # from Settings task_type_to_family = [] + sync_next_version = False # find max version to be published, use for all def process(self, context): batch_dir = context.data["batchDir"] @@ -56,35 +67,52 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): self.log.info("task_sub:: {}".format(task_subfolders)) + project_name = context.data["project_name"] asset_name = context.data["asset"] - asset_doc = get_asset() + asset_doc = get_asset_by_name(project_name, asset_name) task_name = context.data["task"] task_type = context.data["taskType"] project_name = context.data["project_name"] + variant = context.data["variant"] + + next_versions = [] + instances = [] for task_dir in task_subfolders: task_data = parse_json(os.path.join(task_dir, "manifest.json")) self.log.info("task_data:: {}".format(task_data)) is_sequence = len(task_data["files"]) > 1 + first_file = task_data["files"][0] - _, extension = os.path.splitext(task_data["files"][0]) + _, extension = os.path.splitext(first_file) + extension = extension.lower() family, families, tags = self._get_family( self.task_type_to_family, task_type, is_sequence, extension.replace(".", '')) - subset_name = get_subset_name_with_asset_doc( - family, task_data["variant"], task_name, asset_doc, - project_name=project_name, host_name="webpublisher" + subset_name = get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name=project_name, + host_name="webpublisher", + project_settings=context.data["project_settings"] ) - version = self._get_last_version(asset_name, subset_name) + 1 + version = self._get_next_version( + project_name, asset_doc, subset_name + ) + next_versions.append(version) instance = context.create_instance(subset_name) instance.data["asset"] = asset_name instance.data["subset"] = subset_name + # set configurable result family instance.data["family"] = family + # set configurable additional families instance.data["families"] = families instance.data["version"] = version instance.data["stagingDir"] = tempfile.mkdtemp() @@ -108,24 +136,40 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): instance.data["representations"] = self._get_single_repre( task_dir, task_data["files"], tags ) - file_url = os.path.join(task_dir, task_data["files"][0]) - no_of_frames = self._get_number_of_frames(file_url) - if no_of_frames: + if family != 'workfile': + file_url = os.path.join(task_dir, task_data["files"][0]) try: - frame_end = int(frame_start) + math.ceil(no_of_frames) - instance.data["frameEnd"] = math.ceil(frame_end) - 1 - self.log.debug("frameEnd:: {}".format( - instance.data["frameEnd"])) - except ValueError: + no_of_frames = self._get_number_of_frames(file_url) + if no_of_frames: + frame_end = int(frame_start) + \ + math.ceil(no_of_frames) + frame_end = math.ceil(frame_end) - 1 + instance.data["frameEnd"] = frame_end + self.log.debug("frameEnd:: {}".format( + instance.data["frameEnd"])) + except Exception: self.log.warning("Unable to count frames " "duration {}".format(no_of_frames)) - # raise ValueError("STOP") instance.data["handleStart"] = asset_doc["data"]["handleStart"] instance.data["handleEnd"] = asset_doc["data"]["handleEnd"] + if "review" in tags: + first_file_path = os.path.join(task_dir, first_file) + instance.data["thumbnailSource"] = first_file_path + + instances.append(instance) 
self.log.info("instance.data:: {}".format(instance.data)) + if not self.sync_next_version: + return + + # overwrite specific version with same version for all + max_next_version = max(next_versions) + for inst in instances: + inst.data["version"] = max_next_version + self.log.debug("overwritten version:: {}".format(max_next_version)) + def _get_subset_name(self, family, subset_template, task_name, variant): fill_pairs = { "variant": variant, @@ -137,6 +181,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): def _get_single_repre(self, task_dir, files, tags): _, ext = os.path.splitext(files[0]) + ext = ext.lower() repre_data = { "name": ext[1:], "ext": ext[1:], @@ -156,6 +201,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): frame_start = list(collections[0].indexes)[0] frame_end = list(collections[0].indexes)[-1] ext = collections[0].tail + ext = ext.lower() repre_data = { "frameStart": frame_start, "frameEnd": frame_end, @@ -163,7 +209,7 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): "ext": ext[1:], "files": files, "stagingDir": task_dir, - "tags": tags + "tags": tags # configurable tags from Settings } self.log.info("sequences repre_data.data:: {}".format(repre_data)) return [repre_data] @@ -201,70 +247,42 @@ class CollectPublishedFiles(pyblish.api.ContextPlugin): for config in families_config: if is_sequence != config["is_sequence"]: continue - if (extension in config["extensions"] or - '' in config["extensions"]): # all extensions setting + extensions = config.get("extensions") or [] + lower_extensions = set() + for ext in extensions: + if ext: + ext = ext.lower() + if ext.startswith("."): + ext = ext[1:] + lower_extensions.add(ext) + + # all extensions setting + if not lower_extensions or extension in lower_extensions: found_family = config["result_family"] break msg = "No family found for combination of " +\ "task_type: {}, is_sequence:{}, extension: {}".format( task_type, is_sequence, extension) - found_family = "render" assert found_family, msg return (found_family, config["families"], config["tags"]) - def _get_last_version(self, asset_name, subset_name): - """Returns version number or 0 for 'asset' and 'subset'""" - query = [ - { - "$match": {"type": "asset", "name": asset_name} - }, - { - "$lookup": - { - "from": os.environ["AVALON_PROJECT"], - "localField": "_id", - "foreignField": "parent", - "as": "subsets" - } - }, - { - "$unwind": "$subsets" - }, - { - "$match": {"subsets.type": "subset", - "subsets.name": subset_name}}, - { - "$lookup": - { - "from": os.environ["AVALON_PROJECT"], - "localField": "subsets._id", - "foreignField": "parent", - "as": "versions" - } - }, - { - "$unwind": "$versions" - }, - { - "$group": { - "_id": { - "asset_name": "$name", - "subset_name": "$subsets.name" - }, - 'version': {'$max': "$versions.name"} - } - } - ] - version = list(io.aggregate(query)) + def _get_next_version(self, project_name, asset_doc, subset_name): + """Returns version number or 1 for 'asset' and 'subset'""" - if version: - return version[0].get("version") or 0 - else: - return 0 + version_doc = get_last_version_by_subset_name( + project_name, + subset_name, + asset_doc["_id"], + fields=["name"] + ) + version = 1 + if version_doc: + version += int(version_doc["name"]) + return version def _get_number_of_frames(self, file_url): """Return duration in frames""" diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py index 92f581be5f..948e86c23e 
100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_instances.py @@ -10,7 +10,7 @@ import re import copy import pyblish.api -from openpype.lib import get_subset_name_with_asset_doc +from openpype.pipeline.create import get_subset_name class CollectTVPaintInstances(pyblish.api.ContextPlugin): @@ -47,13 +47,14 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): new_instances = [] # Workfile instance - workfile_subset_name = get_subset_name_with_asset_doc( + workfile_subset_name = get_subset_name( self.workfile_family, self.workfile_variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) workfile_instance = self._create_workfile_instance( context, workfile_subset_name @@ -61,13 +62,14 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): new_instances.append(workfile_instance) # Review instance - review_subset_name = get_subset_name_with_asset_doc( + review_subset_name = get_subset_name( self.review_family, self.review_variant, task_name, asset_doc, project_name, - host_name + host_name, + project_settings=context.data["project_settings"] ) review_instance = self._create_review_instance( context, review_subset_name @@ -114,14 +116,15 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): "family": "render" } - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.render_pass_family, render_pass, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=context.data["project_settings"] ) instance = self._create_render_pass_instance( @@ -137,14 +140,15 @@ class CollectTVPaintInstances(pyblish.api.ContextPlugin): # Override family for subset name "family": "render" } - subset_name = get_subset_name_with_asset_doc( + subset_name = get_subset_name( self.render_layer_family, variant, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=context.data["project_settings"] ) instance = self._create_render_layer_instance( context, layers, subset_name diff --git a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py index f0f29260a2..b5f8ed9c8f 100644 --- a/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/collect_tvpaint_workfile_data.py @@ -16,11 +16,11 @@ import uuid import json import shutil import pyblish.api -from openpype.lib.plugin_tools import parse_json from openpype.hosts.tvpaint.worker import ( SenderTVPaintCommands, CollectSceneData ) +from openpype_modules.webpublisher.lib import parse_json class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin): diff --git a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py b/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py deleted file mode 100644 index cb6ed8481c..0000000000 --- a/openpype/hosts/webpublisher/plugins/publish/extract_thumbnail.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -import shutil - -import pyblish.api -from openpype.lib import ( - get_ffmpeg_tool_path, - - run_subprocess, - - get_transcode_temp_directory, - convert_for_ffmpeg, - should_convert_for_ffmpeg -) - - -class ExtractThumbnail(pyblish.api.InstancePlugin): - """Create jpg thumbnail from input using ffmpeg.""" - - 
label = "Extract Thumbnail" - order = pyblish.api.ExtractorOrder - families = [ - "render", - "image" - ] - hosts = ["webpublisher"] - targets = ["filespublish"] - - def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) - - filtered_repres = self._get_filtered_repres(instance) - for repre in filtered_repres: - repre_files = repre["files"] - if not isinstance(repre_files, (list, tuple)): - input_file = repre_files - else: - file_index = int(float(len(repre_files)) * 0.5) - input_file = repre_files[file_index] - - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("Input filepath: {}".format(full_input_path)) - - do_convert = should_convert_for_ffmpeg(full_input_path) - # If result is None the requirement of conversion can't be - # determined - if do_convert is None: - self.log.info(( - "Can't determine if representation requires conversion." - " Skipped." - )) - continue - - # Do conversion if needed - # - change staging dir of source representation - # - must be set back after output definitions processing - convert_dir = None - if do_convert: - convert_dir = get_transcode_temp_directory() - filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, - convert_dir, - None, - None, - self.log - ) - full_input_path = os.path.join(convert_dir, filename) - - filename = os.path.splitext(input_file)[0] - while filename.endswith("."): - filename = filename[:-1] - thumbnail_filename = filename + "_thumbnail.jpg" - full_output_path = os.path.join(stagingdir, thumbnail_filename) - - self.log.info("output {}".format(full_output_path)) - - ffmpeg_args = [ - get_ffmpeg_tool_path("ffmpeg"), - "-y", - "-i", full_input_path, - "-vframes", "1", - full_output_path - ] - - # run subprocess - self.log.debug("{}".format(" ".join(ffmpeg_args))) - try: # temporary until oiiotool is supported cross platform - run_subprocess( - ffmpeg_args, logger=self.log - ) - except RuntimeError as exp: - if "Compression" in str(exp): - self.log.debug( - "Unsupported compression on input files. Skipping!!!" - ) - return - self.log.warning("Conversion crashed", exc_info=True) - raise - - new_repre = { - "name": "thumbnail", - "ext": "jpg", - "files": thumbnail_filename, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ["thumbnail"] - } - - # adding representation - self.log.debug("Adding: {}".format(new_repre)) - instance.data["representations"].append(new_repre) - - # Cleanup temp folder - if convert_dir is not None and os.path.exists(convert_dir): - shutil.rmtree(convert_dir) - - def _get_filtered_repres(self, instance): - filtered_repres = [] - repres = instance.data.get("representations") or [] - for repre in repres: - self.log.debug(repre) - tags = repre.get("tags") or [] - # Skip instance if already has thumbnail representation - if "thumbnail" in tags: - return [] - - if "review" not in tags: - continue - - if not repre.get("files"): - self.log.info(( - "Representation \"{}\" don't have files. 
Skipping" - ).format(repre["name"])) - continue - - filtered_repres.append(repre) - return filtered_repres diff --git a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py index a5e4868411..d8b7bb9078 100644 --- a/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py +++ b/openpype/hosts/webpublisher/plugins/publish/validate_tvpaint_workfile_data.py @@ -13,7 +13,7 @@ class ValidateWorkfileData(pyblish.api.ContextPlugin): targets = ["tvpaint_worker"] def process(self, context): - # Data collected in `CollectAvalonEntities` + # Data collected in `CollectContextEntities` frame_start = context.data["frameStart"] frame_end = context.data["frameEnd"] handle_start = context.data["handleStart"] diff --git a/openpype/hosts/webpublisher/publish_functions.py b/openpype/hosts/webpublisher/publish_functions.py new file mode 100644 index 0000000000..83f53ced68 --- /dev/null +++ b/openpype/hosts/webpublisher/publish_functions.py @@ -0,0 +1,205 @@ +import os +import time +import pyblish.api +import pyblish.util + +from openpype.lib import Logger +from openpype.lib.applications import ( + ApplicationManager, + get_app_environments_for_context, +) +from openpype.pipeline import install_host +from openpype.hosts.webpublisher.api import WebpublisherHost + +from .lib import ( + get_batch_asset_task_info, + get_webpublish_conn, + start_webpublish_log, + publish_and_log, + fail_batch, + find_variant_key, + get_task_data, + get_timeout, + IN_PROGRESS_STATUS +) + + +def cli_publish(project_name, batch_path, user_email, targets): + """Start headless publishing. + + Used to publish rendered assets, workfiles etc via Webpublisher. + Eventually should be yanked out to Webpublisher cli. + + Publish use json from passed paths argument. + + Args: + project_name (str): project to publish (only single context is + expected per call of remotepublish + batch_path (str): Path batch folder. Contains subfolders with + resources (workfile, another subfolder 'renders' etc.) + user_email (string): email address for webpublisher - used to + find Ftrack user with same email + targets (list): Pyblish targets + (to choose validator for example) + + Raises: + RuntimeError: When there is no path to process. + """ + + if not batch_path: + raise RuntimeError("No publish paths specified") + + log = Logger.get_logger("remotepublish") + log.info("remotepublish command") + + # Register target and host + webpublisher_host = WebpublisherHost() + + os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path + os.environ["AVALON_PROJECT"] = project_name + os.environ["AVALON_APP"] = webpublisher_host.name + os.environ["USER_EMAIL"] = user_email + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + + if targets: + if isinstance(targets, str): + targets = [targets] + for target in targets: + pyblish.api.register_target(target) + + install_host(webpublisher_host) + + log.info("Running publish ...") + + _, batch_id = os.path.split(batch_path) + dbcon = get_webpublish_conn() + _id = start_webpublish_log(dbcon, batch_id, user_email) + + task_data = get_task_data(batch_path) + if not task_data["context"]: + msg = "Batch manifest must contain context data" + msg += "Create new batch and set context properly." 
+        fail_batch(_id, dbcon, msg)
+
+    publish_and_log(dbcon, _id, log, batch_id=batch_id)
+
+    log.info("Publish finished.")
+
+
+def cli_publish_from_app(
+    project_name, batch_path, host_name, user_email, targets
+):
+    """Opens installed variant of 'host' and runs remote publish there.
+
+    Eventually should be yanked out to Webpublisher cli.
+
+    Currently implemented and tested for Photoshop where customer
+    wants to process uploaded .psd file and publish collected layers
+    from there. Triggered by Webpublisher.
+
+    Checks that no other batch is running (status == 'in_progress').
+    If there is one, the current batch is marked as failed and admin
+    must check and reprocess it.
+
+    Requires installed host application on the machine.
+
+    Runs publish process as user would, in automatic fashion.
+
+    Args:
+        project_name (str): project to publish (only single context is
+            expected per call of remotepublish)
+        batch_path (str): Path to batch folder. Contains subfolders with
+            resources (workfile, another subfolder 'renders' etc.)
+        host_name (str): 'photoshop'
+        user_email (string): email address for webpublisher - used to
+            find Ftrack user with same email
+        targets (list): Pyblish targets
+            (to choose validator for example)
+    """
+
+    log = Logger.get_logger("RemotePublishFromApp")
+
+    log.info("remotepublishphotoshop command")
+
+    task_data = get_task_data(batch_path)
+
+    workfile_path = os.path.join(batch_path,
+                                 task_data["task"],
+                                 task_data["files"][0])
+
+    print("workfile_path {}".format(workfile_path))
+
+    batch_id = task_data["batch"]
+    dbcon = get_webpublish_conn()
+    # safer to start logging here, launch might be broken altogether
+    _id = start_webpublish_log(dbcon, batch_id, user_email)
+
+    batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS}))
+    if len(batches_in_progress) > 1:
+        running_batches = [str(batch["_id"])
+                           for batch in batches_in_progress
+                           if batch["_id"] != _id]
+        msg = "There are still running batches {}\n".format(
+            "\n".join(running_batches))
+        msg += "Ask admin to check them and reprocess current batch"
+        fail_batch(_id, dbcon, msg)
+
+    if not task_data["context"]:
+        msg = "Batch manifest must contain context data. "
+        msg += "Create new batch and set context properly."
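+        # same guard as in 'cli_publish' - fail the batch before the
+        # host application is even launched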
+ fail_batch(_id, dbcon, msg) + + asset_name, task_name, task_type = get_batch_asset_task_info( + task_data["context"]) + + application_manager = ApplicationManager() + found_variant_key = find_variant_key(application_manager, host_name) + app_name = "{}/{}".format(host_name, found_variant_key) + + # must have for proper launch of app + env = get_app_environments_for_context( + project_name, + asset_name, + task_name, + app_name + ) + print("env:: {}".format(env)) + os.environ.update(env) + + os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path + # must pass identifier to update log lines for a batch + os.environ["BATCH_LOG_ID"] = str(_id) + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib + os.environ["USER_EMAIL"] = user_email + + pyblish.api.register_host(host_name) + if targets: + if isinstance(targets, str): + targets = [targets] + current_targets = os.environ.get("PYBLISH_TARGETS", "").split( + os.pathsep) + for target in targets: + current_targets.append(target) + + os.environ["PYBLISH_TARGETS"] = os.pathsep.join( + set(current_targets)) + + data = { + "last_workfile_path": workfile_path, + "start_last_workfile": True, + "project_name": project_name, + "asset_name": asset_name, + "task_name": task_name + } + + launched_app = application_manager.launch(app_name, **data) + + timeout = get_timeout(project_name, host_name, task_type) + + time_start = time.time() + while launched_app.poll() is None: + time.sleep(0.5) + if time.time() - time_start > timeout: + launched_app.terminate() + msg = "Timeout reached" + fail_batch(_id, dbcon, msg) diff --git a/openpype/hosts/webpublisher/webserver_service/__init__.py b/openpype/hosts/webpublisher/webserver_service/__init__.py new file mode 100644 index 0000000000..73111d286e --- /dev/null +++ b/openpype/hosts/webpublisher/webserver_service/__init__.py @@ -0,0 +1,6 @@ +from .webserver import run_webserver + + +__all__ = ( + "run_webserver", +) diff --git a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py index 1f9089aa27..4039d2c8ec 100644 --- a/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py +++ b/openpype/hosts/webpublisher/webserver_service/webpublish_routes.py @@ -2,42 +2,46 @@ import os import json import datetime -from bson.objectid import ObjectId import collections -from aiohttp.web_response import Response import subprocess +from bson.objectid import ObjectId +from aiohttp.web_response import Response -from avalon.api import AvalonMongoDB - -from openpype.lib import OpenPypeMongoConnection -from openpype_modules.avalon_apps.rest_api import _RestApiEndpoint +from openpype.client import ( + get_projects, + get_assets, +) +from openpype.lib import Logger from openpype.settings import get_project_settings - -from openpype.lib import PypeLogger -from openpype.lib.remote_publish import ( +from openpype_modules.webserver.base_routes import RestApiEndpoint +from openpype_modules.webpublisher import WebpublisherAddon +from openpype_modules.webpublisher.lib import ( + get_webpublish_conn, get_task_data, ERROR_STATUS, REPROCESS_STATUS ) -log = PypeLogger.get_logger("WebServer") +log = Logger.get_logger("WebpublishRoutes") -class RestApiResource: - """Resource carrying needed info and Avalon DB connection for publish.""" - def __init__(self, server_manager, executable, upload_dir, - studio_task_queue=None): - self.server_manager = server_manager - self.upload_dir = upload_dir - self.executable = executable +class 
ResourceRestApiEndpoint(RestApiEndpoint):
+    def __init__(self, resource):
+        self.resource = resource
+        super(ResourceRestApiEndpoint, self).__init__()
 
-        if studio_task_queue is None:
-            studio_task_queue = collections.deque().dequeu
-        self.studio_task_queue = studio_task_queue
 
-        self.dbcon = AvalonMongoDB()
-        self.dbcon.install()
 
+class WebpublishApiEndpoint(ResourceRestApiEndpoint):
+    @property
+    def dbcon(self):
+        return self.resource.dbcon
+
 
+class JsonApiResource:
+    """Resource for json manipulation.
+
+    All resources that send json output to REST should inherit from
+    this class.
+    """
     @staticmethod
     def json_dump_handler(value):
         if isinstance(value, datetime.datetime):
@@ -57,28 +61,36 @@ class RestApiResource:
         ).encode("utf-8")
 
 
-class OpenPypeRestApiResource(RestApiResource):
+class RestApiResource(JsonApiResource):
+    """Resource carrying needed info and Avalon DB connection for publish."""
+    def __init__(self, server_manager, executable, upload_dir,
+                 studio_task_queue=None):
+        self.server_manager = server_manager
+        self.upload_dir = upload_dir
+        self.executable = executable
+
+        if studio_task_queue is None:
+            studio_task_queue = collections.deque()
+        self.studio_task_queue = studio_task_queue
+
+
+class WebpublishRestApiResource(JsonApiResource):
    """Resource carrying OP DB connection for storing batch info into DB."""
-    def __init__(self, ):
-        mongo_client = OpenPypeMongoConnection.get_mongo_client()
-        database_name = os.environ["OPENPYPE_DATABASE_NAME"]
-        self.dbcon = mongo_client[database_name]["webpublishes"]
+
+    def __init__(self):
+        self.dbcon = get_webpublish_conn()
 
 
-class ProjectsEndpoint(_RestApiEndpoint):
+class ProjectsEndpoint(ResourceRestApiEndpoint):
     """Returns list of dict with project info (id, name)."""
     async def get(self) -> Response:
         output = []
-        for project_name in self.dbcon.database.collection_names():
-            project_doc = self.dbcon.database[project_name].find_one({
-                "type": "project"
-            })
-            if project_doc:
-                ret_val = {
-                    "id": project_doc["_id"],
-                    "name": project_doc["name"]
-                }
-                output.append(ret_val)
+        for project_doc in get_projects():
+            ret_val = {
+                "id": project_doc["_id"],
+                "name": project_doc["name"]
+            }
+            output.append(ret_val)
         return Response(
             status=200,
             body=self.resource.encode(output),
@@ -86,7 +98,7 @@ class ProjectsEndpoint(_RestApiEndpoint):
         )
 
 
-class HiearchyEndpoint(_RestApiEndpoint):
+class HiearchyEndpoint(ResourceRestApiEndpoint):
     """Returns dictionary with context tree from assets."""
     async def get(self, project_name) -> Response:
         query_projection = {
@@ -98,10 +110,7 @@ class HiearchyEndpoint(_RestApiEndpoint):
             "type": 1,
         }
 
-        asset_docs = self.dbcon.database[project_name].find(
-            {"type": "asset"},
-            query_projection
-        )
+        asset_docs = get_assets(project_name, fields=query_projection.keys())
         asset_docs_by_id = {
             asset_doc["_id"]: asset_doc
             for asset_doc in asset_docs
@@ -185,7 +194,7 @@ class TaskNode(Node):
         self["attributes"] = {}
 
 
-class BatchPublishEndpoint(_RestApiEndpoint):
+class BatchPublishEndpoint(WebpublishApiEndpoint):
     """Triggers headless publishing of batch."""
     async def post(self, request) -> Response:
         # Validate existence of openpype executable
@@ -205,7 +214,7 @@ class BatchPublishEndpoint(_RestApiEndpoint):
             # TVPaint filter
             {
                 "extensions": [".tvpp"],
-                "command": "remotepublish",
+                "command": "publish",
                 "arguments": {
                     "targets": ["tvpaint_worker"]
                 },
@@ -214,13 +223,13 @@
             # Photoshop filter
             {
                 "extensions": [".psd", ".psb"],
-                "command": "remotepublishfromapp",
+                "command":
"publishfromapp", "arguments": { - # Command 'remotepublishfromapp' requires --host argument + # Command 'publishfromapp' requires --host argument "host": "photoshop", # Make sure targets are set to None for cases that default # would change - # - targets argument is not used in 'remotepublishfromapp' + # - targets argument is not used in 'publishfromapp' "targets": ["remotepublish"] }, # does publish need to be handled by a queue, eg. only @@ -232,7 +241,7 @@ class BatchPublishEndpoint(_RestApiEndpoint): batch_dir = os.path.join(self.resource.upload_dir, content["batch"]) # Default command and arguments - command = "remotepublish" + command = "publish" add_args = { # All commands need 'project' and 'user' "project": content["project_name"], @@ -263,6 +272,8 @@ class BatchPublishEndpoint(_RestApiEndpoint): args = [ openpype_app, + "module", + WebpublisherAddon.name, command, batch_dir ] @@ -290,7 +301,7 @@ class BatchPublishEndpoint(_RestApiEndpoint): ) -class TaskPublishEndpoint(_RestApiEndpoint): +class TaskPublishEndpoint(WebpublishApiEndpoint): """Prepared endpoint triggered after each task - for future development.""" async def post(self, request) -> Response: return Response( @@ -300,8 +311,12 @@ class TaskPublishEndpoint(_RestApiEndpoint): ) -class BatchStatusEndpoint(_RestApiEndpoint): - """Returns dict with info for batch_id.""" +class BatchStatusEndpoint(WebpublishApiEndpoint): + """Returns dict with info for batch_id. + + Uses 'WebpublishRestApiResource'. + """ + async def get(self, batch_id) -> Response: output = self.dbcon.find_one({"batch_id": batch_id}) @@ -320,8 +335,12 @@ class BatchStatusEndpoint(_RestApiEndpoint): ) -class UserReportEndpoint(_RestApiEndpoint): - """Returns list of dict with batch info for user (email address).""" +class UserReportEndpoint(WebpublishApiEndpoint): + """Returns list of dict with batch info for user (email address). + + Uses 'WebpublishRestApiResource'. + """ + async def get(self, user) -> Response: output = list(self.dbcon.find({"user": user}, projection={"log": False})) @@ -340,7 +359,7 @@ class UserReportEndpoint(_RestApiEndpoint): ) -class ConfiguredExtensionsEndpoint(_RestApiEndpoint): +class ConfiguredExtensionsEndpoint(WebpublishApiEndpoint): """Returns dict of extensions which have mapping to family. Returns: @@ -380,8 +399,12 @@ class ConfiguredExtensionsEndpoint(_RestApiEndpoint): ) -class BatchReprocessEndpoint(_RestApiEndpoint): - """Marks latest 'batch_id' for reprocessing, returns 404 if not found.""" +class BatchReprocessEndpoint(WebpublishApiEndpoint): + """Marks latest 'batch_id' for reprocessing, returns 404 if not found. + + Uses 'WebpublishRestApiResource'. 
+ """ + async def post(self, batch_id) -> Response: batches = self.dbcon.find({"batch_id": batch_id, "status": ERROR_STATUS}).sort("_id", -1) diff --git a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py b/openpype/hosts/webpublisher/webserver_service/webserver.py similarity index 80% rename from openpype/hosts/webpublisher/webserver_service/webserver_cli.py rename to openpype/hosts/webpublisher/webserver_service/webserver.py index 909ea38bc6..093b53d9d3 100644 --- a/openpype/hosts/webpublisher/webserver_service/webserver_cli.py +++ b/openpype/hosts/webpublisher/webserver_service/webserver.py @@ -6,11 +6,19 @@ import requests import json import subprocess -from openpype.lib import PypeLogger +from openpype.client import OpenPypeMongoConnection +from openpype.modules import ModulesManager +from openpype.lib import Logger + +from openpype_modules.webpublisher.lib import ( + ERROR_STATUS, + REPROCESS_STATUS, + SENT_REPROCESSING_STATUS +) from .webpublish_routes import ( RestApiResource, - OpenPypeRestApiResource, + WebpublishRestApiResource, HiearchyEndpoint, ProjectsEndpoint, ConfiguredExtensionsEndpoint, @@ -20,32 +28,29 @@ from .webpublish_routes import ( TaskPublishEndpoint, UserReportEndpoint ) -from openpype.lib.remote_publish import ( - ERROR_STATUS, - REPROCESS_STATUS, - SENT_REPROCESSING_STATUS -) + +log = Logger.get_logger("webserver_gui") -log = PypeLogger().get_logger("webserver_gui") - - -def run_webserver(*args, **kwargs): +def run_webserver(executable, upload_dir, host=None, port=None): """Runs webserver in command line, adds routes.""" - from openpype.modules import ModulesManager + + if not host: + host = "localhost" + if not port: + port = 8079 manager = ModulesManager() webserver_module = manager.modules_by_name["webserver"] - host = kwargs.get("host") or "localhost" - port = kwargs.get("port") or 8079 + server_manager = webserver_module.create_new_server_manager(port, host) webserver_url = server_manager.url # queue for remotepublishfromapp tasks studio_task_queue = collections.deque() resource = RestApiResource(server_manager, - upload_dir=kwargs["upload_dir"], - executable=kwargs["executable"], + upload_dir=upload_dir, + executable=executable, studio_task_queue=studio_task_queue) projects_endpoint = ProjectsEndpoint(resource) server_manager.add_route( @@ -69,16 +74,14 @@ def run_webserver(*args, **kwargs): ) # triggers publish - webpublisher_task_publish_endpoint = \ - BatchPublishEndpoint(resource) + webpublisher_task_publish_endpoint = BatchPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/batch", webpublisher_task_publish_endpoint.dispatch ) - webpublisher_batch_publish_endpoint = \ - TaskPublishEndpoint(resource) + webpublisher_batch_publish_endpoint = TaskPublishEndpoint(resource) server_manager.add_route( "POST", "/api/webpublish/task", @@ -86,34 +89,33 @@ def run_webserver(*args, **kwargs): ) # reporting - openpype_resource = OpenPypeRestApiResource() - batch_status_endpoint = BatchStatusEndpoint(openpype_resource) + webpublish_resource = WebpublishRestApiResource() + batch_status_endpoint = BatchStatusEndpoint(webpublish_resource) server_manager.add_route( "GET", "/api/batch_status/{batch_id}", batch_status_endpoint.dispatch ) - user_status_endpoint = UserReportEndpoint(openpype_resource) + user_status_endpoint = UserReportEndpoint(webpublish_resource) server_manager.add_route( "GET", "/api/publishes/{user}", user_status_endpoint.dispatch ) - webpublisher_batch_reprocess_endpoint = \ - 
BatchReprocessEndpoint(openpype_resource) + batch_reprocess_endpoint = BatchReprocessEndpoint(webpublish_resource) server_manager.add_route( "POST", "/api/webpublish/reprocess/{batch_id}", - webpublisher_batch_reprocess_endpoint.dispatch + batch_reprocess_endpoint.dispatch ) server_manager.start_server() last_reprocessed = time.time() while True: if time.time() - last_reprocessed > 20: - reprocess_failed(kwargs["upload_dir"], webserver_url) + reprocess_failed(upload_dir, webserver_url) last_reprocessed = time.time() if studio_task_queue: args = studio_task_queue.popleft() @@ -124,8 +126,6 @@ def run_webserver(*args, **kwargs): def reprocess_failed(upload_dir, webserver_url): # log.info("check_reprocesable_records") - from openpype.lib import OpenPypeMongoConnection - mongo_client = OpenPypeMongoConnection.get_mongo_client() database_name = os.environ["OPENPYPE_DATABASE_NAME"] dbcon = mongo_client[database_name]["webpublishes"] diff --git a/openpype/lib/__init__.py b/openpype/lib/__init__.py index e8b6d18f4e..9eb7724a60 100644 --- a/openpype/lib/__init__.py +++ b/openpype/lib/__init__.py @@ -30,7 +30,7 @@ from .vendor_bin_utils import ( ) from .attribute_definitions import ( - AbtractAttrDef, + AbstractAttrDef, UIDef, UISeparatorDef, @@ -42,12 +42,12 @@ from .attribute_definitions import ( EnumDef, BoolDef, FileDef, + FileDefItem, ) from .env_tools import ( env_value_to_bool, get_paths_from_environ, - get_global_environments ) from .terminal import Terminal @@ -63,7 +63,10 @@ from .execute import ( path_to_subprocess_arg, CREATE_NO_WINDOW ) -from .log import PypeLogger, timeit +from .log import ( + Logger, + PypeLogger, +) from .path_templates import ( merge_dict, @@ -79,12 +82,10 @@ from .mongo import ( validate_mongo_connection, OpenPypeMongoConnection ) -from .anatomy import ( - Anatomy -) -from .config import ( +from .dateutils import ( get_datetime_data, + get_timestamp, get_formatted_current_time ) @@ -105,45 +106,29 @@ from .transcoding import ( get_transcode_temp_directory, should_convert_for_ffmpeg, convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, get_ffmpeg_format_args, convert_ffprobe_fps_value, + convert_ffprobe_fps_to_float, ) from .avalon_context import ( CURRENT_DOC_SCHEMAS, - PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX, create_project, - is_latest, - any_outdated, - get_asset, - get_hierarchy, - get_linked_assets, - get_latest_version, - get_system_general_anatomy_data, get_workfile_template_key, get_workfile_template_key_from_context, - get_workdir_data, - get_workdir, - get_workdir_with_workdir_data, get_last_workfile_with_version, get_last_workfile, - create_workfile_doc, - save_workfile_data_to_doc, - get_workfile_doc, - BuildWorkfile, get_creator_by_name, get_custom_workfile_template, - change_timer_to_current_context, - get_custom_workfile_template_by_context, get_custom_workfile_template_by_string_context, get_custom_workfile_template @@ -181,14 +166,12 @@ from .plugin_tools import ( get_subset_name, get_subset_name_with_asset_doc, prepare_template_data, - filter_pyblish_plugins, - set_plugin_attributes_from_settings, source_hash, - get_unique_layer_name, - get_background_layers, ) from .path_tools import ( + format_file_size, + collect_frames, create_hard_link, version_up, get_version_from_path, @@ -198,18 +181,6 @@ from .path_tools import ( get_project_basic_paths, ) -from .editorial import ( - is_overlapping_otio_ranges, - otio_range_to_frame_range, - otio_range_with_handles, - 
convert_to_padded_path, - trim_media_range, - range_from_frames, - frames_to_secons, - frames_to_timecode, - make_sequence_collection -) - from .openpype_version import ( op_version_control_available, get_openpype_version, @@ -221,6 +192,12 @@ from .openpype_version import ( is_current_version_higher_than_expected ) + +from .connections import ( + requests_get, + requests_post +) + terminal = Terminal __all__ = [ @@ -241,14 +218,13 @@ __all__ = [ "env_value_to_bool", "get_paths_from_environ", - "get_global_environments", "get_vendor_bin_path", "get_oiio_tools_path", "get_ffmpeg_tool_path", "is_oiio_supported", - "AbtractAttrDef", + "AbstractAttrDef", "UIDef", "UISeparatorDef", @@ -260,6 +236,7 @@ __all__ = [ "EnumDef", "BoolDef", "FileDef", + "FileDefItem", "import_filepath", "modules_from_path", @@ -270,42 +247,26 @@ __all__ = [ "get_transcode_temp_directory", "should_convert_for_ffmpeg", "convert_for_ffmpeg", + "convert_input_paths_for_ffmpeg", "get_ffprobe_data", "get_ffprobe_streams", "get_ffmpeg_codec_args", "get_ffmpeg_format_args", "convert_ffprobe_fps_value", + "convert_ffprobe_fps_to_float", "CURRENT_DOC_SCHEMAS", - "PROJECT_NAME_ALLOWED_SYMBOLS", - "PROJECT_NAME_REGEX", "create_project", - "is_latest", - "any_outdated", - "get_asset", - "get_hierarchy", - "get_linked_assets", - "get_latest_version", - "get_system_general_anatomy_data", "get_workfile_template_key", "get_workfile_template_key_from_context", - "get_workdir_data", - "get_workdir", - "get_workdir_with_workdir_data", "get_last_workfile_with_version", "get_last_workfile", - "create_workfile_doc", - "save_workfile_data_to_doc", - "get_workfile_doc", - "BuildWorkfile", "get_creator_by_name", - "change_timer_to_current_context", - "get_custom_workfile_template_by_context", "get_custom_workfile_template_by_string_context", "get_custom_workfile_template", @@ -338,12 +299,10 @@ __all__ = [ "TaskNotSetError", "get_subset_name", "get_subset_name_with_asset_doc", - "filter_pyblish_plugins", - "set_plugin_attributes_from_settings", "source_hash", - "get_unique_layer_name", - "get_background_layers", + "format_file_size", + "collect_frames", "create_hard_link", "version_up", "get_version_from_path", @@ -358,27 +317,16 @@ __all__ = [ "terminal", - "Anatomy", - "get_datetime_data", "get_formatted_current_time", + "Logger", "PypeLogger", + "get_default_components", "validate_mongo_connection", "OpenPypeMongoConnection", - "timeit", - - "is_overlapping_otio_ranges", - "otio_range_with_handles", - "convert_to_padded_path", - "otio_range_to_frame_range", - "trim_media_range", - "range_from_frames", - "frames_to_secons", - "frames_to_timecode", - "make_sequence_collection", "create_project_folders", "create_workdir_extra_folders", "get_project_basic_paths", @@ -390,4 +338,7 @@ __all__ = [ "is_running_from_build", "is_running_staging", "is_current_version_studio_latest", + + "requests_get", + "requests_post" ] diff --git a/openpype/lib/abstract_metaplugins.py b/openpype/lib/abstract_metaplugins.py deleted file mode 100644 index f8163956ad..0000000000 --- a/openpype/lib/abstract_metaplugins.py +++ /dev/null @@ -1,10 +0,0 @@ -from abc import ABCMeta -from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin - - -class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): - pass - - -class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): - pass diff --git a/openpype/lib/applications.py b/openpype/lib/applications.py index ad59ae0dbc..7cc296f47b 100644 --- a/openpype/lib/applications.py +++ b/openpype/lib/applications.py @@ -11,26 +11,22 @@ 
from abc import ABCMeta, abstractmethod import six +from openpype.client import ( + get_project, + get_asset_by_name, +) from openpype.settings import ( get_system_settings, - get_project_settings + get_project_settings, + get_local_settings ) from openpype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL ) -from . import ( - PypeLogger, - Anatomy -) +from .log import Logger from .profiles_filtering import filter_profiles from .local_settings import get_openpype_username -from .avalon_context import ( - get_workdir_data, - get_workdir_with_workdir_data, - get_workfile_template_key, - get_last_workfile -) from .python_module_tools import ( modules_from_path, @@ -142,7 +138,7 @@ def get_logger(): """Global lib.applications logger getter.""" global _logger if _logger is None: - _logger = PypeLogger.get_logger(__name__) + _logger = Logger.get_logger(__name__) return _logger @@ -211,6 +207,7 @@ class ApplicationGroup: data (dict): Group defying data loaded from settings. manager (ApplicationManager): Manager that created the group. """ + def __init__(self, name, data, manager): self.name = name self.manager = manager @@ -374,8 +371,9 @@ class ApplicationManager: will always use these values. Gives ability to create manager using different settings. """ + def __init__(self, system_settings=None): - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.app_groups = {} self.applications = {} @@ -471,6 +469,19 @@ class ApplicationManager: for tool in group: self.tools[tool.full_name] = tool + def find_latest_available_variant_for_group(self, group_name): + group = self.app_groups.get(group_name) + if group is None or not group.enabled: + return None + + output = None + for _, variant in reversed(sorted(group.variants.items())): + executable = variant.find_executable() + if executable: + output = variant + break + return output + def launch(self, app_name, **data): """Launch procedure. @@ -530,13 +541,13 @@ class EnvironmentToolGroup: variants = data.get("variants") or {} label_by_key = variants.pop(M_DYNAMIC_KEY_LABEL, {}) variants_by_name = {} - for variant_name, variant_env in variants.items(): + for variant_name, variant_data in variants.items(): if variant_name in METADATA_KEYS: continue variant_label = label_by_key.get(variant_name) or variant_name tool = EnvironmentTool( - variant_name, variant_label, variant_env, self + variant_name, variant_label, variant_data, self ) variants_by_name[variant_name] = tool self.variants = variants_by_name @@ -560,15 +571,30 @@ class EnvironmentTool: Args: name (str): Name of the tool. - environment (dict): Variant environments. + variant_data (dict): Variant data with environments and + host and app variant filters. group (str): Name of group which wraps tool. 
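+
+        Illustrative 'variant_data' in the new format (a plain environment
+        dict, as used before 3.9.2, is also accepted; values below are
+        made up)::
+
+            {
+                "host_names": ["maya"],
+                "app_variants": ["maya/2022"],
+                "environment": {"MYTOOL_ROOT": "/path/to/tool"}
+            }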
""" - def __init__(self, name, label, environment, group): + def __init__(self, name, label, variant_data, group): + # Backwards compatibility 3.9.1 - 3.9.2 + # - 'variant_data' contained only environments but contain also host + # and application variant filters + host_names = variant_data.get("host_names", []) + app_variants = variant_data.get("app_variants", []) + + if "environment" in variant_data: + environment = variant_data["environment"] + else: + environment = variant_data + + self.host_names = host_names + self.app_variants = app_variants self.name = name self.variant_label = label self.label = " ".join((group.label, label)) self.group = group + self._environment = environment self.full_name = "/".join((group.name, name)) @@ -579,6 +605,19 @@ class EnvironmentTool: def environment(self): return copy.deepcopy(self._environment) + def is_valid_for_app(self, app): + """Is tool valid for application. + + Args: + app (Application): Application for which are prepared environments. + """ + if self.app_variants and app.full_name not in self.app_variants: + return False + + if self.host_names and app.host_name not in self.host_names: + return False + return True + class ApplicationExecutable: """Representation of executable loaded from settings.""" @@ -633,7 +672,11 @@ class ApplicationExecutable: if os.path.exists(plist_filepath): import plistlib - parsed_plist = plistlib.readPlist(plist_filepath) + if hasattr(plistlib, "load"): + with open(plist_filepath, "rb") as stream: + parsed_plist = plistlib.load(stream) + else: + parsed_plist = plistlib.readPlist(plist_filepath) executable_filename = parsed_plist.get("CFBundleExecutable") if executable_filename: @@ -705,7 +748,7 @@ class LaunchHook: Always should be called """ - self.log = PypeLogger().get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.launch_context = launch_context @@ -847,7 +890,7 @@ class ApplicationLaunchContext: # Logger logger_name = "{}-{}".format(self.__class__.__name__, self.app_name) - self.log = PypeLogger.get_logger(logger_name) + self.log = Logger.get_logger(logger_name) self.executable = executable @@ -865,24 +908,25 @@ class ApplicationLaunchContext: self.launch_args.extend(self.data.pop("app_args")) # Handle launch environemtns - env = self.data.pop("env", None) - if env is not None and not isinstance(env, dict): + src_env = self.data.pop("env", None) + if src_env is not None and not isinstance(src_env, dict): self.log.warning(( "Passed `env` kwarg has invalid type: {}. Expected: `dict`." " Using `os.environ` instead." - ).format(str(type(env)))) - env = None + ).format(str(type(src_env)))) + src_env = None - if env is None: - env = os.environ + if src_env is None: + src_env = os.environ - # subprocess.Popen keyword arguments - self.kwargs = { - "env": { - key: str(value) - for key, value in env.items() - } + ignored_env = {"QT_API", } + env = { + key: str(value) + for key, value in src_env.items() + if key not in ignored_env } + # subprocess.Popen keyword arguments + self.kwargs = {"env": env} if platform.system().lower() == "windows": # Detach new process from currently running process on Windows @@ -920,6 +964,63 @@ class ApplicationLaunchContext: ) self.kwargs["env"] = value + def _collect_addons_launch_hook_paths(self): + """Helper to collect application launch hooks from addons. + + Module have to have implemented 'get_launch_hook_paths' method which + can expect appliction as argument or nothing. + + Returns: + List[str]: Paths to launch hook directories. 
+ """ + + expected_types = (list, tuple, set) + + output = [] + for module in self.modules_manager.get_enabled_modules(): + # Skip module if does not have implemented 'get_launch_hook_paths' + func = getattr(module, "get_launch_hook_paths", None) + if func is None: + continue + + func = module.get_launch_hook_paths + if hasattr(inspect, "signature"): + sig = inspect.signature(func) + expect_args = len(sig.parameters) > 0 + else: + expect_args = len(inspect.getargspec(func)[0]) > 0 + + # Pass application argument if method expect it. + try: + if expect_args: + hook_paths = func(self.application) + else: + hook_paths = func() + except Exception: + self.log.warning( + "Failed to call 'get_launch_hook_paths'", + exc_info=True + ) + continue + + if not hook_paths: + continue + + # Convert string to list + if isinstance(hook_paths, six.string_types): + hook_paths = [hook_paths] + + # Skip invalid types + if not isinstance(hook_paths, expected_types): + self.log.warning(( + "Result of `get_launch_hook_paths`" + " has invalid type {}. Expected {}" + ).format(type(hook_paths), expected_types)) + continue + + output.extend(hook_paths) + return output + def paths_to_launch_hooks(self): """Directory paths where to look for launch hooks.""" # This method has potential to be part of application manager (maybe). @@ -927,32 +1028,24 @@ class ApplicationLaunchContext: # TODO load additional studio paths from settings import openpype - pype_dir = os.path.dirname(os.path.abspath(openpype.__file__)) + openpype_dir = os.path.dirname(os.path.abspath(openpype.__file__)) - # --- START: Backwards compatibility --- - hooks_dir = os.path.join(pype_dir, "hooks") + global_hooks_dir = os.path.join(openpype_dir, "hooks") - subfolder_names = ["global"] - if self.host_name: - subfolder_names.append(self.host_name) - for subfolder_name in subfolder_names: - path = os.path.join(hooks_dir, subfolder_name) - if ( - os.path.exists(path) - and os.path.isdir(path) - and path not in paths - ): - paths.append(path) - # --- END: Backwards compatibility --- - - subfolders_list = [ - ["hooks"] + hooks_dirs = [ + global_hooks_dir ] if self.host_name: - subfolders_list.append(["hosts", self.host_name, "hooks"]) + # If host requires launch hooks and is module then launch hooks + # should be collected using 'collect_launch_hook_paths' + # - module have to implement 'get_launch_hook_paths' + host_module = self.modules_manager.get_host_module(self.host_name) + if not host_module: + hooks_dirs.append(os.path.join( + openpype_dir, "hosts", self.host_name, "hooks" + )) - for subfolders in subfolders_list: - path = os.path.join(pype_dir, *subfolders) + for path in hooks_dirs: if ( os.path.exists(path) and os.path.isdir(path) @@ -961,7 +1054,7 @@ class ApplicationLaunchContext: paths.append(path) # Load modules paths - paths.extend(self.modules_manager.collect_launch_hook_paths()) + paths.extend(self._collect_addons_launch_hook_paths()) return paths @@ -981,8 +1074,8 @@ class ApplicationLaunchContext: self.log.debug("Discovery of launch hooks started.") paths = self.paths_to_launch_hooks() - self.log.debug("Paths where will look for launch hooks:{}".format( - "\n- ".join(paths) + self.log.debug("Paths searched for launch hooks:\n{}".format( + "\n".join("- {}".format(path) for path in paths) )) all_classes = { @@ -992,7 +1085,7 @@ class ApplicationLaunchContext: for path in paths: if not os.path.exists(path): self.log.info( - "Path to launch hooks does not exists: \"{}\"".format(path) + "Path to launch hooks does not exist: \"{}\"".format(path) 
) continue @@ -1013,13 +1106,14 @@ class ApplicationLaunchContext: hook = klass(self) if not hook.is_valid: self.log.debug( - "Hook is not valid for current launch context." + "Skipped hook invalid for current launch context: " + "{}".format(klass.__name__) ) continue if inspect.isabstract(hook): self.log.debug("Skipped abstract hook: {}".format( - str(hook) + klass.__name__ )) continue @@ -1031,7 +1125,8 @@ class ApplicationLaunchContext: except Exception: self.log.warning( - "Initialization of hook failed. {}".format(str(klass)), + "Initialization of hook failed: " + "{}".format(klass.__name__), exc_info=True ) @@ -1242,11 +1337,20 @@ class EnvironmentPrepData(dict): if data.get("env") is None: data["env"] = os.environ.copy() + if "system_settings" not in data: + data["system_settings"] = get_system_settings() + super(EnvironmentPrepData, self).__init__(data) def get_app_environments_for_context( - project_name, asset_name, task_name, app_name, env_group=None, env=None + project_name, + asset_name, + task_name, + app_name, + env_group=None, + env=None, + modules_manager=None ): """Prepare environment variables by context. Args: @@ -1257,11 +1361,15 @@ def get_app_environments_for_context( by ApplicationManager. env (dict): Initial environment variables. `os.environ` is used when not passed. + modules_manager (ModulesManager): Initialized modules manager. Returns: dict: Environments for passed context and application. """ - from avalon.api import AvalonMongoDB + + from openpype.modules import ModulesManager + from openpype.pipeline import AvalonMongoDB, Anatomy + from openpype.lib.openpype_version import is_running_staging # Avalon database connection dbcon = AvalonMongoDB() @@ -1269,11 +1377,11 @@ def get_app_environments_for_context( dbcon.install() # Project document - project_doc = dbcon.find_one({"type": "project"}) - asset_doc = dbcon.find_one({ - "type": "asset", - "name": asset_name - }) + project_doc = get_project(project_name) + asset_doc = get_asset_by_name(project_name, asset_name) + + if modules_manager is None: + modules_manager = ModulesManager() # Prepare app object which can be obtained only from ApplciationManager app_manager = ApplicationManager() @@ -1297,9 +1405,12 @@ def get_app_environments_for_context( "env": env }) + data["env"].update(anatomy.root_environments()) + if is_running_staging(): + data["env"]["OPENPYPE_IS_STAGING"] = "1" - prepare_app_environments(data, env_group) - prepare_context_environments(data, env_group) + prepare_app_environments(data, env_group, modules_manager) + prepare_context_environments(data, env_group, modules_manager) # Discard avalon connection dbcon.uninstall() @@ -1319,9 +1430,12 @@ def _merge_env(env, current_env): return result -def _add_python_version_paths(app, env, logger): +def _add_python_version_paths(app, env, logger, modules_manager): """Add vendor packages specific for a Python version.""" + for module in modules_manager.get_enabled_modules(): + module.modify_application_launch_arguments(app, env) + # Skip adding if host name is not set if not app.host_name: return @@ -1354,7 +1468,9 @@ def _add_python_version_paths(app, env, logger): env["PYTHONPATH"] = os.pathsep.join(python_paths) -def prepare_app_environments(data, env_group=None, implementation_envs=True): +def prepare_app_environments( + data, env_group=None, implementation_envs=True, modules_manager=None +): """Modify launch environments based on launched app and context. 
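+
+    Tool environments from the asset's "tools_env" are merged over the
+    application environment and resolved with 'acre' before host specific
+    environments are applied.
+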
Args: @@ -1365,8 +1481,32 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): app = data["app"] log = data["log"] + source_env = data["env"].copy() - _add_python_version_paths(app, data["env"], log) + if modules_manager is None: + from openpype.modules import ModulesManager + + modules_manager = ModulesManager() + + _add_python_version_paths(app, source_env, log, modules_manager) + + # Use environments from local settings + filtered_local_envs = {} + system_settings = data["system_settings"] + whitelist_envs = system_settings["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + filtered_local_envs = { + key: value + for key, value in local_envs.items() + if key in whitelist_envs + } + + # Apply local environment variables for already existing values + for key, value in filtered_local_envs.items(): + if key in source_env: + source_env[key] = value # `added_env_keys` has debug purpose added_env_keys = {app.group.name, app.name} @@ -1384,7 +1524,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Make sure each tool group can be added only once for key in asset_doc["data"].get("tools_env") or []: tool = app.manager.tools.get(key) - if not tool: + if not tool or not tool.is_valid_for_app(app): continue groups_by_name[tool.group.name] = tool.group tool_by_group_name[tool.group.name][tool.name] = tool @@ -1411,17 +1551,28 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): # Choose right platform tool_env = parse_environments(_env_values, env_group) + + # Apply local environment variables + # - must happen between all values because they may be used during + # merge + for key, value in filtered_local_envs.items(): + if key in tool_env: + tool_env[key] = value + # Merge dictionaries env_values = _merge_env(tool_env, env_values) - merged_env = _merge_env(env_values, data["env"]) + merged_env = _merge_env(env_values, source_env) + loaded_env = acre.compute(merged_env, cleanup=False) final_env = None # Add host specific environments if app.host_name and implementation_envs: - module = __import__("openpype.hosts", fromlist=[app.host_name]) - host_module = getattr(module, app.host_name, None) + host_module = modules_manager.get_host_module(app.host_name) + if not host_module: + module = __import__("openpype.hosts", fromlist=[app.host_name]) + host_module = getattr(module, app.host_name, None) add_implementation_envs = None if host_module: add_implementation_envs = getattr( @@ -1434,7 +1585,7 @@ def prepare_app_environments(data, env_group=None, implementation_envs=True): if final_env is None: final_env = loaded_env - keys_to_remove = set(data["env"].keys()) - set(final_env.keys()) + keys_to_remove = set(source_env.keys()) - set(final_env.keys()) # Update env data["env"].update(final_env) @@ -1480,13 +1631,16 @@ def apply_project_environments_value( return env -def prepare_context_environments(data, env_group=None): +def prepare_context_environments(data, env_group=None, modules_manager=None): """Modify launch environments with context data for launched host. Args: data (EnvironmentPrepData): Dictionary where result and intermediate result will be stored. 
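+        env_group (Optional[str]): Name of environment group used when
+            parsing settings environments.
+        modules_manager (Optional[ModulesManager]): Initialized modules
+            manager, created on demand when not passed.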
""" + + from openpype.pipeline.template_data import get_template_data + # Context environments log = data["log"] @@ -1507,7 +1661,9 @@ def prepare_context_environments(data, env_group=None): # Load project specific environments project_name = project_doc["name"] project_settings = get_project_settings(project_name) + system_settings = get_system_settings() data["project_settings"] = project_settings + data["system_settings"] = system_settings # Apply project specific environments on current env value apply_project_environments_value( project_name, data["env"], project_settings, env_group @@ -1530,8 +1686,8 @@ def prepare_context_environments(data, env_group=None): if not app.is_host: return - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, app.host_name + workdir_data = get_template_data( + project_doc, asset_doc, task_name, app.host_name, system_settings ) data["workdir_data"] = workdir_data @@ -1542,7 +1698,14 @@ def prepare_context_environments(data, env_group=None): data["task_type"] = task_type try: - workdir = get_workdir_with_workdir_data(workdir_data, anatomy) + from openpype.pipeline.workfile import get_workdir_with_workdir_data + + workdir = get_workdir_with_workdir_data( + workdir_data, + anatomy.project_name, + anatomy, + project_settings=project_settings + ) except Exception as exc: raise ApplicationLaunchFailed( @@ -1563,10 +1726,10 @@ def prepare_context_environments(data, env_group=None): data["env"]["AVALON_APP"] = app.host_name data["env"]["AVALON_WORKDIR"] = workdir - _prepare_last_workfile(data, workdir) + _prepare_last_workfile(data, workdir, modules_manager) -def _prepare_last_workfile(data, workdir): +def _prepare_last_workfile(data, workdir, modules_manager): """last workfile workflow preparation. Function check if should care about last workfile workflow and tries @@ -1581,9 +1744,13 @@ def _prepare_last_workfile(data, workdir): result will be stored. workdir (str): Path to folder where workfiles should be stored. 
""" - import avalon.api + + from openpype.modules import ModulesManager from openpype.pipeline import HOST_WORKFILE_EXTENSIONS + if not modules_manager: + modules_manager = ModulesManager() + log = data["log"] _workdir_data = data.get("workdir_data") @@ -1631,13 +1798,26 @@ def _prepare_last_workfile(data, workdir): # Last workfile path last_workfile_path = data.get("last_workfile_path") or "" if not last_workfile_path: - extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) + host_module = modules_manager.get_host_module(app.host_name) + if host_module: + extensions = host_module.get_workfile_extensions() + else: + extensions = HOST_WORKFILE_EXTENSIONS.get(app.host_name) + if extensions: + from openpype.pipeline.workfile import ( + get_workfile_template_key, + get_last_workfile + ) + anatomy = data["anatomy"] project_settings = data["project_settings"] task_type = workdir_data["task"]["type"] template_key = get_workfile_template_key( - task_type, app.host_name, project_settings=project_settings + task_type, + app.host_name, + project_name, + project_settings=project_settings ) # Find last workfile file_template = str(anatomy.templates[template_key]["file"]) diff --git a/openpype/lib/attribute_definitions.py b/openpype/lib/attribute_definitions.py index 189a5e7acd..b5cd15f41a 100644 --- a/openpype/lib/attribute_definitions.py +++ b/openpype/lib/attribute_definitions.py @@ -1,19 +1,90 @@ +import os import re import collections import uuid -from abc import ABCMeta, abstractmethod +import json +import copy +from abc import ABCMeta, abstractmethod, abstractproperty + import six +import clique + +# Global variable which store attribude definitions by type +# - default types are registered on import +_attr_defs_by_type = {} + + +def register_attr_def_class(cls): + """Register attribute definition. + + Currently are registered definitions used to deserialize data to objects. + + Attrs: + cls (AbstractAttrDef): Non-abstract class to be registered with unique + 'type' attribute. + + Raises: + KeyError: When type was already registered. + """ + + if cls.type in _attr_defs_by_type: + raise KeyError("Type \"{}\" was already registered".format(cls.type)) + _attr_defs_by_type[cls.type] = cls + + +def get_attributes_keys(attribute_definitions): + """Collect keys from list of attribute definitions. + + Args: + attribute_definitions (List[AbstractAttrDef]): Objects of attribute + definitions. + + Returns: + Set[str]: Keys that will be created using passed attribute definitions. + """ + + keys = set() + if not attribute_definitions: + return keys + + for attribute_def in attribute_definitions: + if not isinstance(attribute_def, UIDef): + keys.add(attribute_def.key) + return keys + + +def get_default_values(attribute_definitions): + """Receive default values for attribute definitions. + + Args: + attribute_definitions (List[AbstractAttrDef]): Attribute definitions + for which default values should be collected. + + Returns: + Dict[str, Any]: Default values for passet attribute definitions. + """ + + output = {} + if not attribute_definitions: + return output + + for attr_def in attribute_definitions: + # Skip UI definitions + if not isinstance(attr_def, UIDef): + output[attr_def.key] = attr_def.default + return output class AbstractAttrDefMeta(ABCMeta): - """Meta class to validate existence of 'key' attribute. + """Metaclass to validate existence of 'key' attribute. - Each object of `AbtractAttrDef` mus have defined 'key' attribute. + Each object of `AbstractAttrDef` mus have defined 'key' attribute. 
""" + def __call__(self, *args, **kwargs): obj = super(AbstractAttrDefMeta, self).__call__(*args, **kwargs) init_class = getattr(obj, "__init__class__", None) - if init_class is not AbtractAttrDef: + if init_class is not AbstractAttrDef: raise TypeError("{} super was not called in __init__.".format( type(obj) )) @@ -21,7 +92,7 @@ class AbstractAttrDefMeta(ABCMeta): @six.add_metaclass(AbstractAttrDefMeta) -class AbtractAttrDef: +class AbstractAttrDef(object): """Abstraction of attribute definiton. Each attribute definition must have implemented validation and @@ -35,27 +106,46 @@ class AbtractAttrDef: How to force to set `key` attribute? Args: - key(str): Under which key will be attribute value stored. - label(str): Attribute label. - tooltip(str): Attribute tooltip. - is_label_horizontal(bool): UI specific argument. Specify if label is + key (str): Under which key will be attribute value stored. + default (Any): Default value of an attribute. + label (str): Attribute label. + tooltip (str): Attribute tooltip. + is_label_horizontal (bool): UI specific argument. Specify if label is next to value input or ahead. + hidden (bool): Will be item hidden (for UI purposes). + disabled (bool): Item will be visible but disabled (for UI purposes). """ + + type_attributes = [] + is_value_def = True def __init__( - self, key, default, label=None, tooltip=None, is_label_horizontal=None + self, + key, + default, + label=None, + tooltip=None, + is_label_horizontal=None, + hidden=False, + disabled=False ): if is_label_horizontal is None: is_label_horizontal = True + + if hidden is None: + hidden = False + self.key = key self.label = label self.tooltip = tooltip self.default = default self.is_label_horizontal = is_label_horizontal - self._id = uuid.uuid4() + self.hidden = hidden + self.disabled = disabled + self._id = uuid.uuid4().hex - self.__init__class__ = AbtractAttrDef + self.__init__class__ = AbstractAttrDef @property def id(self): @@ -64,7 +154,25 @@ class AbtractAttrDef: def __eq__(self, other): if not isinstance(other, self.__class__): return False - return self.key == other.key + return ( + self.key == other.key + and self.hidden == other.hidden + and self.default == other.default + and self.disabled == other.disabled + ) + + def __ne__(self, other): + return not self.__eq__(other) + + @abstractproperty + def type(self): + """Attribute definition type also used as identifier of class. + + Returns: + str: Type of attribute definition. + """ + + pass @abstractmethod def convert_value(self, value): @@ -73,14 +181,46 @@ class AbtractAttrDef: Convert passed value to a valid type. Use default if value can't be converted. """ + pass + def serialize(self): + """Serialize object to data so it's possible to recreate it. + + Returns: + Dict[str, Any]: Serialized object that can be passed to + 'deserialize' method. + """ + + data = { + "type": self.type, + "key": self.key, + "label": self.label, + "tooltip": self.tooltip, + "default": self.default, + "is_label_horizontal": self.is_label_horizontal, + "hidden": self.hidden, + "disabled": self.disabled + } + for attr in self.type_attributes: + data[attr] = getattr(self, attr) + return data + + @classmethod + def deserialize(cls, data): + """Recreate object from data. + + Data can be received using 'serialize' method. 
+        """
+
+        return cls(**data)
+

# -----------------------------------------
# UI attribute definitoins won't hold value
# -----------------------------------------
-class UIDef(AbtractAttrDef):
+class UIDef(AbstractAttrDef):
    is_value_def = False

    def __init__(self, key=None, default=None, *args, **kwargs):
@@ -91,10 +231,12 @@ class UIDef(AbtractAttrDef):


class UISeparatorDef(UIDef):
-    pass
+    type = "separator"


class UILabelDef(UIDef):
+    type = "label"
+
    def __init__(self, label):
        super(UILabelDef, self).__init__(label=label)

@@ -103,12 +245,15 @@ class UILabelDef(UIDef):
# Attribute defintioins should hold value
# ---------------------------------------

-class UnknownDef(AbtractAttrDef):
+class UnknownDef(AbstractAttrDef):
    """Definition is not known because definition is not available.

    This attribute can be used to keep existing data unchanged but does not
    have known definition of type.
    """
+
+    type = "unknown"
+
    def __init__(self, key, default=None, **kwargs):
        kwargs["default"] = default
        super(UnknownDef, self).__init__(key, **kwargs)

@@ -117,7 +262,27 @@ class UnknownDef(AbtractAttrDef):
        return value


-class NumberDef(AbtractAttrDef):
+class HiddenDef(AbstractAttrDef):
+    """Hidden value of Any type.
+
+    This attribute can be used for UI purposes to pass values related
+    to other attributes (e.g. in multi-page UIs).
+
+    Keep in mind the value should be possible to parse by json parser.
+    """
+
+    type = "hidden"
+
+    def __init__(self, key, default=None, **kwargs):
+        kwargs["default"] = default
+        kwargs["hidden"] = True
+        super(HiddenDef, self).__init__(key, **kwargs)
+
+    def convert_value(self, value):
+        return value
+
+
+class NumberDef(AbstractAttrDef):
    """Number definition.

    Number can have defined minimum/maximum value and decimal points. Value
@@ -130,6 +295,13 @@ class NumberDef(AbtractAttrDef):
        default(int, float): Default value for conversion.
    """

+    type = "number"
+    type_attributes = [
+        "minimum",
+        "maximum",
+        "decimals"
+    ]
+
    def __init__(
        self, key, minimum=None, maximum=None, decimals=None, default=None,
        **kwargs
@@ -186,7 +358,7 @@ class NumberDef(AbtractAttrDef):
        return round(float(value), self.decimals)


-class TextDef(AbtractAttrDef):
+class TextDef(AbstractAttrDef):
    """Text definition.

    Text can have multiline option so endline characters are allowed regex
@@ -200,6 +372,13 @@ class TextDef(AbtractAttrDef):
        placeholder(str): UI placeholder for attribute.
        default(str, None): Default value. Empty string used when not defined.
    """
+
+    type = "text"
+    type_attributes = [
+        "multiline",
+        "placeholder",
+    ]
+
    def __init__(
        self, key, multiline=None, regex=None, placeholder=None, default=None,
        **kwargs
@@ -238,17 +417,23 @@ class TextDef(AbtractAttrDef):
            return value
        return self.default

+    def serialize(self):
+        data = super(TextDef, self).serialize()
+        data["regex"] = self.regex.pattern
+        return data

-class EnumDef(AbtractAttrDef):
+
+class EnumDef(AbstractAttrDef):
    """Enumeration of single item from items.

    Args:
-        items: Items definition that can be coverted to
-            `collections.OrderedDict`. Dictionary represent {value: label}
-            relation.
+        items: Items definition that can be converted using
+            'prepare_enum_items'.
        default: Default value. Must be one key(value) from passed items.
    """

+    type = "enum"
+
    def __init__(self, key, items, default=None, **kwargs):
        if not items:
            raise ValueError((
                " defined values on initialization."
).format(self.__class__.__name__)) - items = collections.OrderedDict(items) - if default not in items: - for _key in items.keys(): - default = _key + items = self.prepare_enum_items(items) + item_values = [item["value"] for item in items] + if default not in item_values: + for value in item_values: + default = value break super(EnumDef, self).__init__(key, default=default, **kwargs) self.items = items + self._item_values = set(item_values) def __eq__(self, other): if not super(EnumDef, self).__eq__(other): return False - if set(self.items.keys()) != set(other.items.keys()): - return False - - for key, label in self.items.items(): - if other.items[key] != label: - return False - return True + return self.items == other.items def convert_value(self, value): - if value in self.items: + if value in self._item_values: return value return self.default + def serialize(self): + data = super(EnumDef, self).serialize() + data["items"] = copy.deepcopy(self.items) + return data -class BoolDef(AbtractAttrDef): + @staticmethod + def prepare_enum_items(items): + """Convert items to unified structure. + + Output is a list where each item is dictionary with 'value' + and 'label'. + + ```python + # Example output + [ + {"label": "Option 1", "value": 1}, + {"label": "Option 2", "value": 2}, + {"label": "Option 3", "value": 3} + ] + ``` + + Args: + items (Union[Dict[str, Any], List[Any], List[Dict[str, Any]]): The + items to convert. + + Returns: + List[Dict[str, Any]]: Unified structure of items. + """ + + output = [] + if isinstance(items, dict): + for value, label in items.items(): + output.append({"label": label, "value": value}) + + elif isinstance(items, (tuple, list, set)): + for item in items: + if isinstance(item, dict): + # Validate if 'value' is available + if "value" not in item: + raise KeyError("Item does not contain 'value' key.") + + if "label" not in item: + item["label"] = str(item["value"]) + elif isinstance(item, (list, tuple)): + if len(item) == 2: + value, label = item + elif len(item) == 1: + value = item[0] + label = str(value) + else: + raise ValueError(( + "Invalid items count {}." + " Expected 1 or 2. Value: {}" + ).format(len(item), str(item))) + + item = {"label": label, "value": value} + else: + item = {"label": str(item), "value": item} + output.append(item) + + else: + raise TypeError( + "Unknown type for enum items '{}'".format(type(items)) + ) + + return output + + +class BoolDef(AbstractAttrDef): """Boolean representation. Args: default(bool): Default value. Set to `False` if not defined. 
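+
+    Example:
+        A sketch assuming non-boolean input falls back to the default
+        (see 'convert_value'):
+
+        ```python
+        use_gpu = BoolDef("use_gpu", default=True, label="Use GPU")
+        use_gpu.convert_value(False)  # -> False
+        use_gpu.convert_value("no")   # -> True (default is used)
+        ```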
""" + type = "bool" + def __init__(self, key, default=None, **kwargs): if default is None: default = False @@ -302,57 +552,296 @@ class BoolDef(AbtractAttrDef): return self.default -class FileDef(AbtractAttrDef): +class FileDefItem(object): + def __init__( + self, directory, filenames, frames=None, template=None + ): + self.directory = directory + + self.filenames = [] + self.is_sequence = False + self.template = None + self.frames = [] + self.is_empty = True + + self.set_filenames(filenames, frames, template) + + def __str__(self): + return json.dumps(self.to_dict()) + + def __repr__(self): + if self.is_empty: + filename = "< empty >" + elif self.is_sequence: + filename = self.template + else: + filename = self.filenames[0] + + return "<{}: \"{}\">".format( + self.__class__.__name__, + os.path.join(self.directory, filename) + ) + + @property + def label(self): + if self.is_empty: + return None + + if not self.is_sequence: + return self.filenames[0] + + frame_start = self.frames[0] + filename_template = os.path.basename(self.template) + if len(self.frames) == 1: + return "{} [{}]".format(filename_template, frame_start) + + frame_end = self.frames[-1] + expected_len = (frame_end - frame_start) + 1 + if expected_len == len(self.frames): + return "{} [{}-{}]".format( + filename_template, frame_start, frame_end + ) + + ranges = [] + _frame_start = None + _frame_end = None + for frame in range(frame_start, frame_end + 1): + if frame not in self.frames: + add_to_ranges = _frame_start is not None + elif _frame_start is None: + _frame_start = _frame_end = frame + add_to_ranges = frame == frame_end + else: + _frame_end = frame + add_to_ranges = frame == frame_end + + if add_to_ranges: + if _frame_start != _frame_end: + _range = "{}-{}".format(_frame_start, _frame_end) + else: + _range = str(_frame_start) + ranges.append(_range) + _frame_start = _frame_end = None + return "{} [{}]".format( + filename_template, ",".join(ranges) + ) + + def split_sequence(self): + if not self.is_sequence: + raise ValueError("Cannot split single file item") + + paths = [ + os.path.join(self.directory, filename) + for filename in self.filenames + ] + return self.from_paths(paths, False) + + @property + def ext(self): + if self.is_empty: + return None + _, ext = os.path.splitext(self.filenames[0]) + if ext: + return ext + return None + + @property + def lower_ext(self): + ext = self.ext + if ext is not None: + return ext.lower() + return ext + + @property + def is_dir(self): + if self.is_empty: + return False + + # QUESTION a better way how to define folder (in init argument?) + if self.ext: + return False + return True + + def set_directory(self, directory): + self.directory = directory + + def set_filenames(self, filenames, frames=None, template=None): + if frames is None: + frames = [] + is_sequence = False + if frames: + is_sequence = True + + if is_sequence and not template: + raise ValueError("Missing template for sequence") + + self.is_empty = len(filenames) == 0 + self.filenames = filenames + self.template = template + self.frames = frames + self.is_sequence = is_sequence + + @classmethod + def create_empty_item(cls): + return cls("", "") + + @classmethod + def from_value(cls, value, allow_sequences): + """Convert passed value to FileDefItem objects. + + Returns: + list: Created FileDefItem objects. 
+ """ + + # Convert single item to iterable + if not isinstance(value, (list, tuple, set)): + value = [value] + + output = [] + str_filepaths = [] + for item in value: + if isinstance(item, dict): + item = cls.from_dict(item) + + if isinstance(item, FileDefItem): + if not allow_sequences and item.is_sequence: + output.extend(item.split_sequence()) + else: + output.append(item) + + elif isinstance(item, six.string_types): + str_filepaths.append(item) + else: + raise TypeError( + "Unknown type \"{}\". Can't convert to {}".format( + str(type(item)), cls.__name__ + ) + ) + + if str_filepaths: + output.extend(cls.from_paths(str_filepaths, allow_sequences)) + + return output + + @classmethod + def from_dict(cls, data): + return cls( + data["directory"], + data["filenames"], + data.get("frames"), + data.get("template") + ) + + @classmethod + def from_paths(cls, paths, allow_sequences): + filenames_by_dir = collections.defaultdict(list) + for path in paths: + normalized = os.path.normpath(path) + directory, filename = os.path.split(normalized) + filenames_by_dir[directory].append(filename) + + output = [] + for directory, filenames in filenames_by_dir.items(): + if allow_sequences: + cols, remainders = clique.assemble(filenames) + else: + cols = [] + remainders = filenames + + for remainder in remainders: + output.append(cls(directory, [remainder])) + + for col in cols: + frames = list(col.indexes) + paths = [filename for filename in col] + template = col.format("{head}{padding}{tail}") + + output.append(cls( + directory, paths, frames, template + )) + + return output + + def to_dict(self): + output = { + "is_sequence": self.is_sequence, + "directory": self.directory, + "filenames": list(self.filenames), + } + if self.is_sequence: + output.update({ + "template": self.template, + "frames": list(sorted(self.frames)), + }) + + return output + + +class FileDef(AbstractAttrDef): """File definition. It is possible to define filters of allowed file extensions and if supports folders. Args: - multipath(bool): Allow multiple path. + single_item(bool): Allow only single path item. folders(bool): Allow folder paths. - extensions(list): Allow files with extensions. Empty list will + extensions(List[str]): Allow files with extensions. Empty list will allow all extensions and None will disable files completely. - default(str, list): Defautl value. + extensions_label(str): Custom label shown instead of extensions in UI. + default(str, List[str]): Default value. 
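+
+    Example:
+        A sketch of a definition accepting a single video file:
+
+        ```python
+        review_def = FileDef(
+            "review",
+            folders=False,
+            extensions=[".mov", ".mp4"],
+            allow_sequences=False,
+            label="Review"
+        )
+        ```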
""" + type = "path" + type_attributes = [ + "single_item", + "folders", + "extensions", + "allow_sequences", + "extensions_label", + ] + def __init__( - self, key, multipath=False, folders=None, extensions=None, - default=None, **kwargs + self, key, single_item=True, folders=None, extensions=None, + allow_sequences=True, extensions_label=None, default=None, **kwargs ): if folders is None and extensions is None: folders = True extensions = [] if default is None: - if multipath: - default = [] + if single_item: + default = FileDefItem.create_empty_item().to_dict() else: - default = "" + default = [] else: - if multipath: + if single_item: + if isinstance(default, dict): + FileDefItem.from_dict(default) + + elif isinstance(default, six.string_types): + default = FileDefItem.from_paths([default.strip()])[0] + + else: + raise TypeError(( + "'default' argument must be 'str' or 'dict' not '{}'" + ).format(type(default))) + + else: if not isinstance(default, (tuple, list, set)): raise TypeError(( "'default' argument must be 'list', 'tuple' or 'set'" ", not '{}'" ).format(type(default))) - else: - if not isinstance(default, six.string_types): - raise TypeError(( - "'default' argument must be 'str' not '{}'" - ).format(type(default))) - default = default.strip() - # Change horizontal label is_label_horizontal = kwargs.get("is_label_horizontal") if is_label_horizontal is None: - is_label_horizontal = True - if multipath: - is_label_horizontal = False - kwargs["is_label_horizontal"] = is_label_horizontal + kwargs["is_label_horizontal"] = False - self.multipath = multipath + self.single_item = single_item self.folders = folders - self.extensions = extensions + self.extensions = set(extensions) + self.allow_sequences = allow_sequences + self.extensions_label = extensions_label super(FileDef, self).__init__(key, default=default, **kwargs) def __eq__(self, other): @@ -360,30 +849,111 @@ class FileDef(AbtractAttrDef): return False return ( - self.multipath == other.multipath + self.single_item == other.single_item and self.folders == other.folders and self.extensions == other.extensions + and self.allow_sequences == other.allow_sequences ) def convert_value(self, value): - if isinstance(value, six.string_types): - if self.multipath: - value = [value.strip()] - else: - value = value.strip() - return value + if isinstance(value, six.string_types) or isinstance(value, dict): + value = [value] if isinstance(value, (tuple, list, set)): - _value = [] + string_paths = [] + dict_items = [] for item in value: if isinstance(item, six.string_types): - _value.append(item.strip()) + string_paths.append(item.strip()) + elif isinstance(item, dict): + try: + FileDefItem.from_dict(item) + dict_items.append(item) + except (ValueError, KeyError): + pass - if self.multipath: - return _value + if string_paths: + file_items = FileDefItem.from_paths(string_paths) + dict_items.extend([ + file_item.to_dict() + for file_item in file_items + ]) - if not _value: + if not self.single_item: + return dict_items + + if not dict_items: return self.default - return _value[0].strip() + return dict_items[0] - return str(value).strip() + if self.single_item: + return FileDefItem.create_empty_item().to_dict() + return [] + + +def serialize_attr_def(attr_def): + """Serialize attribute definition to data. + + Args: + attr_def (AbstractAttrDef): Attribute definition to serialize. + + Returns: + Dict[str, Any]: Serialized data. 
+ """ + + return attr_def.serialize() + + +def serialize_attr_defs(attr_defs): + """Serialize attribute definitions to data. + + Args: + attr_defs (List[AbstractAttrDef]): Attribute definitions to serialize. + + Returns: + List[Dict[str, Any]]: Serialized data. + """ + + return [ + serialize_attr_def(attr_def) + for attr_def in attr_defs + ] + + +def deserialize_attr_def(attr_def_data): + """Deserialize attribute definition from data. + + Args: + attr_def (Dict[str, Any]): Attribute definition data to deserialize. + """ + + attr_type = attr_def_data.pop("type") + cls = _attr_defs_by_type[attr_type] + return cls.deserialize(attr_def_data) + + +def deserialize_attr_defs(attr_defs_data): + """Deserialize attribute definitions. + + Args: + List[Dict[str, Any]]: List of attribute definitions. + """ + + return [ + deserialize_attr_def(attr_def_data) + for attr_def_data in attr_defs_data + ] + + +# Register attribute definitions +for _attr_class in ( + UISeparatorDef, + UILabelDef, + UnknownDef, + NumberDef, + TextDef, + EnumDef, + BoolDef, + FileDef +): + register_attr_def_class(_attr_class) diff --git a/openpype/lib/avalon_context.py b/openpype/lib/avalon_context.py index b4e6abb72d..a9ae27cb79 100644 --- a/openpype/lib/avalon_context.py +++ b/openpype/lib/avalon_context.py @@ -1,43 +1,84 @@ """Should be used only inside of hosts.""" -import os -import json -import re -import copy + import platform import logging -import collections import functools -import getpass +import warnings -from bson.objectid import ObjectId +import six -from openpype.settings import ( - get_project_settings, - get_system_settings +from openpype.client import ( + get_project, + get_asset_by_name, +) +from openpype.client.operations import ( + CURRENT_ASSET_DOC_SCHEMA, + CURRENT_PROJECT_SCHEMA, + CURRENT_PROJECT_CONFIG_SCHEMA, ) -from .anatomy import Anatomy from .profiles_filtering import filter_profiles -from .events import emit_event from .path_templates import StringTemplate -# avalon module is not imported at the top -# - may not be in path at the time of pype.lib initialization -avalon = None +legacy_io = None log = logging.getLogger("AvalonContext") +# Backwards compatibility - should not be used anymore +# - Will be removed in OP 3.16.* CURRENT_DOC_SCHEMAS = { - "project": "openpype:project-3.0", - "asset": "openpype:asset-3.0", - "config": "openpype:config-2.0" + "project": CURRENT_PROJECT_SCHEMA, + "asset": CURRENT_ASSET_DOC_SCHEMA, + "config": CURRENT_PROJECT_CONFIG_SCHEMA } -PROJECT_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_" -PROJECT_NAME_REGEX = re.compile( - "^[{}]+$".format(PROJECT_NAME_ALLOWED_SYMBOLS) -) +class AvalonContextDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." 
+ ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", AvalonContextDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=AvalonContextDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +@deprecated("openpype.client.operations.create_project") def create_project( project_name, project_code, library_project=False, dbcon=None ): @@ -61,222 +102,27 @@ def create_project( Returns: dict: Created project document. + + Deprecated: + Function will be removed after release version 3.16.* """ - from openpype.settings import ProjectSettings, SaveWarningExc - from avalon.api import AvalonMongoDB - from avalon.schema import validate + from openpype.client.operations import create_project - if dbcon is None: - dbcon = AvalonMongoDB() - - if not PROJECT_NAME_REGEX.match(project_name): - raise ValueError(( - "Project name \"{}\" contain invalid characters" - ).format(project_name)) - - database = dbcon.database - project_doc = database[project_name].find_one( - {"type": "project"}, - {"name": 1} - ) - if project_doc: - raise ValueError("Project with name \"{}\" already exists".format( - project_name - )) - - project_doc = { - "type": "project", - "name": project_name, - "data": { - "code": project_code, - "library_project": library_project - }, - "schema": CURRENT_DOC_SCHEMAS["project"] - } - # Insert document with basic data - database[project_name].insert_one(project_doc) - # Load ProjectSettings for the project and save it to store all attributes - # and Anatomy - try: - project_settings_entity = ProjectSettings(project_name) - project_settings_entity.save() - except SaveWarningExc as exc: - print(str(exc)) - except Exception: - database[project_name].delete_one({"type": "project"}) - raise - - project_doc = database[project_name].find_one({"type": "project"}) - - try: - # Validate created project document - validate(project_doc) - except Exception: - # Remove project if is not valid - database[project_name].delete_one({"type": "project"}) - raise - - return project_doc + return create_project(project_name, project_code, library_project) -def with_avalon(func): +def with_pipeline_io(func): @functools.wraps(func) - def wrap_avalon(*args, **kwargs): - global avalon - if avalon is None: - import avalon + def wrapped(*args, **kwargs): + global legacy_io + if legacy_io is None: + from openpype.pipeline import legacy_io return func(*args, **kwargs) - return wrap_avalon - - -@with_avalon -def is_latest(representation): - """Return whether the representation is from latest version - - Args: - representation (dict): The representation document from the database. - - Returns: - bool: Whether the representation is of latest version. 
- - """ - - version = avalon.io.find_one({"_id": representation['parent']}) - if version["type"] == "hero_version": - return True - - # Get highest version under the parent - highest_version = avalon.io.find_one({ - "type": "version", - "parent": version["parent"] - }, sort=[("name", -1)], projection={"name": True}) - - if version['name'] == highest_version['name']: - return True - else: - return False - - -@with_avalon -def any_outdated(): - """Return whether the current scene has any outdated content""" - - checked = set() - host = avalon.api.registered_host() - for container in host.ls(): - representation = container['representation'] - if representation in checked: - continue - - representation_doc = avalon.io.find_one( - { - "_id": ObjectId(representation), - "type": "representation" - }, - projection={"parent": True} - ) - if representation_doc and not is_latest(representation_doc): - return True - elif not representation_doc: - log.debug("Container '{objectName}' has an invalid " - "representation, it is missing in the " - "database".format(**container)) - - checked.add(representation) - - return False - - -@with_avalon -def get_asset(asset_name=None): - """ Returning asset document from database by its name. - - Doesn't count with duplicities on asset names! - - Args: - asset_name (str) - - Returns: - (MongoDB document) - """ - if not asset_name: - asset_name = avalon.api.Session["AVALON_ASSET"] - - asset_document = avalon.io.find_one({ - "name": asset_name, - "type": "asset" - }) - - if not asset_document: - raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - - return asset_document - - -@with_avalon -def get_hierarchy(asset_name=None): - """ - Obtain asset hierarchy path string from mongo db - - Args: - asset_name (str) - - Returns: - (string): asset hierarchy path - - """ - if not asset_name: - asset_name = avalon.io.Session.get( - "AVALON_ASSET", - os.environ["AVALON_ASSET"] - ) - - asset_entity = avalon.io.find_one({ - "type": 'asset', - "name": asset_name - }) - - not_set = "PARENTS_NOT_SET" - entity_parents = asset_entity.get("data", {}).get("parents", not_set) - - # If entity already have parents then just return joined - if entity_parents != not_set: - return "/".join(entity_parents) - - # Else query parents through visualParents and store result to entity - hierarchy_items = [] - entity = asset_entity - while True: - parent_id = entity.get("data", {}).get("visualParent") - if not parent_id: - break - entity = avalon.io.find_one({"_id": parent_id}) - hierarchy_items.append(entity["name"]) - - # Add parents to entity data for next query - entity_data = asset_entity.get("data", {}) - entity_data["parents"] = hierarchy_items - avalon.io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data": entity_data}} - ) - - return "/".join(hierarchy_items) - - -def get_system_general_anatomy_data(): - system_settings = get_system_settings() - studio_name = system_settings["general"]["studio_name"] - studio_code = system_settings["general"]["studio_code"] - return { - "studio": { - "name": studio_name, - "code": studio_code - } - } + return wrapped +@deprecated("openpype.client.get_linked_asset_ids") def get_linked_asset_ids(asset_doc): """Return linked asset ids for `asset_doc` from DB @@ -285,115 +131,21 @@ def get_linked_asset_ids(asset_doc): Returns: (list): MongoDB ids of input links. 
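+
+    Example:
+        Replacement call; 'project_name' is resolved from the active
+        session inside this wrapper:
+
+        ```python
+        from openpype.client import get_linked_asset_ids
+
+        link_ids = get_linked_asset_ids(project_name, asset_doc=asset_doc)
+        ```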
- """ - output = [] - if not asset_doc: - return output - input_links = asset_doc["data"].get("inputLinks") or [] - if input_links: - for item in input_links: - # Backwards compatibility for "_id" key which was replaced with - # "id" - if "_id" in item: - link_id = item["_id"] - else: - link_id = item["id"] - output.append(link_id) - - return output - - -@with_avalon -def get_linked_assets(asset_doc): - """Return linked assets for `asset_doc` from DB - - Args: - asset_doc (dict): Asset document from DB - - Returns: - (list) Asset documents of input links for passed asset doc. - """ - link_ids = get_linked_asset_ids(asset_doc) - if not link_ids: - return [] - - return list(avalon.io.find({"_id": {"$in": link_ids}})) - - -@with_avalon -def get_latest_version(asset_name, subset_name, dbcon=None, project_name=None): - """Retrieve latest version from `asset_name`, and `subset_name`. - - Do not use if you want to query more than 5 latest versions as this method - query 3 times to mongo for each call. For those cases is better to use - more efficient way, e.g. with help of aggregations. - - Args: - asset_name (str): Name of asset. - subset_name (str): Name of subset. - dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection - with Session. - project_name (str, optional): Find latest version in specific project. - - Returns: - None: If asset, subset or version were not found. - dict: Last version document for entered . + Deprecated: + Function will be removed after release version 3.16.* """ - if not dbcon: - log.debug("Using `avalon.io` for query.") - dbcon = avalon.io - # Make sure is installed - dbcon.install() + from openpype.client import get_linked_asset_ids + from openpype.pipeline import legacy_io - if project_name and project_name != dbcon.Session.get("AVALON_PROJECT"): - # `avalon.io` has only `_database` attribute - # but `AvalonMongoDB` has `database` - database = getattr(dbcon, "database", dbcon._database) - collection = database[project_name] - else: - project_name = dbcon.Session.get("AVALON_PROJECT") - collection = dbcon + project_name = legacy_io.active_project() - log.debug(( - "Getting latest version for Project: \"{}\" Asset: \"{}\"" - " and Subset: \"{}\"" - ).format(project_name, asset_name, subset_name)) - - # Query asset document id by asset name - asset_doc = collection.find_one( - {"type": "asset", "name": asset_name}, - {"_id": True} - ) - if not asset_doc: - log.info( - "Asset \"{}\" was not found in Database.".format(asset_name) - ) - return None - - subset_doc = collection.find_one( - {"type": "subset", "name": subset_name, "parent": asset_doc["_id"]}, - {"_id": True} - ) - if not subset_doc: - log.info( - "Subset \"{}\" was not found in Database.".format(subset_name) - ) - return None - - version_doc = collection.find_one( - {"type": "version", "parent": subset_doc["_id"]}, - sort=[("name", -1)], - ) - if not version_doc: - log.info( - "Subset \"{}\" does not have any version yet.".format(subset_name) - ) - return None - return version_doc + return get_linked_asset_ids(project_name, asset_doc=asset_doc) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key_from_context") def get_workfile_template_key_from_context( asset_name, task_name, host_name, project_name=None, dbcon=None, project_settings=None @@ -421,39 +173,30 @@ def get_workfile_template_key_from_context( Raises: ValueError: When both 'dbcon' and 'project_name' were not passed. 
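+
+    Example:
+        Replacement call with the same arguments:
+
+        ```python
+        from openpype.pipeline.workfile import (
+            get_workfile_template_key_from_context
+        )
+
+        key = get_workfile_template_key_from_context(
+            asset_name, task_name, host_name, project_name, project_settings
+        )
+        ```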
+ + Deprecated: + Function will be removed after release version 3.16.* """ - if not dbcon: - if not project_name: + + from openpype.pipeline.workfile import ( + get_workfile_template_key_from_context + ) + + if not project_name: + if not dbcon: raise ValueError(( "`get_workfile_template_key_from_context` requires to pass" " one of 'dbcon' or 'project_name' arguments." )) - from avalon.api import AvalonMongoDB + project_name = dbcon.active_project() - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - - elif not project_name: - project_name = dbcon.Session["AVALON_PROJECT"] - - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": 1 - } - ) - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - return get_workfile_template_key( - task_type, host_name, project_name, project_settings + return get_workfile_template_key_from_context( + asset_name, task_name, host_name, project_name, project_settings ) +@deprecated( + "openpype.pipeline.workfile.get_workfile_template_key") def get_workfile_template_key( task_type, host_name, project_name=None, project_settings=None ): @@ -476,204 +219,19 @@ def get_workfile_template_key( Raises: ValueError: When both 'project_name' and 'project_settings' were not passed. + + Deprecated: + Function will be removed after release version 3.16.* """ - default = "work" - if not task_type or not host_name: - return default - if not project_settings: - if not project_name: - raise ValueError(( - "`get_workfile_template_key` requires to pass" - " one of 'project_name' or 'project_settings' arguments." - )) - project_settings = get_project_settings(project_name) + from openpype.pipeline.workfile import get_workfile_template_key - try: - profiles = ( - project_settings - ["global"] - ["tools"] - ["Workfiles"] - ["workfile_template_profiles"] - ) - except Exception: - profiles = [] - - if not profiles: - return default - - profile_filter = { - "task_types": task_type, - "hosts": host_name - } - profile = filter_profiles(profiles, profile_filter) - if profile: - return profile["workfile_template"] or default - return default - - -# TODO rename function as is not just "work" specific -def get_workdir_data(project_doc, asset_doc, task_name, host_name): - """Prepare data for workdir template filling from entered information. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. - - Returns: - dict: Data prepared for filling workdir template. 
- """ - task_type = asset_doc['data']['tasks'].get(task_name, {}).get('type') - - project_task_types = project_doc["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") - - asset_parents = asset_doc["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_doc["name"] - if asset_parents: - parent_name = asset_parents[-1] - - data = { - "project": { - "name": project_doc["name"], - "code": project_doc["data"].get("code") - }, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - }, - "asset": asset_doc["name"], - "parent": parent_name, - "app": host_name, - "user": getpass.getuser(), - "hierarchy": hierarchy, - } - - system_general_data = get_system_general_anatomy_data() - data.update(system_general_data) - - return data - - -def get_workdir_with_workdir_data( - workdir_data, anatomy=None, project_name=None, template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - It is possible to pass only project's name instead of project's anatomy but - one of them **must** be entered. It is preferred to enter anatomy if is - available as initialization of a new Anatomy object may be time consuming. - - Args: - workdir_data (dict): Data to fill workdir template. - anatomy (Anatomy): Anatomy object for specific project. Optional if - `project_name` is entered. - project_name (str): Project's name. Optional if `anatomy` is entered - otherwise Anatomy object is created with using the project name. - template_key (str): Key of work templates in anatomy templates. If not - passed `get_workfile_template_key_from_context` is used to get it. - dbcon(AvalonMongoDB): Mongo connection. Required only if 'template_key' - and 'project_name' are not passed. - - Returns: - TemplateResult: Workdir path. - - Raises: - ValueError: When both `anatomy` and `project_name` are set to None. - """ - if not anatomy and not project_name: - raise ValueError(( - "Missing required arguments one of `project_name` or `anatomy`" - " must be entered." - )) - - if not anatomy: - anatomy = Anatomy(project_name) - - if not template_key: - template_key = get_workfile_template_key( - workdir_data["task"]["type"], - workdir_data["app"], - project_name=workdir_data["project"]["name"] - ) - - anatomy_filled = anatomy.format(workdir_data) - # Output is TemplateResult object which contain useful data - return anatomy_filled[template_key]["folder"] - - -def get_workdir( - project_doc, - asset_doc, - task_name, - host_name, - anatomy=None, - template_key=None -): - """Fill workdir path from entered data and project's anatomy. - - Args: - project_doc (dict): Mongo document of project from MongoDB. - asset_doc (dict): Mongo document of asset from MongoDB. - task_name (str): Task name for which are workdir data preapred. - host_name (str): Host which is used to workdir. This is required - because workdir template may contain `{app}` key. In `Session` - is stored under `AVALON_APP` key. - anatomy (Anatomy): Optional argument. Anatomy object is created using - project name from `project_doc`. It is preferred to pass this - argument as initialization of a new Anatomy object may be time - consuming. - template_key (str): Key of work templates in anatomy templates. Default - value is defined in `get_workdir_with_workdir_data`. - - Returns: - TemplateResult: Workdir path. 
- """ - if not anatomy: - anatomy = Anatomy(project_doc["name"]) - - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, host_name - ) - # Output is TemplateResult object which contain useful data - return get_workdir_with_workdir_data( - workdir_data, anatomy, template_key=template_key + return get_workfile_template_key( + task_type, host_name, project_name, project_settings ) -def template_data_from_session(session=None): - """ Return dictionary with template from session keys. - - Args: - session (dict, Optional): The Session to use. If not provided use the - currently active global Session. - Returns: - dict: All available data from session. - """ - from avalon import io - import avalon.api - - if session is None: - session = avalon.api.Session - - project_name = session["AVALON_PROJECT"] - project_doc = io._database[project_name].find_one({"type": "project"}) - asset_doc = io._database[project_name].find_one({ - "type": "asset", - "name": session["AVALON_ASSET"] - }) - task_name = session["AVALON_TASK"] - host_name = session["AVALON_APP"] - return get_workdir_data(project_doc, asset_doc, task_name, host_name) - - +@deprecated("openpype.pipeline.context_tools.compute_session_changes") def compute_session_changes( session, task=None, asset=None, app=None, template_key=None ): @@ -695,79 +253,48 @@ def compute_session_changes( Returns: dict: The required changes in the Session dictionary. + Deprecated: + Function will be removed after release version 3.16.* """ - changes = dict() - # If no changes, return directly - if not any([task, asset, app]): - return changes + from openpype.pipeline import legacy_io + from openpype.pipeline.context_tools import compute_session_changes - # Get asset document and asset - asset_document = None - asset_tasks = None - if isinstance(asset, dict): - # Assume asset database document - asset_document = asset - asset_tasks = asset_document.get("data", {}).get("tasks") - asset = asset["name"] + if isinstance(asset, six.string_types): + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset) - if not asset_document or not asset_tasks: - from avalon import io - - # Assume asset name - asset_document = io.find_one( - { - "name": asset, - "type": "asset" - }, - {"data.tasks": True} - ) - assert asset_document, "Asset must exist" - - # Detect any changes compared session - mapping = { - "AVALON_ASSET": asset, - "AVALON_TASK": task, - "AVALON_APP": app, - } - changes = { - key: value - for key, value in mapping.items() - if value and value != session.get(key) - } - if not changes: - return changes - - # Compute work directory (with the temporary changed session so far) - _session = session.copy() - _session.update(changes) - - changes["AVALON_WORKDIR"] = get_workdir_from_session(_session) - - return changes + return compute_session_changes( + session, + asset, + task, + template_key + ) +@deprecated("openpype.pipeline.context_tools.get_workdir_from_session") def get_workdir_from_session(session=None, template_key=None): - import avalon.api + """Calculate workdir path based on session data. - if session is None: - session = avalon.api.Session - project_name = session["AVALON_PROJECT"] - host_name = session["AVALON_APP"] - anatomy = Anatomy(project_name) - template_data = template_data_from_session(session) - anatomy_filled = anatomy.format(template_data) + Args: + session (Union[None, Dict[str, str]]): Session to use. If not passed + current context session is used (from legacy_io). 
+ template_key (Union[str, None]): Precalculate template key to define + workfile template name in Anatomy. - if not template_key: - task_type = template_data["task"]["type"] - template_key = get_workfile_template_key( - task_type, - host_name, - project_name=project_name - ) - return anatomy_filled[template_key]["folder"] + Returns: + str: Workdir path. + + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.context_tools import get_workdir_from_session + + return get_workdir_from_session(session, template_key) +@deprecated("openpype.pipeline.context_tools.change_current_context") def update_current_task(task=None, asset=None, app=None, template_key=None): """Update active Session to a new task work area. @@ -781,818 +308,33 @@ def update_current_task(task=None, asset=None, app=None, template_key=None): Returns: dict: The changed key, values in the current Session. - """ - import avalon.api - - changes = compute_session_changes( - avalon.api.Session, - task=task, - asset=asset, - app=app, - template_key=template_key - ) - - # Update the Session and environments. Pop from environments all keys with - # value set to None. - for key, value in changes.items(): - avalon.api.Session[key] = value - if value is None: - os.environ.pop(key, None) - else: - os.environ[key] = value - - # Emit session change - emit_event("taskChanged", changes.copy()) - - return changes - - -@with_avalon -def get_workfile_doc(asset_id, task_name, filename, dbcon=None): - """Return workfile document for entered context. - - Do not use this method to get more than one document. In that cases use - custom query as this will return documents from database one by one. - - Args: - asset_id (ObjectId): Mongo ID of an asset under which workfile belongs. - task_name (str): Name of task under which the workfile belongs. - filename (str): Name of a workfile. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. - - Returns: - dict: Workfile document or None. - """ - # Use avalon.io if dbcon is not entered - if not dbcon: - dbcon = avalon.io - - return dbcon.find_one({ - "type": "workfile", - "parent": asset_id, - "task_name": task_name, - "filename": filename - }) - - -@with_avalon -def create_workfile_doc(asset_doc, task_name, filename, workdir, dbcon=None): - """Creates or replace workfile document in mongo. - - Do not use this method to update data. This method will remove all - additional data from existing document. - - Args: - asset_doc (dict): Document of asset under which workfile belongs. - task_name (str): Name of task for which is workfile related to. - filename (str): Filename of workfile. - workdir (str): Path to directory where `filename` is located. - dbcon (AvalonMongoDB): Optionally enter avalon AvalonMongoDB object and - `avalon.io` is used if not entered. 
- """ - # Use avalon.io if dbcon is not entered - if not dbcon: - dbcon = avalon.io - - # Filter of workfile document - doc_filter = { - "type": "workfile", - "parent": asset_doc["_id"], - "task_name": task_name, - "filename": filename - } - # Document data are copy of filter - doc_data = copy.deepcopy(doc_filter) - - # Prepare project for workdir data - project_doc = dbcon.find_one({"type": "project"}) - workdir_data = get_workdir_data( - project_doc, asset_doc, task_name, dbcon.Session["AVALON_APP"] - ) - # Prepare anatomy - anatomy = Anatomy(project_doc["name"]) - # Get workdir path (result is anatomy.TemplateResult) - template_workdir = get_workdir_with_workdir_data( - workdir_data, anatomy - ) - template_workdir_path = str(template_workdir).replace("\\", "/") - - # Replace slashses in workdir path where workfile is located - mod_workdir = workdir.replace("\\", "/") - - # Replace workdir from templates with rootless workdir - rootles_workdir = mod_workdir.replace( - template_workdir_path, - template_workdir.rootless.replace("\\", "/") - ) - - doc_data["schema"] = "pype:workfile-1.0" - doc_data["files"] = ["/".join([rootles_workdir, filename])] - doc_data["data"] = {} - - dbcon.replace_one( - doc_filter, - doc_data, - upsert=True - ) - - -@with_avalon -def save_workfile_data_to_doc(workfile_doc, data, dbcon=None): - if not workfile_doc: - # TODO add log message - return - - if not data: - return - - # Use avalon.io if dbcon is not entered - if not dbcon: - dbcon = avalon.io - - # Convert data to mongo modification keys/values - # - this is naive implementation which does not expect nested - # dictionaries - set_data = {} - for key, value in data.items(): - new_key = "data.{}".format(key) - set_data[new_key] = value - - # Update workfile document with data - dbcon.update_one( - {"_id": workfile_doc["_id"]}, - {"$set": set_data} - ) - - -class BuildWorkfile: - """Wrapper for build workfile process. - - Load representations for current context by build presets. Build presets - are host related, since each host has it's loaders. + Deprecated: + Function will be removed after release version 3.16.* """ - log = logging.getLogger("BuildWorkfile") + from openpype.pipeline import legacy_io + from openpype.pipeline.context_tools import change_current_context - @staticmethod - def map_subsets_by_family(subsets): - subsets_by_family = collections.defaultdict(list) - for subset in subsets: - family = subset["data"].get("family") - if not family: - families = subset["data"].get("families") - if not families: - continue - family = families[0] + project_name = legacy_io.active_project() + if isinstance(asset, six.string_types): + asset = get_asset_by_name(project_name, asset) - subsets_by_family[family].append(subset) - return subsets_by_family + return change_current_context(asset, task, template_key) - def process(self): - """Main method of this wrapper. - Building of workfile is triggered and is possible to implement - post processing of loaded containers if necessary. - """ - containers = self.build_workfile() +@deprecated("openpype.pipeline.workfile.BuildWorkfile") +def BuildWorkfile(): + """Build workfile class was moved to workfile pipeline. - return containers + Deprecated: + Function will be removed after release version 3.16.* + """ + from openpype.pipeline.workfile import BuildWorkfile - @with_avalon - def build_workfile(self): - """Prepares and load containers into workfile. 
+ return BuildWorkfile() - Loads latest versions of current and linked assets to workfile by logic - stored in Workfile profiles from presets. Profiles are set by host, - filtered by current task name and used by families. - Each family can specify representation names and loaders for - representations and first available and successful loaded - representation is returned as container. - - At the end you'll get list of loaded containers per each asset. - - loaded_containers [{ - "asset_entity": , - "containers": [, , ...] - }, { - "asset_entity": , - "containers": [, ...] - }, { - ... - }] - """ - from openpype.pipeline import discover_loader_plugins - - # Get current asset name and entity - current_asset_name = avalon.io.Session["AVALON_ASSET"] - current_asset_entity = avalon.io.find_one({ - "type": "asset", - "name": current_asset_name - }) - - # Skip if asset was not found - if not current_asset_entity: - print("Asset entity with name `{}` was not found".format( - current_asset_name - )) - return - - # Prepare available loaders - loaders_by_name = {} - for loader in discover_loader_plugins(): - loader_name = loader.__name__ - if loader_name in loaders_by_name: - raise KeyError( - "Duplicated loader name {0}!".format(loader_name) - ) - loaders_by_name[loader_name] = loader - - # Skip if there are any loaders - if not loaders_by_name: - self.log.warning("There are no registered loaders.") - return - - # Get current task name - current_task_name = avalon.io.Session["AVALON_TASK"] - - # Load workfile presets for task - self.build_presets = self.get_build_presets( - current_task_name, current_asset_entity - ) - - # Skip if there are any presets for task - if not self.build_presets: - self.log.warning( - "Current task `{}` does not have any loading preset.".format( - current_task_name - ) - ) - return - - # Get presets for loading current asset - current_context_profiles = self.build_presets.get("current_context") - # Get presets for loading linked assets - link_context_profiles = self.build_presets.get("linked_assets") - # Skip if both are missing - if not current_context_profiles and not link_context_profiles: - self.log.warning( - "Current task `{}` has empty loading preset.".format( - current_task_name - ) - ) - return - - elif not current_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any loading" - " preset for it's context." - ).format(current_task_name)) - - elif not link_context_profiles: - self.log.warning(( - "Current task `{}` doesn't have any" - "loading preset for it's linked assets." - ).format(current_task_name)) - - # Prepare assets to process by workfile presets - assets = [] - current_asset_id = None - if current_context_profiles: - # Add current asset entity if preset has current context set - assets.append(current_asset_entity) - current_asset_id = current_asset_entity["_id"] - - if link_context_profiles: - # Find and append linked assets if preset has set linked mapping - link_assets = get_linked_assets(current_asset_entity) - if link_assets: - assets.extend(link_assets) - - # Skip if there are no assets. This can happen if only linked mapping - # is set and there are no links for his asset. - if not assets: - self.log.warning( - "Asset does not have linked assets. Nothing to process." 
- ) - return - - # Prepare entities from database for assets - prepared_entities = self._collect_last_version_repres(assets) - - # Load containers by prepared entities and presets - loaded_containers = [] - # - Current asset containers - if current_asset_id and current_asset_id in prepared_entities: - current_context_data = prepared_entities.pop(current_asset_id) - loaded_data = self.load_containers_by_asset_data( - current_context_data, current_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # - Linked assets container - for linked_asset_data in prepared_entities.values(): - loaded_data = self.load_containers_by_asset_data( - linked_asset_data, link_context_profiles, loaders_by_name - ) - if loaded_data: - loaded_containers.append(loaded_data) - - # Return list of loaded containers - return loaded_containers - - @with_avalon - def get_build_presets(self, task_name, asset_doc): - """ Returns presets to build workfile for task name. - - Presets are loaded for current project set in - io.Session["AVALON_PROJECT"], filtered by registered host - and entered task name. - - Args: - task_name (str): Task name used for filtering build presets. - - Returns: - (dict): preset per entered task name - """ - host_name = os.environ["AVALON_APP"] - project_settings = get_project_settings( - avalon.io.Session["AVALON_PROJECT"] - ) - - host_settings = project_settings.get(host_name) or {} - # Get presets for host - wb_settings = host_settings.get("workfile_builder") - if not wb_settings: - # backward compatibility - wb_settings = host_settings.get("workfile_build") or {} - - builder_profiles = wb_settings.get("profiles") - if not builder_profiles: - return None - - task_type = ( - asset_doc - .get("data", {}) - .get("tasks", {}) - .get(task_name, {}) - .get("type") - ) - filter_data = { - "task_types": task_type, - "tasks": task_name - } - return filter_profiles(builder_profiles, filter_data) - - def _filter_build_profiles(self, build_profiles, loaders_by_name): - """ Filter build profiles by loaders and prepare process data. - - Valid profile must have "loaders", "families" and "repre_names" keys - with valid values. - - "loaders" expects list of strings representing possible loaders. - - "families" expects list of strings for filtering - by main subset family. - - "repre_names" expects list of strings for filtering by - representation name. - - Lowered "families" and "repre_names" are prepared for each profile with - all required keys. - - Args: - build_profiles (dict): Profiles for building workfile. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list): Filtered and prepared profiles. 
- """ - valid_profiles = [] - for profile in build_profiles: - # Check loaders - profile_loaders = profile.get("loaders") - if not profile_loaders: - self.log.warning(( - "Build profile has missing loaders configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check if any loader is available - loaders_match = False - for loader_name in profile_loaders: - if loader_name in loaders_by_name: - loaders_match = True - break - - if not loaders_match: - self.log.warning(( - "All loaders from Build profile are not available: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check families - profile_families = profile.get("families") - if not profile_families: - self.log.warning(( - "Build profile is missing families configuration: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Check representation names - profile_repre_names = profile.get("repre_names") - if not profile_repre_names: - self.log.warning(( - "Build profile is missing" - " representation names filtering: {0}" - ).format(json.dumps(profile, indent=4))) - continue - - # Prepare lowered families and representation names - profile["families_lowered"] = [ - fam.lower() for fam in profile_families - ] - profile["repre_names_lowered"] = [ - name.lower() for name in profile_repre_names - ] - - valid_profiles.append(profile) - - return valid_profiles - - def _prepare_profile_for_subsets(self, subsets, profiles): - """Select profile for each subset by it's data. - - Profiles are filtered for each subset individually. - Profile is filtered by subset's family, optionally by name regex and - representation names set in profile. - It is possible to not find matching profile for subset, in that case - subset is skipped and it is possible that none of subsets have - matching profile. - - Args: - subsets (list): Subset documents. - profiles (dict): Build profiles. - - Returns: - (dict) Profile by subset's id. - """ - # Prepare subsets - subsets_by_family = self.map_subsets_by_family(subsets) - - profiles_per_subset_id = {} - for family, subsets in subsets_by_family.items(): - family_low = family.lower() - for profile in profiles: - # Skip profile if does not contain family - if family_low not in profile["families_lowered"]: - continue - - # Precompile name filters as regexes - profile_regexes = profile.get("subset_name_filters") - if profile_regexes: - _profile_regexes = [] - for regex in profile_regexes: - _profile_regexes.append(re.compile(regex)) - profile_regexes = _profile_regexes - - # TODO prepare regex compilation - for subset in subsets: - # Verify regex filtering (optional) - if profile_regexes: - valid = False - for pattern in profile_regexes: - if re.match(pattern, subset["name"]): - valid = True - break - - if not valid: - continue - - profiles_per_subset_id[subset["_id"]] = profile - - # break profiles loop on finding the first matching profile - break - return profiles_per_subset_id - - def load_containers_by_asset_data( - self, asset_entity_data, build_profiles, loaders_by_name - ): - """Load containers for entered asset entity by Build profiles. - - Args: - asset_entity_data (dict): Prepared data with subsets, last version - and representations for specific asset. - build_profiles (dict): Build profiles. - loaders_by_name (dict): Available loaders per name. - - Returns: - (dict) Output contains asset document and loaded containers. 
- """ - - # Make sure all data are not empty - if not asset_entity_data or not build_profiles or not loaders_by_name: - return - - asset_entity = asset_entity_data["asset_entity"] - - valid_profiles = self._filter_build_profiles( - build_profiles, loaders_by_name - ) - if not valid_profiles: - self.log.warning( - "There are not valid Workfile profiles. Skipping process." - ) - return - - self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) - - subsets_by_id = {} - version_by_subset_id = {} - repres_by_version_id = {} - for subset_id, in_data in asset_entity_data["subsets"].items(): - subset_entity = in_data["subset_entity"] - subsets_by_id[subset_entity["_id"]] = subset_entity - - version_data = in_data["version"] - version_entity = version_data["version_entity"] - version_by_subset_id[subset_id] = version_entity - repres_by_version_id[version_entity["_id"]] = ( - version_data["repres"] - ) - - if not subsets_by_id: - self.log.warning("There are not subsets for asset {0}".format( - asset_entity["name"] - )) - return - - profiles_per_subset_id = self._prepare_profile_for_subsets( - subsets_by_id.values(), valid_profiles - ) - if not profiles_per_subset_id: - self.log.warning("There are not valid subsets.") - return - - valid_repres_by_subset_id = collections.defaultdict(list) - for subset_id, profile in profiles_per_subset_id.items(): - profile_repre_names = profile["repre_names_lowered"] - - version_entity = version_by_subset_id[subset_id] - version_id = version_entity["_id"] - repres = repres_by_version_id[version_id] - for repre in repres: - repre_name_low = repre["name"].lower() - if repre_name_low in profile_repre_names: - valid_repres_by_subset_id[subset_id].append(repre) - - # DEBUG message - msg = "Valid representations for Asset: `{}`".format( - asset_entity["name"] - ) - for subset_id, repres in valid_repres_by_subset_id.items(): - subset = subsets_by_id[subset_id] - msg += "\n# Subset Name/ID: `{}`/{}".format( - subset["name"], subset_id - ) - for repre in repres: - msg += "\n## Repre name: `{}`".format(repre["name"]) - - self.log.debug(msg) - - containers = self._load_containers( - valid_repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ) - - return { - "asset_entity": asset_entity, - "containers": containers - } - - @with_avalon - def _load_containers( - self, repres_by_subset_id, subsets_by_id, - profiles_per_subset_id, loaders_by_name - ): - """Real load by collected data happens here. - - Loading of representations per subset happens here. Each subset can - loads one representation. Loading is tried in specific order. - Representations are tried to load by names defined in configuration. - If subset has representation matching representation name each loader - is tried to load it until any is successful. If none of them was - successful then next representation name is tried. - Subset process loop ends when any representation is loaded or - all matching representations were already tried. - - Args: - repres_by_subset_id (dict): Available representations mapped - by their parent (subset) id. - subsets_by_id (dict): Subset documents mapped by their id. - profiles_per_subset_id (dict): Build profiles mapped by subset id. - loaders_by_name (dict): Available loaders per name. - - Returns: - (list) Objects of loaded containers. - """ - from openpype.pipeline import ( - IncompatibleLoaderError, - load_container, - ) - - loaded_containers = [] - - # Get subset id order from build presets. 
- build_presets = self.build_presets.get("current_context", []) - build_presets += self.build_presets.get("linked_assets", []) - subset_ids_ordered = [] - for preset in build_presets: - for preset_family in preset["families"]: - for id, subset in subsets_by_id.items(): - if preset_family not in subset["data"].get("families", []): - continue - - subset_ids_ordered.append(id) - - # Order representations from subsets. - print("repres_by_subset_id", repres_by_subset_id) - representations_ordered = [] - representations = [] - for id in subset_ids_ordered: - for subset_id, repres in repres_by_subset_id.items(): - if repres in representations: - continue - - if id == subset_id: - representations_ordered.append((subset_id, repres)) - representations.append(repres) - - print("representations", representations) - - # Load ordered representations. - for subset_id, repres in representations_ordered: - subset_name = subsets_by_id[subset_id]["name"] - - profile = profiles_per_subset_id[subset_id] - loaders_last_idx = len(profile["loaders"]) - 1 - repre_names_last_idx = len(profile["repre_names_lowered"]) - 1 - - repre_by_low_name = { - repre["name"].lower(): repre for repre in repres - } - - is_loaded = False - for repre_name_idx, profile_repre_name in enumerate( - profile["repre_names_lowered"] - ): - # Break iteration if representation was already loaded - if is_loaded: - break - - repre = repre_by_low_name.get(profile_repre_name) - if not repre: - continue - - for loader_idx, loader_name in enumerate(profile["loaders"]): - if is_loaded: - break - - loader = loaders_by_name.get(loader_name) - if not loader: - continue - try: - container = load_container( - loader, - repre["_id"], - name=subset_name - ) - loaded_containers.append(container) - is_loaded = True - - except Exception as exc: - if exc == IncompatibleLoaderError: - self.log.info(( - "Loader `{}` is not compatible with" - " representation `{}`" - ).format(loader_name, repre["name"])) - - else: - self.log.error( - "Unexpected error happened during loading", - exc_info=True - ) - - msg = "Loading failed." - if loader_idx < loaders_last_idx: - msg += " Trying next loader." - elif repre_name_idx < repre_names_last_idx: - msg += ( - " Loading of subset `{}` was not successful." - ).format(subset_name) - else: - msg += " Trying next representation." - self.log.info(msg) - - return loaded_containers - - @with_avalon - def _collect_last_version_repres(self, asset_entities): - """Collect subsets, versions and representations for asset_entities. - - Args: - asset_entities (list): Asset entities for which want to find data - - Returns: - (dict): collected entities - - Example output: - ``` - { - {Asset ID}: { - "asset_entity": , - "subsets": { - {Subset ID}: { - "subset_entity": , - "version": { - "version_entity": , - "repres": [ - , , ... - ] - } - }, - ... - } - }, - ... 
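One bug worth noting in the removed `_load_containers` loop above: `if exc == IncompatibleLoaderError` compares an exception instance with a class, so the branch can never match and incompatible loaders end up logged as unexpected errors. A sketch of how the inner try/except could have read, using the same names and imports as the removed loop:

try:
    container = load_container(
        loader,
        repre["_id"],
        name=subset_name
    )
    loaded_containers.append(container)
    is_loaded = True

except IncompatibleLoaderError:
    # Catch the specific exception type; comparing an instance to the
    # class with `==` is always False.
    self.log.info((
        "Loader `{}` is not compatible with representation `{}`"
    ).format(loader_name, repre["name"]))

except Exception:
    self.log.error(
        "Unexpected error happened during loading", exc_info=True
    )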
- } - output[asset_id]["subsets"][subset_id]["version"]["repres"] - ``` - """ - - if not asset_entities: - return {} - - asset_entity_by_ids = {asset["_id"]: asset for asset in asset_entities} - - subsets = list(avalon.io.find({ - "type": "subset", - "parent": {"$in": asset_entity_by_ids.keys()} - })) - subset_entity_by_ids = {subset["_id"]: subset for subset in subsets} - - sorted_versions = list(avalon.io.find({ - "type": "version", - "parent": {"$in": subset_entity_by_ids.keys()} - }).sort("name", -1)) - - subset_id_with_latest_version = [] - last_versions_by_id = {} - for version in sorted_versions: - subset_id = version["parent"] - if subset_id in subset_id_with_latest_version: - continue - subset_id_with_latest_version.append(subset_id) - last_versions_by_id[version["_id"]] = version - - repres = avalon.io.find({ - "type": "representation", - "parent": {"$in": last_versions_by_id.keys()} - }) - - output = {} - for repre in repres: - version_id = repre["parent"] - version = last_versions_by_id[version_id] - - subset_id = version["parent"] - subset = subset_entity_by_ids[subset_id] - - asset_id = subset["parent"] - asset = asset_entity_by_ids[asset_id] - - if asset_id not in output: - output[asset_id] = { - "asset_entity": asset, - "subsets": {} - } - - if subset_id not in output[asset_id]["subsets"]: - output[asset_id]["subsets"][subset_id] = { - "subset_entity": subset, - "version": { - "version_entity": version, - "repres": [] - } - } - - output[asset_id]["subsets"][subset_id]["version"]["repres"].append( - repre - ) - - return output - - -@with_avalon +@deprecated("openpype.pipeline.create.get_legacy_creator_by_name") def get_creator_by_name(creator_name, case_sensitive=False): """Find creator plugin by name. @@ -1603,50 +345,13 @@ def get_creator_by_name(creator_name, case_sensitive=False): Returns: Creator: Return first matching plugin or `None`. + + Deprecated: + Function will be removed after release version 3.16.* """ - from openpype.pipeline import LegacyCreator + from openpype.pipeline.create import get_legacy_creator_by_name - # Lower input creator name if is not case sensitive - if not case_sensitive: - creator_name = creator_name.lower() - - for creator_plugin in avalon.api.discover(LegacyCreator): - _creator_name = creator_plugin.__name__ - - # Lower creator plugin name if is not case sensitive - if not case_sensitive: - _creator_name = _creator_name.lower() - - if _creator_name == creator_name: - return creator_plugin - return None - - -@with_avalon -def change_timer_to_current_context(): - """Called after context change to change timers. - - TODO: - - use TimersManager's static method instead of reimplementing it here - """ - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - if not webserver_url: - log.warning("Couldn't find webserver url") - return - - rest_api_url = "{}/timers_manager/start_timer".format(webserver_url) - try: - import requests - except Exception: - log.warning("Couldn't start timer") - return - data = { - "project_name": avalon.io.Session["AVALON_PROJECT"], - "asset_name": avalon.io.Session["AVALON_ASSET"], - "task_name": avalon.io.Session["AVALON_TASK"] - } - - requests.post(rest_api_url, json=data) + return get_legacy_creator_by_name(creator_name, case_sensitive) def _get_task_context_data_for_anatomy( @@ -1670,7 +375,10 @@ def _get_task_context_data_for_anatomy( dict: With Anatomy context data. 
""" + from openpype.pipeline.template_data import get_general_template_data + if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) asset_name = asset_doc["name"] @@ -1709,12 +417,14 @@ def _get_task_context_data_for_anatomy( } } - system_general_data = get_system_general_anatomy_data() + system_general_data = get_general_template_data() data.update(system_general_data) return data +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_context") def get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy=None ): @@ -1736,9 +446,13 @@ def get_custom_workfile_template_by_context( Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) + + Deprecated: + Function will be removed after release version 3.16.* """ if anatomy is None: + from openpype.pipeline import Anatomy anatomy = Anatomy(project_doc["name"]) # get project, asset, task anatomy context data @@ -1767,6 +481,9 @@ def get_custom_workfile_template_by_context( return None +@deprecated( + "openpype.pipeline.workfile.get_custom_workfile_template_by_string_context" +) def get_custom_workfile_template_by_string_context( template_profiles, project_name, asset_name, task_name, dbcon=None, anatomy=None @@ -1790,47 +507,35 @@ def get_custom_workfile_template_by_string_context( Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) + + Deprecated: + Function will be removed after release version 3.16.* """ - if dbcon is None: - from avalon.api import AvalonMongoDB + project_name = None + if anatomy is not None: + project_name = anatomy.project_name - dbcon = AvalonMongoDB() + if not project_name and dbcon is not None: + project_name = dbcon.active_project() - dbcon.install() + if not project_name: + raise ValueError("Can't determina project") - if dbcon.Session["AVALON_PROJECT"] != project_name: - dbcon.Session["AVALON_PROJECT"] = project_name - - project_doc = dbcon.find_one( - {"type": "project"}, - # All we need is "name" and "data.code" keys - { - "name": 1, - "data.code": 1 - } - ) - asset_doc = dbcon.find_one( - { - "type": "asset", - "name": asset_name - }, - # All we need is "name" and "data.tasks" keys - { - "name": 1, - "data.tasks": 1 - } - ) + project_doc = get_project(project_name, fields=["name", "data.code"]) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["name", "data.tasks"]) return get_custom_workfile_template_by_context( template_profiles, project_doc, asset_doc, task_name, anatomy ) +@deprecated("openpype.pipeline.context_tools.get_custom_workfile_template") def get_custom_workfile_template(template_profiles): """Filter and fill workfile template profiles by current context. - Current context is defined by `avalon.api.Session`. That's why this + Current context is defined by `legacy_io.Session`. That's why this function should be used only inside host where context is set and stable. Args: @@ -1839,19 +544,23 @@ def get_custom_workfile_template(template_profiles): Returns: str: Path to template or None if none of profiles match current context. (Existence of formatted path is not validated.) 
+ + Deprecated: + Function will be removed after release version 3.16.* """ - # Use `avalon.io` as Mongo connection - from avalon import io + + from openpype.pipeline import legacy_io return get_custom_workfile_template_by_string_context( template_profiles, - io.Session["AVALON_PROJECT"], - io.Session["AVALON_ASSET"], - io.Session["AVALON_TASK"], - io + legacy_io.Session["AVALON_PROJECT"], + legacy_io.Session["AVALON_ASSET"], + legacy_io.Session["AVALON_TASK"], + legacy_io ) +@deprecated("openpype.pipeline.workfile.get_last_workfile_with_version") def get_last_workfile_with_version( workdir, file_template, fill_data, extensions ): @@ -1866,79 +575,19 @@ def get_last_workfile_with_version( Returns: tuple: Last workfile with version if there is any otherwise returns (None, None). + + Deprecated: + Function will be removed after release version 3.16.* """ - if not os.path.exists(workdir): - return None, None - # Fast match on extension - filenames = [ - filename - for filename in os.listdir(workdir) - if os.path.splitext(filename)[1] in extensions - ] + from openpype.pipeline.workfile import get_last_workfile_with_version - # Build template without optionals, version to digits only regex - # and comment to any definable value. - _ext = [] - for ext in extensions: - if not ext.startswith("."): - ext = "." + ext - # Escape dot for regex - ext = "\\" + ext - _ext.append(ext) - ext_expression = "(?:" + "|".join(_ext) + ")" - - # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end - file_template = re.sub(r"\.?{ext}", ext_expression, file_template) - # Replace optional keys with optional content regex - file_template = re.sub(r"<.*?>", r".*?", file_template) - # Replace `{version}` with group regex - file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) - file_template = re.sub(r"{comment.*?}", r".+?", file_template) - file_template = StringTemplate.format_strict_template( - file_template, fill_data + return get_last_workfile_with_version( + workdir, file_template, fill_data, extensions ) - # Match with ignore case on Windows due to the Windows - # OS not being case-sensitive. This avoids later running - # into the error that the file did exist if it existed - # with a different upper/lower-case. - kwargs = {} - if platform.system().lower() == "windows": - kwargs["flags"] = re.IGNORECASE - - # Get highest version among existing matching files - version = None - output_filenames = [] - for filename in sorted(filenames): - match = re.match(file_template, filename, **kwargs) - if not match: - continue - - file_version = int(match.group(1)) - if version is None or file_version > version: - output_filenames[:] = [] - version = file_version - - if file_version == version: - output_filenames.append(filename) - - output_filename = None - if output_filenames: - if len(output_filenames) == 1: - output_filename = output_filenames[0] - else: - last_time = None - for _output_filename in output_filenames: - full_path = os.path.join(workdir, _output_filename) - mod_time = os.path.getmtime(full_path) - if last_time is None or last_time < mod_time: - output_filename = _output_filename - last_time = mod_time - - return output_filename, version - +@deprecated("openpype.pipeline.workfile.get_last_workfile") def get_last_workfile( workdir, file_template, fill_data, extensions, full_path=False ): @@ -1955,19 +604,51 @@ def get_last_workfile( Returns: str: Last or first workfile as filename of full path to filename. 
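The matcher removed above built a regular expression out of the workfile file template before scanning the work directory. A worked sketch of those substitutions (the template and extensions below are hypothetical):

import re

file_template = "{asset}_v{version:0>3}<_{comment}>.{ext}"
extensions = [".ma", ".mb"]

# Escape the dot of each extension and join them into one alternation.
ext_expression = "(?:" + "|".join("\\" + ext for ext in extensions) + ")"
file_template = re.sub(r"\.?{ext}", ext_expression, file_template)
# Optional template parts ("<...>") may match anything.
file_template = re.sub(r"<.*?>", r".*?", file_template)
# Version becomes the single capturing group, comment any non-empty value.
file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template)
file_template = re.sub(r"{comment.*?}", r".+?", file_template)

print(file_template)  # {asset}_v([0-9]+).*?(?:\.ma|\.mb)
# Remaining keys such as {asset} were then filled from fill_data with
# StringTemplate.format_strict_template() before matching file names.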
+
+    Deprecated:
+        Function will be removed after release version 3.16.*
     """
-    filename, version = get_last_workfile_with_version(
-        workdir, file_template, fill_data, extensions
+
+    from openpype.pipeline.workfile import get_last_workfile
+
+    return get_last_workfile(
+        workdir, file_template, fill_data, extensions, full_path
     )
-    if filename is None:
-        data = copy.deepcopy(fill_data)
-        data["version"] = 1
-        data.pop("comment", None)
-        if not data.get("ext"):
-            data["ext"] = extensions[0]
-        filename = StringTemplate.format_strict_template(file_template, data)
-
-    if full_path:
-        return os.path.normpath(os.path.join(workdir, filename))
-
-    return filename
+
+@deprecated("openpype.client.get_linked_representation_id")
+def get_linked_ids_for_representations(
+    project_name, repre_ids, dbcon=None, link_type=None, max_depth=0
+):
+    """Returns list of linked ids of particular type (if provided).
+
+    Goes from representations to version, back to representations.
+
+    Args:
+        project_name (str)
+        repre_ids (list or ObjectId): Representation id or ids to start from.
+        dbcon (avalon.mongodb.AvalonMongoDB, optional): Avalon Mongo connection
+            with Session.
+        link_type (str): Type of link to follow (e.g. "reference").
+        max_depth (int): Limit how many levels of recursion to follow.
+
+    Returns:
+        (list) of ObjectId - linked representations
+
+    Deprecated:
+        Function will be removed after release version 3.16.*
+    """
+
+    from openpype.client import get_linked_representation_id
+
+    if not isinstance(repre_ids, list):
+        repre_ids = [repre_ids]
+
+    output = []
+    for repre_id in repre_ids:
+        output.extend(get_linked_representation_id(
+            project_name,
+            repre_id=repre_id,
+            link_type=link_type,
+            max_depth=max_depth
+        ))
+    return output
diff --git a/openpype/lib/connections.py b/openpype/lib/connections.py
new file mode 100644
index 0000000000..91b745a4c1
--- /dev/null
+++ b/openpype/lib/connections.py
@@ -0,0 +1,38 @@
+import requests
+import os
+
+
+def requests_post(*args, **kwargs):
+    """Wrap request post method.
+
+    SSL certificate validation is disabled when the
+    ``OPENPYPE_DONT_VERIFY_SSL`` environment variable is set. This is useful
+    when Deadline or Muster servers are running with self-signed certificates
+    and their certificate is not added to trusted certificates on client
+    machines.
+
+    Warning:
+        Disabling SSL certificate validation defeats one line of defense
+        SSL provides and is not recommended.
+
+    """
+    if "verify" not in kwargs:
+        # Check the variable without a default so verification stays on
+        # when it is unset; 'not os.getenv(..., True)' would disable
+        # verification even when the variable is not defined.
+        kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL")
+    return requests.post(*args, **kwargs)
+
+
+def requests_get(*args, **kwargs):
+    """Wrap request get method.
+
+    SSL certificate validation is disabled when the
+    ``OPENPYPE_DONT_VERIFY_SSL`` environment variable is set. This is useful
+    when Deadline or Muster servers are running with self-signed certificates
+    and their certificate is not added to trusted certificates on client
+    machines.
+
+    Warning:
+        Disabling SSL certificate validation defeats one line of defense
+        SSL provides and is not recommended.
+
+    """
+    if "verify" not in kwargs:
+        # Same logic as in 'requests_post': verification stays enabled
+        # unless the environment variable is set to a non-empty value.
+        kwargs["verify"] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL")
+    return requests.get(*args, **kwargs)
diff --git a/openpype/lib/config.py b/openpype/lib/dateutils.py
similarity index 88%
rename from openpype/lib/config.py
rename to openpype/lib/dateutils.py
index 57e8efa57d..68cd1d1c5b 100644
--- a/openpype/lib/config.py
+++ b/openpype/lib/dateutils.py
@@ -76,7 +76,20 @@ def get_datetime_data(datetime_obj=None):
     }
 
 
-def get_formatted_current_time():
-    return datetime.datetime.now().strftime(
+def get_timestamp(datetime_obj=None):
+    """Get standardized timestamp from datetime object.
+
+    Args:
+        datetime_obj (datetime.datetime): Object of datetime. Current time
+            is used if not passed.
+    """
+
+    if datetime_obj is None:
+        datetime_obj = datetime.datetime.now()
+    return datetime_obj.strftime(
         "%Y%m%dT%H%M%SZ"
     )
+
+
+def get_formatted_current_time():
+    return get_timestamp()
diff --git a/openpype/lib/delivery.py b/openpype/lib/delivery.py
index ffcfe9fa4d..efb542de75 100644
--- a/openpype/lib/delivery.py
+++ b/openpype/lib/delivery.py
@@ -1,81 +1,113 @@
 """Functions useful for delivery action or loader"""
 import os
 import shutil
-import glob
-import clique
-import collections
-
-from .path_templates import (
-    StringTemplate,
-    TemplateUnsolved,
-)
+import functools
+import warnings
 
 
+class DeliveryDeprecatedWarning(DeprecationWarning):
+    pass
+
+
+def deprecated(new_destination):
+    """Mark functions as deprecated.
+
+    A warning is emitted when the decorated function is used. The decorator
+    can be applied bare ('@deprecated') or with a dotted path to the
+    replacement ('@deprecated("new.location")').
+    """
+
+    func = None
+    if callable(new_destination):
+        func = new_destination
+        new_destination = None
+
+    def _decorator(decorated_func):
+        if new_destination is None:
+            warning_message = (
+                " Please check content of deprecated function to figure out"
+                " possible replacement."
+            )
+        else:
+            warning_message = " Please replace your usage with '{}'.".format(
+                new_destination
+            )
+
+        @functools.wraps(decorated_func)
+        def wrapper(*args, **kwargs):
+            warnings.simplefilter("always", DeliveryDeprecatedWarning)
+            warnings.warn(
+                (
+                    "Call to deprecated function '{}'"
+                    "\nFunction was moved or removed.{}"
+                ).format(decorated_func.__name__, warning_message),
+                category=DeliveryDeprecatedWarning,
+                stacklevel=4
+            )
+            return decorated_func(*args, **kwargs)
+        return wrapper
+
+    if func is None:
+        return _decorator
+    return _decorator(func)
+
+
+@deprecated("openpype.lib.path_tools.collect_frames")
 def collect_frames(files):
+    """Returns dict of source path and its frame, if from sequence.
+
+    Uses clique as most precise solution, used when anatomy template that
+    created files is not known.
+
+    Assumption is that frames are separated by '.', negative frames are not
+    allowed.
+
+    Args:
+        files (list or set): List of source paths.
+
+    Returns:
+        (dict): {'/asset/subset_v001.0001.png': '0001', ....}
+
+    Deprecated:
+        Function was moved to different location and will be removed
+        after 3.16.* release.
     """
-    Returns dict of source path and its frame, if from sequence
 
-    Uses clique as most precise solution, used when anatomy template that
-    created files is not known.
+    from .path_tools import collect_frames
 
-    Assumption is that frames are separated by '.', negative frames are not
-    allowed.
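Usage of the two wrappers from `openpype/lib/connections.py` above amounts to a drop-in replacement for plain `requests` calls. A small sketch (the URL is hypothetical):

import os

from openpype.lib.connections import requests_get

# Default behaviour: certificates are verified.
response = requests_get("https://deadline.example.com/api/pools")

# Opt out for self-signed certificates by setting the variable to any
# non-empty value before the call.
os.environ["OPENPYPE_DONT_VERIFY_SSL"] = "1"
response = requests_get("https://deadline.example.com/api/pools")  # verify=False
print(response.status_code)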
+ return collect_frames(files) - Args: - files(list) or (set with single value): list of source paths - Returns: - (dict): {'/asset/subset_v001.0001.png': '0001', ....} + +@deprecated("openpype.lib.path_tools.format_file_size") +def sizeof_fmt(num, suffix=None): + """Returns formatted string with size in appropriate unit + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - patterns = [clique.PATTERNS["frames"]] - collections, remainder = clique.assemble(files, minimum_items=1, - patterns=patterns) - sources_and_frames = {} - if collections: - for collection in collections: - src_head = collection.head - src_tail = collection.tail - - for index in collection.indexes: - src_frame = collection.format("{padding}") % index - src_file_name = "{}{}{}".format(src_head, src_frame, - src_tail) - sources_and_frames[src_file_name] = src_frame - else: - sources_and_frames[remainder.pop()] = None - - return sources_and_frames - - -def sizeof_fmt(num, suffix='B'): - """Returns formatted string with size in appropriate unit""" - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) + from .path_tools import format_file_size + return format_file_size(num, suffix) +@deprecated("openpype.pipeline.load.get_representation_path_with_anatomy") def path_from_representation(representation, anatomy): - try: - template = representation["data"]["template"] + """Get representation path using representation document and anatomy. - except KeyError: - return None + Args: + representation (Dict[str, Any]): Representation document. + anatomy (Anatomy): Project anatomy. - try: - context = representation["context"] - context["root"] = anatomy.roots - path = StringTemplate.format_strict_template(template, context) - return os.path.normpath(path) + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. + """ - except TemplateUnsolved: - # Template references unavailable data - return None + from openpype.pipeline.load import get_representation_path_with_anatomy - return path + return get_representation_path_with_anatomy(representation, anatomy) +@deprecated def copy_file(src_path, dst_path): """Hardlink file if possible(to save space), copy if not""" from openpype.lib import create_hard_link # safer importing @@ -91,131 +123,96 @@ def copy_file(src_path, dst_path): shutil.copyfile(src_path, dst_path) +@deprecated("openpype.pipeline.delivery.get_format_dict") def get_format_dict(anatomy, location_path): """Returns replaced root values from user provider value. - Args: - anatomy (Anatomy) - location_path (str): user provided value - Returns: - (dict): prepared for formatting of a template + Args: + anatomy (Anatomy) + location_path (str): user provided value + + Returns: + (dict): prepared for formatting of a template + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. 
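The `@deprecated` decorator is applied throughout this file in both of its forms, as the wrappers above show. A minimal sketch of what a caller then sees, derived from the decorator's own message format:

@deprecated("openpype.lib.path_tools.format_file_size")
def sizeof_fmt(num, suffix=None):
    ...

sizeof_fmt(1024)
# DeliveryDeprecatedWarning: Call to deprecated function 'sizeof_fmt'
# Function was moved or removed. Please replace your usage with
# 'openpype.lib.path_tools.format_file_size'.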
""" - format_dict = {} - if location_path: - location_path = location_path.replace("\\", "/") - root_names = anatomy.root_names_from_templates( - anatomy.templates["delivery"] - ) - if root_names is None: - format_dict["root"] = location_path - else: - format_dict["root"] = {} - for name in root_names: - format_dict["root"][name] = location_path - return format_dict + + from openpype.pipeline.delivery import get_format_dict + + return get_format_dict(anatomy, location_path) +@deprecated("openpype.pipeline.delivery.check_destination_path") def check_destination_path(repre_id, anatomy, anatomy_data, datetime_data, template_name): """ Try to create destination path based on 'template_name'. - In the case that path cannot be filled, template contains unmatched - keys, provide error message to filter out repre later. + In the case that path cannot be filled, template contains unmatched + keys, provide error message to filter out repre later. - Args: - anatomy (Anatomy) - anatomy_data (dict): context to fill anatomy - datetime_data (dict): values with actual date - template_name (str): to pick correct delivery template - Returns: - (collections.defauldict): {"TYPE_OF_ERROR":"ERROR_DETAIL"} + Args: + anatomy (Anatomy) + anatomy_data (dict): context to fill anatomy + datetime_data (dict): values with actual date + template_name (str): to pick correct delivery template + + Returns: + (collections.defauldict): {"TYPE_OF_ERROR":"ERROR_DETAIL"} + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - anatomy_data.update(datetime_data) - anatomy_filled = anatomy.format_all(anatomy_data) - dest_path = anatomy_filled["delivery"][template_name] - report_items = collections.defaultdict(list) - if not dest_path.solved: - msg = ( - "Missing keys in Representation's context" - " for anatomy template \"{}\"." - ).format(template_name) + from openpype.pipeline.delivery import check_destination_path - sub_msg = ( - "Representation: {}
" - ).format(repre_id) - - if dest_path.missing_keys: - keys = ", ".join(dest_path.missing_keys) - sub_msg += ( - "- Missing keys: \"{}\"
" - ).format(keys) - - if dest_path.invalid_types: - items = [] - for key, value in dest_path.invalid_types.items(): - items.append("\"{}\" {}".format(key, str(value))) - - keys = ", ".join(items) - sub_msg += ( - "- Invalid value DataType: \"{}\"
" - ).format(keys) - - report_items[msg].append(sub_msg) - - return report_items + return check_destination_path( + repre_id, + anatomy, + anatomy_data, + datetime_data, + template_name + ) +@deprecated("openpype.pipeline.delivery.deliver_single_file") def process_single_file( src_path, repre, anatomy, template_name, anatomy_data, format_dict, report_items, log ): """Copy single file to calculated path based on template - Args: - src_path(str): path of source representation file - _repre (dict): full repre, used only in process_sequence, here only - as to share same signature - anatomy (Anatomy) - template_name (string): user selected delivery template name - anatomy_data (dict): data from repre to fill anatomy with - format_dict (dict): root dictionary with names and values - report_items (collections.defaultdict): to return error messages - log (Logger): for log printing - Returns: - (collections.defaultdict , int) + Args: + src_path(str): path of source representation file + _repre (dict): full repre, used only in process_sequence, here only + as to share same signature + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (Logger): for log printing + + Returns: + (collections.defaultdict , int) + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - # Make sure path is valid for all platforms - src_path = os.path.normpath(src_path.replace("\\", "/")) - if not os.path.exists(src_path): - msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) - report_items["Source file was not found"].append(msg) - return report_items, 0 + from openpype.pipeline.delivery import deliver_single_file - anatomy_filled = anatomy.format(anatomy_data) - if format_dict: - template_result = anatomy_filled["delivery"][template_name] - delivery_path = template_result.rootless.format(**format_dict) - else: - delivery_path = anatomy_filled["delivery"][template_name] - - # Backwards compatibility when extension contained `.` - delivery_path = delivery_path.replace("..", ".") - # Make sure path is valid for all platforms - delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) - - delivery_folder = os.path.dirname(delivery_path) - if not os.path.exists(delivery_folder): - os.makedirs(delivery_folder) - - log.debug("Copying single: {} -> {}".format(src_path, delivery_path)) - copy_file(src_path, delivery_path) - - return report_items, 1 + return deliver_single_file( + src_path, repre, anatomy, template_name, anatomy_data, format_dict, + report_items, log + ) +@deprecated("openpype.pipeline.delivery.deliver_sequence") def process_sequence( src_path, repre, anatomy, template_name, anatomy_data, format_dict, report_items, log @@ -223,128 +220,33 @@ def process_sequence( """ For Pype2(mainly - works in 3 too) where representation might not contain files. - Uses listing physical files (not 'files' on repre as a)might not be - present, b)might not be reliable for representation and copying them. + Uses listing physical files (not 'files' on repre as a)might not be + present, b)might not be reliable for representation and copying them. - TODO Should be refactored when files are sufficient to drive all - representations. + TODO Should be refactored when files are sufficient to drive all + representations. 
- Args: - src_path(str): path of source representation file - repre (dict): full representation - anatomy (Anatomy) - template_name (string): user selected delivery template name - anatomy_data (dict): data from repre to fill anatomy with - format_dict (dict): root dictionary with names and values - report_items (collections.defaultdict): to return error messages - log (Logger): for log printing - Returns: - (collections.defaultdict , int) + Args: + src_path(str): path of source representation file + repre (dict): full representation + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (Logger): for log printing + + Returns: + (collections.defaultdict , int) + + Deprecated: + Function was moved to different location and will be removed + after 3.16.* release. """ - src_path = os.path.normpath(src_path.replace("\\", "/")) - def hash_path_exist(myPath): - res = myPath.replace('#', '*') - glob_search_results = glob.glob(res) - if len(glob_search_results) > 0: - return True - return False + from openpype.pipeline.delivery import deliver_sequence - if not hash_path_exist(src_path): - msg = "{} doesn't exist for {}".format(src_path, - repre["_id"]) - report_items["Source file was not found"].append(msg) - return report_items, 0 - - delivery_templates = anatomy.templates.get("delivery") or {} - delivery_template = delivery_templates.get(template_name) - if delivery_template is None: - msg = ( - "Delivery template \"{}\" in anatomy of project \"{}\"" - " was not found" - ).format(template_name, anatomy.project_name) - report_items[""].append(msg) - return report_items, 0 - - # Check if 'frame' key is available in template which is required - # for sequence delivery - if "{frame" not in delivery_template: - msg = ( - "Delivery template \"{}\" in anatomy of project \"{}\"" - "does not contain '{{frame}}' key to fill. Delivery of sequence" - " can't be processed." - ).format(template_name, anatomy.project_name) - report_items[""].append(msg) - return report_items, 0 - - dir_path, file_name = os.path.split(str(src_path)) - - context = repre["context"] - ext = context.get("ext", context.get("representation")) - - if not ext: - msg = "Source extension not found, cannot find collection" - report_items[msg].append(src_path) - log.warning("{} <{}>".format(msg, context)) - return report_items, 0 - - ext = "." 
+ ext - # context.representation could be .psd - ext = ext.replace("..", ".") - - src_collections, remainder = clique.assemble(os.listdir(dir_path)) - src_collection = None - for col in src_collections: - if col.tail != ext: - continue - - src_collection = col - break - - if src_collection is None: - msg = "Source collection of files was not found" - report_items[msg].append(src_path) - log.warning("{} <{}>".format(msg, src_path)) - return report_items, 0 - - frame_indicator = "@####@" - - anatomy_data["frame"] = frame_indicator - anatomy_filled = anatomy.format(anatomy_data) - - if format_dict: - template_result = anatomy_filled["delivery"][template_name] - delivery_path = template_result.rootless.format(**format_dict) - else: - delivery_path = anatomy_filled["delivery"][template_name] - - delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) - delivery_folder = os.path.dirname(delivery_path) - dst_head, dst_tail = delivery_path.split(frame_indicator) - dst_padding = src_collection.padding - dst_collection = clique.Collection( - head=dst_head, - tail=dst_tail, - padding=dst_padding + return deliver_sequence( + src_path, repre, anatomy, template_name, anatomy_data, format_dict, + report_items, log ) - - if not os.path.exists(delivery_folder): - os.makedirs(delivery_folder) - - src_head = src_collection.head - src_tail = src_collection.tail - uploaded = 0 - for index in src_collection.indexes: - src_padding = src_collection.format("{padding}") % index - src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) - src = os.path.normpath( - os.path.join(dir_path, src_file_name) - ) - - dst_padding = dst_collection.format("{padding}") % index - dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) - log.debug("Copying single: {} -> {}".format(src, dst)) - copy_file(src, dst) - uploaded += 1 - - return report_items, uploaded diff --git a/openpype/lib/env_tools.py b/openpype/lib/env_tools.py index 6521d20f1e..25bcbf7c1b 100644 --- a/openpype/lib/env_tools.py +++ b/openpype/lib/env_tools.py @@ -69,57 +69,3 @@ def get_paths_from_environ(env_key=None, env_value=None, return_first=False): return None # Return all existing paths from environment variable return existing_paths - - -def get_global_environments(env=None): - """Load global environments from Pype. - - Return prepared and parsed global environments by pype's settings. Use - combination of "global" environments set in pype's settings and enabled - modules. - - Args: - env (dict, optional): Initial environments. Empty dictionary is used - when not entered. - - Returns; - dict of str: Loaded and processed environments. 
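For reference while `get_global_environments` is removed below: its composition logic was a straight chain of `acre` calls. A condensed sketch with hypothetical values (whether `{STUDIO_ROOT}` resolves as shown depends on the acre version):

import acre

# Environments from settings, parsed for dynamic keys.
settings_env = acre.parse({"STUDIO_TOOLS": "{STUDIO_ROOT}/tools"})
merged = acre.append({"STUDIO_ROOT": "/mnt/studio"}, settings_env)

# Environments collected from modules, merged on top.
module_envs = acre.parse({"PYBLISHPLUGINPATH": "/mnt/studio/plugins"})
merged = acre.merge(module_envs, merged)

# Resolve references between keys and drop unresolved ones.
env = acre.compute(merged, cleanup=True)
print(env["STUDIO_TOOLS"])  # "/mnt/studio/tools"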
- - """ - import acre - from openpype.modules import ModulesManager - from openpype.settings import get_environments - - if env is None: - env = {} - - # Get global environments from settings - all_settings_env = get_environments() - parsed_global_env = acre.parse(all_settings_env["global"]) - - # Merge with entered environments - merged_env = acre.append(env, parsed_global_env) - - # Get environments from Pype modules - modules_manager = ModulesManager() - - module_envs = modules_manager.collect_global_environments() - publish_plugin_dirs = modules_manager.collect_plugin_paths()["publish"] - - # Set pyblish plugins paths if any module want to register them - if publish_plugin_dirs: - publish_paths_str = os.environ.get("PYBLISHPLUGINPATH") or "" - publish_paths = publish_paths_str.split(os.pathsep) - _publish_paths = { - os.path.normpath(path) for path in publish_paths if path - } - for path in publish_plugin_dirs: - _publish_paths.add(os.path.normpath(path)) - module_envs["PYBLISHPLUGINPATH"] = os.pathsep.join(_publish_paths) - - # Merge environments with current environments and update values - if module_envs: - parsed_envs = acre.parse(module_envs) - merged_env = acre.merge(parsed_envs, merged_env) - - return acre.compute(merged_env, cleanup=True) diff --git a/openpype/lib/events.py b/openpype/lib/events.py index 7bec6ee30d..096201312f 100644 --- a/openpype/lib/events.py +++ b/openpype/lib/events.py @@ -1,6 +1,7 @@ """Events holding data about specific event.""" import os import re +import copy import inspect import logging import weakref @@ -11,6 +12,10 @@ except Exception: from openpype.lib.python_2_comp import WeakMethod +class MissingEventSystem(Exception): + pass + + class EventCallback(object): """Callback registered to a topic. @@ -69,22 +74,52 @@ class EventCallback(object): "Registered callback is not callable. \"{}\"" ).format(str(func))) - # Collect additional data about function - # - name - # - path - # - if expect argument or not + # Collect function name and path to file for logging func_name = func.__name__ func_path = os.path.abspath(inspect.getfile(func)) + + # Get expected arguments from function spec + # - positional arguments are always preferred + expect_args = False + expect_kwargs = False + fake_event = "fake" if hasattr(inspect, "signature"): + # Python 3 using 'Signature' object where we try to bind arg + # or kwarg. Using signature is recommended approach based on + # documentation. sig = inspect.signature(func) - expect_args = len(sig.parameters) > 0 + try: + sig.bind(fake_event) + expect_args = True + except TypeError: + pass + + try: + sig.bind(event=fake_event) + expect_kwargs = True + except TypeError: + pass + else: - expect_args = len(inspect.getargspec(func)[0]) > 0 + # In Python 2 'signature' is not available so 'getcallargs' is used + # - 'getcallargs' is marked as deprecated since Python 3.0 + try: + inspect.getcallargs(func, fake_event) + expect_args = True + except TypeError: + pass + + try: + inspect.getcallargs(func, event=fake_event) + expect_kwargs = True + except TypeError: + pass self._func_ref = func_ref self._func_name = func_name self._func_path = func_path self._expect_args = expect_args + self._expect_kwargs = expect_kwargs self._ref_valid = func_ref is not None self._enabled = True @@ -152,6 +187,10 @@ class EventCallback(object): try: if self._expect_args: callback(event) + + elif self._expect_kwargs: + callback(event=event) + else: callback() @@ -176,16 +215,20 @@ class Event(object): topic (str): Identifier of event. 
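The argument detection above means a registered callback may accept the event positionally, keyword-only, or not at all. A sketch of the three shapes that `process_event` will call correctly:

def on_event(event):        # binds positionally -> called as callback(event)
    print(event.topic)

def on_event_kw(*, event):  # binds only as keyword -> called as callback(event=event)
    print(event.topic)

def on_event_plain():       # accepts nothing -> called as callback()
    print("something happened")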
data (Any): Data specific for event. Dictionary is recommended. source (str): Identifier of source. + event_system (EventSystem): Event system in which can be event + triggered. """ + _data = {} - def __init__(self, topic, data=None, source=None): + def __init__(self, topic, data=None, source=None, event_system=None): self._id = str(uuid4()) self._topic = topic if data is None: data = {} self._data = data self._source = source + self._event_system = event_system def __getitem__(self, key): return self._data[key] @@ -199,6 +242,12 @@ class Event(object): @property def source(self): + """Event's source used for triggering callbacks. + + Returns: + Union[str, None]: Source string or None. Source is optional. + """ + return self._source @property @@ -207,32 +256,164 @@ class Event(object): @property def topic(self): + """Event's topic used for triggering callbacks. + + Returns: + str: Topic string. + """ + return self._topic def emit(self): """Emit event and trigger callbacks.""" - StoredCallbacks.emit_event(self) + if self._event_system is None: + raise MissingEventSystem( + "Can't emit event {}. Does not have set event system.".format( + str(repr(self)) + ) + ) + self._event_system.emit_event(self) + def to_data(self): + """Convert Event object to data. -class StoredCallbacks: - _registered_callbacks = [] + Returns: + Dict[str, Any]: Event data. + """ + + return { + "id": self.id, + "topic": self.topic, + "source": self.source, + "data": copy.deepcopy(self.data) + } @classmethod - def add_callback(cls, topic, callback): + def from_data(cls, event_data, event_system=None): + """Create event from data. + + Args: + event_data (Dict[str, Any]): Event data with defined keys. Can be + created using 'to_data' method. + event_system (EventSystem): System to which the event belongs. + + Returns: + Event: Event with attributes from passed data. + """ + + obj = cls( + event_data["topic"], + event_data["data"], + event_data["source"], + event_system + ) + obj._id = event_data["id"] + return obj + + +class EventSystem(object): + """Encapsulate event handling into an object. + + System wraps registered callbacks and triggered events into single object + so it is possible to create mutltiple independent systems that have their + topics and callbacks. + + + """ + + def __init__(self): + self._registered_callbacks = [] + + def add_callback(self, topic, callback): + """Register callback in event system. + + Args: + topic (str): Topic for EventCallback. + callback (Callable): Function or method that will be called + when topic is triggered. + + Returns: + EventCallback: Created callback object which can be used to + stop listening. + """ + callback = EventCallback(topic, callback) - cls._registered_callbacks.append(callback) + self._registered_callbacks.append(callback) return callback - @classmethod - def emit_event(cls, event): + def create_event(self, topic, data, source): + """Create new event which is bound to event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Object of event. + """ + + return Event(topic, data, source, self) + + def emit(self, topic, data, source): + """Create event based on passed data and emit it. + + This is easiest way how to trigger event in an event system. + + Args: + topic (str): Event topic. + data (dict): Data related to event. + source (str): Source of event. + + Returns: + Event: Created and emitted event. 
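Putting the new `EventSystem` pieces together, a minimal usage sketch (the topic, data and source strings are hypothetical):

system = EventSystem()

def on_workfile_saved(event):
    print(event.topic, event["filepath"], event.source)

callback = system.add_callback("workfile.saved", on_workfile_saved)
system.emit("workfile.saved", {"filepath": "/tmp/scene.ma"}, "editor-sketch")
# prints: workfile.saved /tmp/scene.ma editor-sketch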
+ """ + + event = self.create_event(topic, data, source) + event.emit() + return event + + def emit_event(self, event): + """Emit event object. + + Args: + event (Event): Prepared event with topic and data. + """ + invalid_callbacks = [] - for callback in cls._registered_callbacks: + for callback in self._registered_callbacks: callback.process_event(event) if not callback.is_ref_valid: invalid_callbacks.append(callback) for callback in invalid_callbacks: - cls._registered_callbacks.remove(callback) + self._registered_callbacks.remove(callback) + + +class GlobalEventSystem: + """Event system living in global scope of process. + + This is primarily used in host implementation to trigger events + related to DCC changes or changes of context in the host implementation. + """ + + _global_event_system = None + + @classmethod + def get_global_event_system(cls): + if cls._global_event_system is None: + cls._global_event_system = EventSystem() + return cls._global_event_system + + @classmethod + def add_callback(cls, topic, callback): + event_system = cls.get_global_event_system() + return event_system.add_callback(topic, callback) + + @classmethod + def emit(cls, topic, data, source): + event_system = cls.get_global_event_system() + return event_system.emit(topic, data, source) def register_event_callback(topic, callback): @@ -249,7 +430,8 @@ def register_event_callback(topic, callback): enable/disable listening to a topic or remove the callback from the topic completely. """ - return StoredCallbacks.add_callback(topic, callback) + + return GlobalEventSystem.add_callback(topic, callback) def emit_event(topic, data=None, source=None): @@ -263,6 +445,5 @@ def emit_event(topic, data=None, source=None): Returns: Event: Object of event that was emitted. """ - event = Event(topic, data, source) - event.emit() - return event + + return GlobalEventSystem.emit(topic, data, source) diff --git a/openpype/lib/execute.py b/openpype/lib/execute.py index c3e35772f3..39532b7aa5 100644 --- a/openpype/lib/execute.py +++ b/openpype/lib/execute.py @@ -5,7 +5,7 @@ import platform import json import tempfile -from .log import PypeLogger as Logger +from .log import Logger from .vendor_bin_utils import find_executable # MSDN process creation flag (Windows only) @@ -40,7 +40,7 @@ def execute(args, log_levels = ['DEBUG:', 'INFO:', 'ERROR:', 'WARNING:', 'CRITICAL:'] - log = Logger().get_logger('execute') + log = Logger.get_logger('execute') log.info("Executing ({})".format(" ".join(args))) popen = subprocess.Popen( args, @@ -117,12 +117,12 @@ def run_subprocess(*args, **kwargs): full_output = "" _stdout, _stderr = proc.communicate() if _stdout: - _stdout = _stdout.decode("utf-8") + _stdout = _stdout.decode("utf-8", errors="backslashreplace") full_output += _stdout logger.debug(_stdout) if _stderr: - _stderr = _stderr.decode("utf-8") + _stderr = _stderr.decode("utf-8", errors="backslashreplace") # Add additional line break if output already contains stdout if full_output: full_output += "\n" diff --git a/openpype/lib/file_transaction.py b/openpype/lib/file_transaction.py new file mode 100644 index 0000000000..fe70b37cb1 --- /dev/null +++ b/openpype/lib/file_transaction.py @@ -0,0 +1,194 @@ +import os +import logging +import sys +import errno +import six + +from openpype.lib import create_hard_link + +# this is needed until speedcopy for linux is fixed +if sys.platform == "win32": + from speedcopy import copyfile +else: + from shutil import copyfile + + +class FileTransaction(object): + """File transaction with rollback 
options. + + The file transaction is a three-step process. + + 1) Rename any existing files to a "temporary backup" during `process()` + 2) Copy the files to final destination during `process()` + 3) Remove any backed up files (*no rollback possible!) during `finalize()` + + Step 3 is done during `finalize()`. If not called the .bak files will + remain on disk. + + These steps try to ensure that we don't overwrite half of any existing + files e.g. if they are currently in use. + + Note: + A regular filesystem is *not* a transactional file system and even + though this implementation tries to produce a 'safe copy' with a + potential rollback do keep in mind that it's inherently unsafe due + to how filesystem works and a myriad of things could happen during + the transaction that break the logic. A file storage could go down, + permissions could be changed, other machines could be moving or writing + files. A lot can happen. + + Warning: + Any folders created during the transfer will not be removed. + """ + + MODE_COPY = 0 + MODE_HARDLINK = 1 + + def __init__(self, log=None): + if log is None: + log = logging.getLogger("FileTransaction") + + self.log = log + + # The transfer queue + # todo: make this an actual FIFO queue? + self._transfers = {} + + # Destination file paths that a file was transferred to + self._transferred = [] + + # Backup file location mapping to original locations + self._backup_to_original = {} + + def add(self, src, dst, mode=MODE_COPY): + """Add a new file to transfer queue. + + Args: + src (str): Source path. + dst (str): Destination path. + mode (MODE_COPY, MODE_HARDLINK): Transfer mode. + """ + + opts = {"mode": mode} + + src = os.path.normpath(os.path.abspath(src)) + dst = os.path.normpath(os.path.abspath(dst)) + + if dst in self._transfers: + queued_src = self._transfers[dst][0] + if src == queued_src: + self.log.debug( + "File transfer was already in queue: {} -> {}".format( + src, dst)) + return + else: + self.log.warning("File transfer in queue replaced..") + self.log.debug( + "Removed from queue: {} -> {} replaced by {} -> {}".format( + queued_src, dst, src, dst)) + + self._transfers[dst] = (src, opts) + + def process(self): + # Backup any existing files + for dst, (src, _) in self._transfers.items(): + self.log.debug("Checking file ... {} -> {}".format(src, dst)) + path_same = self._same_paths(src, dst) + if path_same or not os.path.exists(dst): + continue + + # Backup original file + # todo: add timestamp or uuid to ensure unique + backup = dst + ".bak" + self._backup_to_original[backup] = dst + self.log.debug( + "Backup existing file: {} -> {}".format(dst, backup)) + os.rename(dst, backup) + + # Copy the files to transfer + for dst, (src, opts) in self._transfers.items(): + path_same = self._same_paths(src, dst) + if path_same: + self.log.debug( + "Source and destionation are same files {} -> {}".format( + src, dst)) + continue + + self._create_folder_for_file(dst) + + if opts["mode"] == self.MODE_COPY: + self.log.debug("Copying file ... {} -> {}".format(src, dst)) + copyfile(src, dst) + elif opts["mode"] == self.MODE_HARDLINK: + self.log.debug("Hardlinking file ... 
{} -> {}".format( + src, dst)) + create_hard_link(src, dst) + + self._transferred.append(dst) + + def finalize(self): + # Delete any backed up files + for backup in self._backup_to_original.keys(): + try: + os.remove(backup) + except OSError: + self.log.error( + "Failed to remove backup file: {}".format(backup), + exc_info=True) + + def rollback(self): + errors = 0 + # Rollback any transferred files + for path in self._transferred: + try: + os.remove(path) + except OSError: + errors += 1 + self.log.error( + "Failed to rollback created file: {}".format(path), + exc_info=True) + + # Rollback the backups + for backup, original in self._backup_to_original.items(): + try: + os.rename(backup, original) + except OSError: + errors += 1 + self.log.error( + "Failed to restore original file: {} -> {}".format( + backup, original), + exc_info=True) + + if errors: + self.log.error( + "{} errors occurred during rollback.".format(errors), + exc_info=True) + six.reraise(*sys.exc_info()) + + @property + def transferred(self): + """Return the processed transfers destination paths""" + return list(self._transferred) + + @property + def backups(self): + """Return the backup file paths""" + return list(self._backup_to_original.keys()) + + def _create_folder_for_file(self, path): + dirname = os.path.dirname(path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + self.log.critical("An unexpected error occurred.") + six.reraise(*sys.exc_info()) + + def _same_paths(self, src, dst): + # handles same paths but with C:/project vs c:/project + if os.path.exists(src) and os.path.exists(dst): + return os.stat(src) == os.stat(dst) + + return src == dst diff --git a/openpype/lib/git_progress.py b/openpype/lib/git_progress.py deleted file mode 100644 index 331b7b6745..0000000000 --- a/openpype/lib/git_progress.py +++ /dev/null @@ -1,86 +0,0 @@ -import git -from tqdm import tqdm - - -class _GitProgress(git.remote.RemoteProgress): - """ Class handling displaying progress during git operations. - - This is using **tqdm** for showing progress bars. As **GitPython** - is parsing progress directly from git command, it is somehow unreliable - as in some operations it is difficult to get total count of iterations - to display meaningful progress bar. - - """ - _t = None - _code = 0 - _current_status = '' - _current_max = '' - - _description = { - 256: "Checking out files", - 4: "Counting objects", - 128: "Finding sources", - 32: "Receiving objects", - 64: "Resolving deltas", - 16: "Writing objects" - } - - def __init__(self): - super().__init__() - - def __del__(self): - if self._t is not None: - self._t.close() - - def _detroy_tqdm(self): - """ Used to close tqdm when operation ended. - - """ - if self._t is not None: - self._t.close() - self._t = None - - def _check_mask(self, opcode: int) -> bool: - """" Add meaningful description to **GitPython** opcodes. - - :param opcode: OP_MASK opcode - :type opcode: int - :return: String description of opcode - :rtype: str - - .. 
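Back to the new `FileTransaction` above, a hedged usage sketch of the three-step flow its docstring describes (the paths are hypothetical):

import logging

from openpype.lib.file_transaction import FileTransaction

transaction = FileTransaction(log=logging.getLogger("publish"))
transaction.add("/tmp/render.0001.exr", "/publish/render.0001.exr")
transaction.add(
    "/tmp/scene.ma", "/publish/scene.ma",
    mode=FileTransaction.MODE_HARDLINK,
)

try:
    transaction.process()   # 1) back up existing files, 2) copy/hardlink
except Exception:
    transaction.rollback()  # restore backups, remove transferred files
    raise

transaction.finalize()      # 3) drop the .bak files after success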
seealso:: For opcodes look at :class:`git.RemoteProgress` - - """ - if opcode & self.COUNTING: - return self._description.get(self.COUNTING) - elif opcode & self.CHECKING_OUT: - return self._description.get(self.CHECKING_OUT) - elif opcode & self.WRITING: - return self._description.get(self.WRITING) - elif opcode & self.RECEIVING: - return self._description.get(self.RECEIVING) - elif opcode & self.RESOLVING: - return self._description.get(self.RESOLVING) - elif opcode & self.FINDING_SOURCES: - return self._description.get(self.FINDING_SOURCES) - else: - return "Processing" - - def update(self, op_code, cur_count, max_count=None, message=''): - """ Called when git operation update progress. - - .. seealso:: For more details see - :func:`git.objects.submodule.base.Submodule.update` - `Documentation `_ - - """ - code = self._check_mask(op_code) - if self._current_status != code or self._current_max != max_count: - self._current_max = max_count - self._current_status = code - self._detroy_tqdm() - self._t = tqdm(total=max_count) - self._t.set_description(" . {}".format(code)) - - self._t.update(cur_count) diff --git a/openpype/lib/import_utils.py b/openpype/lib/import_utils.py deleted file mode 100644 index e88c07fca6..0000000000 --- a/openpype/lib/import_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import sys -import importlib -from .log import PypeLogger as Logger - -log = Logger().get_logger(__name__) - - -def discover_host_vendor_module(module_name): - host = os.environ["AVALON_APP"] - pype_root = os.environ["OPENPYPE_REPOS_ROOT"] - main_module = module_name.split(".")[0] - module_path = os.path.join( - pype_root, "hosts", host, "vendor", main_module) - - log.debug( - "Importing module from host vendor path: `{}`".format(module_path)) - - if not os.path.exists(module_path): - log.warning( - "Path not existing: `{}`".format(module_path)) - return None - - sys.path.insert(1, module_path) - return importlib.import_module(module_name) diff --git a/openpype/lib/local_settings.py b/openpype/lib/local_settings.py index 97e99b4b5a..c6c9699240 100644 --- a/openpype/lib/local_settings.py +++ b/openpype/lib/local_settings.py @@ -34,7 +34,7 @@ from openpype.settings import ( get_system_settings ) -from .import validate_mongo_connection +from openpype.client.mongo import validate_mongo_connection _PLACEHOLDER = object() diff --git a/openpype/lib/log.py b/openpype/lib/log.py index f33385e0ba..26dcd86eec 100644 --- a/openpype/lib/log.py +++ b/openpype/lib/log.py @@ -24,12 +24,13 @@ import traceback import threading import copy -from . import Terminal -from .mongo import ( +from openpype.client.mongo import ( MongoEnvNotSet, get_default_components, - OpenPypeMongoConnection + OpenPypeMongoConnection, ) +from . import Terminal + try: import log4mongo from log4mongo.handlers import MongoHandler @@ -41,13 +42,13 @@ except ImportError: USE_UNICODE = hasattr(__builtins__, "unicode") -class PypeStreamHandler(logging.StreamHandler): +class LogStreamHandler(logging.StreamHandler): """ StreamHandler class designed to handle utf errors in python 2.x hosts. 
""" def __init__(self, stream=None): - super(PypeStreamHandler, self).__init__(stream) + super(LogStreamHandler, self).__init__(stream) self.enabled = True def enable(self): @@ -56,7 +57,6 @@ class PypeStreamHandler(logging.StreamHandler): Used to silence output """ self.enabled = True - pass def disable(self): """ Disable StreamHandler @@ -107,13 +107,13 @@ class PypeStreamHandler(logging.StreamHandler): self.handleError(record) -class PypeFormatter(logging.Formatter): +class LogFormatter(logging.Formatter): DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ]' default_formatter = logging.Formatter(DFT) def __init__(self, formats): - super(PypeFormatter, self).__init__() + super(LogFormatter, self).__init__() self.formatters = {} for loglevel in formats: self.formatters[loglevel] = logging.Formatter(formats[loglevel]) @@ -141,7 +141,7 @@ class PypeFormatter(logging.Formatter): return out -class PypeMongoFormatter(logging.Formatter): +class MongoFormatter(logging.Formatter): DEFAULT_PROPERTIES = logging.LogRecord( '', '', '', '', '', '', '', '').__dict__.keys() @@ -161,7 +161,7 @@ class PypeMongoFormatter(logging.Formatter): 'method': record.funcName, 'lineNumber': record.lineno } - document.update(PypeLogger.get_process_data()) + document.update(Logger.get_process_data()) # Standard document decorated with exception info if record.exc_info is not None: @@ -181,7 +181,7 @@ class PypeMongoFormatter(logging.Formatter): return document -class PypeLogger: +class Logger: DFT = '%(levelname)s >>> { %(name)s }: [ %(message)s ] ' DBG = " - { %(name)s }: [ %(message)s ] " INF = ">>> [ %(message)s ] " @@ -216,8 +216,8 @@ class PypeLogger: # Collection name under database in Mongo log_collection_name = "logs" - # OPENPYPE_DEBUG - pype_debug = 0 + # Logging level - OPENPYPE_LOG_LEVEL + log_level = None # Data same for all record documents process_data = None @@ -231,10 +231,7 @@ class PypeLogger: logger = logging.getLogger(name or "__main__") - if cls.pype_debug > 0: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) + logger.setLevel(cls.log_level) add_mongo_handler = cls.use_mongo_logging add_console_handler = True @@ -242,7 +239,7 @@ class PypeLogger: for handler in logger.handlers: if isinstance(handler, MongoHandler): add_mongo_handler = False - elif isinstance(handler, PypeStreamHandler): + elif isinstance(handler, LogStreamHandler): add_console_handler = False if add_console_handler: @@ -295,7 +292,7 @@ class PypeLogger: "username": components["username"], "password": components["password"], "capped": True, - "formatter": PypeMongoFormatter() + "formatter": MongoFormatter() } if components["port"] is not None: kwargs["port"] = int(components["port"]) @@ -306,10 +303,10 @@ class PypeLogger: @classmethod def _get_console_handler(cls): - formatter = PypeFormatter(cls.FORMAT_FILE) - console_handler = PypeStreamHandler() + formatter = LogFormatter(cls.FORMAT_FILE) + console_handler = LogStreamHandler() - console_handler.set_name("PypeStreamHandler") + console_handler.set_name("LogStreamHandler") console_handler.setFormatter(formatter) return console_handler @@ -333,6 +330,9 @@ class PypeLogger: # Define if should logging to mongo be used use_mongo_logging = bool(log4mongo is not None) + if use_mongo_logging: + use_mongo_logging = os.environ.get("OPENPYPE_LOG_TO_SERVER") == "1" + # Set mongo id for process (ONLY ONCE) if use_mongo_logging and cls.mongo_process_id is None: try: @@ -357,8 +357,16 @@ class PypeLogger: # Store result to class definition cls.use_mongo_logging = 
use_mongo_logging
 
-        # Define if is in OPENPYPE_DEBUG mode
-        cls.pype_debug = int(os.getenv("OPENPYPE_DEBUG") or "0")
+        # Define the logging level
+        log_level = os.getenv("OPENPYPE_LOG_LEVEL")
+        if not log_level:
+            # Check OPENPYPE_DEBUG for backwards compatibility
+            op_debug = os.getenv("OPENPYPE_DEBUG")
+            if op_debug and int(op_debug) > 0:
+                log_level = 10
+            else:
+                log_level = 20
+        cls.log_level = int(log_level)
 
         if not os.environ.get("OPENPYPE_MONGO"):
             cls.use_mongo_logging = False
@@ -409,9 +417,9 @@ class PypeLogger:
     def get_process_name(cls):
         """Process name that is like "label" of a process.
 
-        Pype's logging can be used from pype itseld of from hosts. Even in Pype
-        it's good to know if logs are from Pype tray or from pype's event
-        server. This should help to identify that information.
+        OpenPype's logging can be used from OpenPype itself or from hosts.
+        Even in an OpenPype process it's good to know if logs are from tray or
+        from other cli commands. This should help to identify that information.
         """
         if cls._process_name is not None:
             return cls._process_name
@@ -477,23 +485,19 @@ class PypeLogger:
         return OpenPypeMongoConnection.get_mongo_client()
 
 
-def timeit(method):
-    """Print time in function.
-
-    For debugging.
+class PypeLogger(Logger):
+    """Duplicate of 'Logger'.
 
+    Deprecated:
+        Class will be removed after release version 3.16.*
     """
-    log = logging.getLogger()
 
-    def timed(*args, **kw):
-        ts = time.time()
-        result = method(*args, **kw)
-        te = time.time()
-        if 'log_time' in kw:
-            name = kw.get('log_name', method.__name__.upper())
-            kw['log_time'][name] = int((te - ts) * 1000)
-        else:
-            log.debug('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
-            print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
-        return result
-    return timed
+    @classmethod
+    def get_logger(cls, *args, **kwargs):
+        logger = Logger.get_logger(*args, **kwargs)
+        # TODO uncomment when replaced most of places
+        logger.warning((
+            "'openpype.lib.PypeLogger' is a deprecated class."
+            " Please use 'openpype.lib.Logger' instead."
+        ))
+        return logger
diff --git a/openpype/lib/mongo.py b/openpype/lib/mongo.py
index c08e76c75c..bb2ee6016a 100644
--- a/openpype/lib/mongo.py
+++ b/openpype/lib/mongo.py
@@ -1,206 +1,61 @@
-import os
-import sys
-import time
-import logging
-import pymongo
-import certifi
-
-if sys.version_info[0] == 2:
-    from urlparse import urlparse, parse_qs
-else:
-    from urllib.parse import urlparse, parse_qs
+import warnings
+import functools
+from openpype.client.mongo import (
+    MongoEnvNotSet,
+    OpenPypeMongoConnection,
+)
 
 
-class MongoEnvNotSet(Exception):
+class MongoDeprecatedWarning(DeprecationWarning):
     pass
 
 
-def _decompose_url(url):
-    """Decompose mongo url to basic components.
+def mongo_deprecated(func):
+    """Mark functions as deprecated.
 
-    Used for creation of MongoHandler which expect mongo url components as
-    separated kwargs. Components are at the end not used as we're setting
-    connection directly this is just a dumb components for MongoHandler
-    validation pass.
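To illustrate the new level resolution above: `OPENPYPE_LOG_LEVEL` wins when set, otherwise `OPENPYPE_DEBUG` maps to `DEBUG`, and the default stays `INFO`. A standalone sketch of that precedence (the environment values below are assumed for demonstration):

```python
import logging
import os

def resolve_log_level():
    # Mirrors the fallback chain introduced in the log.py change above.
    log_level = os.getenv("OPENPYPE_LOG_LEVEL")
    if not log_level:
        op_debug = os.getenv("OPENPYPE_DEBUG")
        log_level = 10 if op_debug and int(op_debug) > 0 else 20
    return int(log_level)

os.environ.pop("OPENPYPE_LOG_LEVEL", None)
os.environ["OPENPYPE_DEBUG"] = "1"
assert resolve_log_level() == logging.DEBUG   # backwards-compatible path

os.environ["OPENPYPE_LOG_LEVEL"] = "30"
assert resolve_log_level() == logging.WARNING  # explicit level wins
```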
""" - # Use first url from passed url - # - this is because it is possible to pass multiple urls for multiple - # replica sets which would crash on urlparse otherwise - # - please don't use comma in username of password - url = url.split(",")[0] - components = { - "scheme": None, - "host": None, - "port": None, - "username": None, - "password": None, - "auth_db": None - } - result = urlparse(url) - if result.scheme is None: - _url = "mongodb://{}".format(url) - result = urlparse(_url) - - components["scheme"] = result.scheme - components["host"] = result.hostname - try: - components["port"] = result.port - except ValueError: - raise RuntimeError("invalid port specified") - components["username"] = result.username - components["password"] = result.password - - try: - components["auth_db"] = parse_qs(result.query)['authSource'][0] - except KeyError: - # no auth db provided, mongo will use the one we are connecting to - pass - - return components - - -def get_default_components(): - mongo_url = os.environ.get("OPENPYPE_MONGO") - if mongo_url is None: - raise MongoEnvNotSet( - "URL for Mongo logging connection is not set." + @functools.wraps(func) + def new_func(*args, **kwargs): + warnings.simplefilter("always", MongoDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'." + " Function was moved to 'openpype.client.mongo'." + ).format(func.__name__), + category=MongoDeprecatedWarning, + stacklevel=2 ) - return _decompose_url(mongo_url) + return func(*args, **kwargs) + return new_func +@mongo_deprecated +def get_default_components(): + from openpype.client.mongo import get_default_components + + return get_default_components() + + +@mongo_deprecated def should_add_certificate_path_to_mongo_url(mongo_url): - """Check if should add ca certificate to mongo url. + from openpype.client.mongo import should_add_certificate_path_to_mongo_url - Since 30.9.2021 cloud mongo requires newer certificates that are not - available on most of workstation. This adds path to certifi certificate - which is valid for it. To add the certificate path url must have scheme - 'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query. - """ - parsed = urlparse(mongo_url) - query = parse_qs(parsed.query) - lowered_query_keys = set(key.lower() for key in query.keys()) - add_certificate = False - # Check if url 'ssl' or 'tls' are set to 'true' - for key in ("ssl", "tls"): - if key in query and "true" in query["ssl"]: - add_certificate = True - break - - # Check if url contains 'mongodb+srv' - if not add_certificate and parsed.scheme == "mongodb+srv": - add_certificate = True - - # Check if url does already contain certificate path - if add_certificate and "tlscafile" in lowered_query_keys: - add_certificate = False - - return add_certificate + return should_add_certificate_path_to_mongo_url(mongo_url) +@mongo_deprecated def validate_mongo_connection(mongo_uri): - """Check if provided mongodb URL is valid. + from openpype.client.mongo import validate_mongo_connection - Args: - mongo_uri (str): URL to validate. - - Raises: - ValueError: When port in mongo uri is not valid. - pymongo.errors.InvalidURI: If passed mongo is invalid. - pymongo.errors.ServerSelectionTimeoutError: If connection timeout - passed so probably couldn't connect to mongo server. - - """ - client = OpenPypeMongoConnection.create_connection( - mongo_uri, retry_attempts=1 - ) - client.close() + return validate_mongo_connection(mongo_uri) -class OpenPypeMongoConnection: - """Singleton MongoDB connection. 
-
-    Keeps MongoDB connections by url.
-    """
-    mongo_clients = {}
-    log = logging.getLogger("OpenPypeMongoConnection")
-
-    @staticmethod
-    def get_default_mongo_url():
-        return os.environ["OPENPYPE_MONGO"]
-
-    @classmethod
-    def get_mongo_client(cls, mongo_url=None):
-        if mongo_url is None:
-            mongo_url = cls.get_default_mongo_url()
-
-        connection = cls.mongo_clients.get(mongo_url)
-        if connection:
-            # Naive validation of existing connection
-            try:
-                connection.server_info()
-                with connection.start_session():
-                    pass
-            except Exception:
-                connection = None
-
-        if not connection:
-            cls.log.debug("Creating mongo connection to {}".format(mongo_url))
-            connection = cls.create_connection(mongo_url)
-            cls.mongo_clients[mongo_url] = connection
-
-        return connection
-
-    @classmethod
-    def create_connection(cls, mongo_url, timeout=None, retry_attempts=None):
-        parsed = urlparse(mongo_url)
-        # Force validation of scheme
-        if parsed.scheme not in ["mongodb", "mongodb+srv"]:
-            raise pymongo.errors.InvalidURI((
-                "Invalid URI scheme:"
-                " URI must begin with 'mongodb://' or 'mongodb+srv://'"
-            ))
-
-        if timeout is None:
-            timeout = int(os.environ.get("AVALON_TIMEOUT") or 1000)
-
-        kwargs = {
-            "serverSelectionTimeoutMS": timeout
-        }
-        if should_add_certificate_path_to_mongo_url(mongo_url):
-            kwargs["ssl_ca_certs"] = certifi.where()
-
-        mongo_client = pymongo.MongoClient(mongo_url, **kwargs)
-
-        if retry_attempts is None:
-            retry_attempts = 3
-
-        elif not retry_attempts:
-            retry_attempts = 1
-
-        last_exc = None
-        valid = False
-        t1 = time.time()
-        for attempt in range(1, retry_attempts + 1):
-            try:
-                mongo_client.server_info()
-                with mongo_client.start_session():
-                    pass
-                valid = True
-                break
-
-            except Exception as exc:
-                last_exc = exc
-                if attempt < retry_attempts:
-                    cls.log.warning(
-                        "Attempt {} failed. Retrying... ".format(attempt)
-                    )
-                    time.sleep(1)
-
-        if not valid:
-            raise last_exc
-
-        cls.log.info("Connected to {}, delay {:.3f}s".format(
-            mongo_url, time.time() - t1
-        ))
-        return mongo_client
+
+
+__all__ = (
+    "MongoEnvNotSet",
+    "OpenPypeMongoConnection",
+    "get_default_components",
+    "should_add_certificate_path_to_mongo_url",
+    "validate_mongo_connection",
+)
diff --git a/openpype/lib/openpype_version.py b/openpype/lib/openpype_version.py
index d547d34755..e052002468 100644
--- a/openpype/lib/openpype_version.py
+++ b/openpype/lib/openpype_version.py
@@ -57,15 +57,66 @@ def is_running_from_build():
     return True
 
 
+def is_staging_enabled():
+    return os.environ.get("OPENPYPE_USE_STAGING") == "1"
+
+
 def is_running_staging():
    """Currently used OpenPype is staging version.
 
+    This function is not a 100% reliable check of the staging version. It is
+    possible to have staging enabled and yet be running a different version.
+
+    The function is based on 4 factors:
+    - env 'OPENPYPE_IS_STAGING' is set
+    - current production version
+    - current staging version
+    - use staging is enabled
+
+    The check first looks at the 'OPENPYPE_IS_STAGING' environment variable,
+    which can be set to '1'. The value should be set only when a process
+    without access to OpenPypeVersion is launched (e.g. in DCCs). If the
+    current version is the same as the production version it is expected
+    that it is not staging, and it doesn't matter what 'is_staging_enabled'
+    would return. If the current version is the same as the staging version
+    it is expected we're in staging. In all other cases 'is_staging_enabled'
+    is used as the source of the output value.
+
+    The function is used to decide which icon is used. To check e.g. updates
+    the output should be combined with other functions from this file.
 
     Returns:
-        bool: True if openpype version containt 'staging'.
+        bool: Using staging version or not.
     """
-    if "staging" in get_openpype_version():
+
+    if os.environ.get("OPENPYPE_IS_STAGING") == "1":
         return True
-    return False
+
+    if not op_version_control_available():
+        return False
+
+    from openpype.settings import get_global_settings
+
+    global_settings = get_global_settings()
+    production_version = global_settings["production_version"]
+    latest_version = None
+    if not production_version or production_version == "latest":
+        latest_version = get_latest_version(local=False, remote=True)
+        production_version = latest_version
+
+    current_version = get_openpype_version()
+    if current_version == production_version:
+        return False
+
+    staging_version = global_settings["staging_version"]
+    if not staging_version or staging_version == "latest":
+        if latest_version is None:
+            latest_version = get_latest_version(local=False, remote=True)
+        staging_version = latest_version
+
+    if current_version == staging_version:
+        return True
+
+    return is_staging_enabled()
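The resolution order implemented above can be summarized as a small decision function. This sketch uses plain values instead of the settings and version lookups (all inputs are assumed):

```python
def resolve_is_staging(env_is_staging, current, production, staging, staging_enabled):
    # 1) Explicit env override, e.g. for processes without OpenPypeVersion access
    if env_is_staging:
        return True
    # 2) Matching the production version always means "not staging"
    if current == production:
        return False
    # 3) Matching the staging version always means "staging"
    if current == staging:
        return True
    # 4) Otherwise fall back to the user's "use staging" preference
    return staging_enabled

assert resolve_is_staging(False, "3.14.0", "3.14.0", "3.14.1", True) is False
assert resolve_is_staging(False, "3.14.1", "3.14.0", "3.14.1", False) is True
assert resolve_is_staging(False, "3.13.9", "3.14.0", "3.14.1", True) is True
```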
 
 
 # ----------------------------------------
@@ -131,13 +182,11 @@ def get_remote_versions(*args, **kwargs):
     return None
 
 
-def get_latest_version(staging=None, local=None, remote=None):
+def get_latest_version(local=None, remote=None):
     """Get latest version from repository path."""
-    if staging is None:
-        staging = is_running_staging()
+
     if op_version_control_available():
         return get_OpenPypeVersion().get_latest_version(
-            staging=staging,
             local=local,
             remote=remote
         )
@@ -146,9 +195,9 @@ def get_latest_version(staging=None, local=None, remote=None):
 
 def get_expected_studio_version(staging=None):
     """Expected production or staging version in studio."""
-    if staging is None:
-        staging = is_running_staging()
     if op_version_control_available():
+        if staging is None:
+            staging = is_staging_enabled()
         return get_OpenPypeVersion().get_expected_studio_version(staging)
     return None
 
@@ -158,7 +207,7 @@ def get_expected_version(staging=None):
     if expected_version is None:
         # Look for latest if expected version is not set in settings
         expected_version = get_latest_version(
-            staging=staging,
+            local=False,
             remote=True
         )
     return expected_version
diff --git a/openpype/lib/path_templates.py b/openpype/lib/path_templates.py
index 14e5fe59f8..0f99efb430 100644
--- a/openpype/lib/path_templates.py
+++ b/openpype/lib/path_templates.py
@@ -6,11 +6,6 @@ import collections
 
 import six
 
-from .log import PypeLogger
-
-log = PypeLogger.get_logger(__name__)
-
-
 KEY_PATTERN = re.compile(r"(\{.*?[^{0]*\})")
 KEY_PADDING_PATTERN = re.compile(r"([^:]+)\S+[><]\S+")
 SUB_DICT_PATTERN = re.compile(r"([^\[\]]+)")
@@ -211,15 +206,28 @@ class StringTemplate(object):
                 if counted_symb > -1:
                     parts = tmp_parts.pop(counted_symb)
                     counted_symb -= 1
+                    # If part contains only single string keep value
+                    # unchanged
                     if parts:
                         # Remove optional start char
                         parts.pop(0)
-                    if counted_symb < 0:
-                        out_parts = new_parts
-                    else:
-                        out_parts = tmp_parts[counted_symb]
-                    # Store temp parts
-                    out_parts.append(OptionalPart(parts))
+
+                    if not parts:
+                        value = "<>"
+                    elif (
+                        len(parts) == 1
+                        and isinstance(parts[0], six.string_types)
+                    ):
+                        value = "<{}>".format(parts[0])
+                    else:
+                        value = OptionalPart(parts)
+
+                    if counted_symb < 0:
+                        out_parts = new_parts
+                    else:
+                        out_parts = tmp_parts[counted_symb]
+                    # Store value
+                    out_parts.append(value)
                     continue
 
                 if counted_symb < 0:
@@ -365,6 +373,7 @@
    when value of key
in data is dictionary but template expect string of number. """ + used_values = None solved = None template = None @@ -383,6 +392,12 @@ class TemplateResult(str): new_obj.invalid_types = invalid_types return new_obj + def __copy__(self, *args, **kwargs): + return self.copy() + + def __deepcopy__(self, *args, **kwargs): + return self.copy() + def validate(self): if not self.solved: raise TemplateUnsolved( @@ -391,6 +406,30 @@ class TemplateResult(str): self.invalid_types ) + def copy(self): + cls = self.__class__ + return cls( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + + def normalized(self): + """Convert to normalized path.""" + + cls = self.__class__ + return cls( + os.path.normpath(self.replace("\\", "/")), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + class TemplatesResultDict(dict): """Holds and wrap TemplateResults for easy bug report.""" @@ -762,6 +801,7 @@ class OptionalPart: parts(list): Parts of template. Can contain 'str', 'OptionalPart' or 'FormattingPart'. """ + def __init__(self, parts): self._parts = parts diff --git a/openpype/lib/path_tools.py b/openpype/lib/path_tools.py index 851bc872fb..0b6d0a3391 100644 --- a/openpype/lib/path_tools.py +++ b/openpype/lib/path_tools.py @@ -1,19 +1,81 @@ import os import re -import abc -import json import logging -import six import platform +import functools +import warnings -from openpype.settings import get_project_settings - -from .anatomy import Anatomy -from .profiles_filtering import filter_profiles +import clique log = logging.getLogger(__name__) +class PathToolsDeprecatedWarning(DeprecationWarning): + pass + + +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PathToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PathToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) + + +def format_file_size(file_size, suffix=None): + """Returns formatted string with size in appropriate unit. + + Args: + file_size (int): Size of file in bytes. + suffix (str): Suffix for formatted size. Default is 'B' (as bytes). + + Returns: + str: Formatted size using proper unit and passed suffix (e.g. 7 MiB). + """ + + if suffix is None: + suffix = "B" + + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(file_size) < 1024.0: + return "%3.1f%s%s" % (file_size, unit, suffix) + file_size /= 1024.0 + return "%.1f%s%s" % (file_size, "Yi", suffix) + + def create_hard_link(src_path, dst_path): """Create hardlink of file. 
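A quick sanity check of the `format_file_size` helper added above (binary units, default "B" suffix):

```python
assert format_file_size(0) == "0.0B"
assert format_file_size(4096) == "4.0KiB"
assert format_file_size(7 * 1024 * 1024) == "7.0MiB"
assert format_file_size(1536, suffix="B/s") == "1.5KiB/s"
```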
@@ -50,6 +112,43 @@ def create_hard_link(src_path, dst_path):
         )
 
 
+def collect_frames(files):
+    """Return dict of source path and its frame, if from sequence.
+
+    Uses clique as the most precise solution, used when the anatomy template
+    that created the files is not known.
+
+    The assumption is that frames are separated by '.'; negative frames are
+    not allowed.
+
+    Args:
+        files (list, set): Collection of source paths (may hold single value).
+
+    Returns:
+        (dict): {'/asset/subset_v001.0001.png': '0001', ....}
+    """
+
+    patterns = [clique.PATTERNS["frames"]]
+    collections, remainder = clique.assemble(
+        files, minimum_items=1, patterns=patterns)
+
+    sources_and_frames = {}
+    if collections:
+        for collection in collections:
+            src_head = collection.head
+            src_tail = collection.tail
+
+            for index in collection.indexes:
+                src_frame = collection.format("{padding}") % index
+                src_file_name = "{}{}{}".format(
+                    src_head, src_frame, src_tail)
+                sources_and_frames[src_file_name] = src_frame
+    else:
+        sources_and_frames[remainder.pop()] = None
+
+    return sources_and_frames
+
+
 def _rreplace(s, a, b, n=1):
     """Replace a with b in string s from right side n times."""
     return b.join(s.rsplit(a, n))
@@ -119,12 +218,12 @@ def get_version_from_path(file):
     """Find version number in file path string.
 
     Args:
-        file (string): file path
+        file (str): file path
 
     Returns:
-        v: version number in string ('001')
-
+        str: version number in string ('001')
     """
+
     pattern = re.compile(r"[\._]v([0-9]+)", re.IGNORECASE)
     try:
         return pattern.findall(file)[-1]
@@ -140,16 +239,17 @@ def get_last_version_from_path(path_dir, filter):
     """Find last version of given directory content.
 
     Args:
-        path_dir (string): directory path
+        path_dir (str): directory path
         filter (list): list of strings used as file name filter
 
     Returns:
-        string: file name with last version
+        str: file name with last version
 
     Example:
        last_version_file = get_last_version_from_path(
            "/project/shots/shot01/work", ["shot01", "compositing", "nk"])
     """
+
     assert os.path.isdir(path_dir), "`path_dir` argument needs to be directory"
     assert isinstance(filter, list) and (
         len(filter) != 0), "`filter` argument needs to be list and not empty"
@@ -171,78 +271,69 @@ def get_last_version_from_path(path_dir, filter):
     return None
 
 
-def compute_paths(basic_paths_items, project_root):
-    pattern_array = re.compile(r"\[.*\]")
-    project_root_key = "__project_root__"
-    output = []
-    for path_items in basic_paths_items:
-        clean_items = []
-        for path_item in path_items:
-            matches = re.findall(pattern_array, path_item)
-            if len(matches) > 0:
-                path_item = path_item.replace(matches[0], "")
-            if path_item == project_root_key:
-                path_item = project_root
-            clean_items.append(path_item)
-        output.append(os.path.normpath(os.path.sep.join(clean_items)))
-    return output
+@deprecated("openpype.pipeline.project_folders.concatenate_splitted_paths")
+def concatenate_splitted_paths(split_paths, anatomy):
+    """
+    Deprecated:
+        Function will be removed after release version 3.16.*
+    """
+
+    from openpype.pipeline.project_folders import concatenate_splitted_paths
+
+    return concatenate_splitted_paths(split_paths, anatomy)
 
 
+@deprecated
+def get_format_data(anatomy):
+    """
+    Deprecated:
+        Function will be removed after release version 3.16.*
+    """
+
+    from openpype.pipeline.template_data import get_project_template_data
+
+    data = get_project_template_data(project_name=anatomy.project_name)
+    data["root"] = anatomy.roots
+    return data
+
+
+@deprecated("openpype.pipeline.project_folders.fill_paths")
+def fill_paths(path_list, anatomy):
+
""" + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.project_folders import fill_paths + + return fill_paths(path_list, anatomy) + + +@deprecated("openpype.pipeline.project_folders.create_project_folders") def create_project_folders(basic_paths, project_name): - anatomy = Anatomy(project_name) - roots_paths = [] - if isinstance(anatomy.roots, dict): - for root in anatomy.roots.values(): - roots_paths.append(root.value) - else: - roots_paths.append(anatomy.roots.value) + """ + Deprecated: + Function will be removed after release version 3.16.* + """ - for root_path in roots_paths: - project_root = os.path.join(root_path, project_name) - full_paths = compute_paths(basic_paths, project_root) - # Create folders - for path in full_paths: - full_path = path.format(project_root=project_root) - if os.path.exists(full_path): - log.debug( - "Folder already exists: {}".format(full_path) - ) - else: - log.debug("Creating folder: {}".format(full_path)) - os.makedirs(full_path) - - -def _list_path_items(folder_structure): - output = [] - for key, value in folder_structure.items(): - if not value: - output.append(key) - else: - paths = _list_path_items(value) - for path in paths: - if not isinstance(path, (list, tuple)): - path = [path] - - item = [key] - item.extend(path) - output.append(item) - - return output + from openpype.pipeline.project_folders import create_project_folders + + return create_project_folders(project_name, basic_paths) +@deprecated("openpype.pipeline.project_folders.get_project_basic_paths") def get_project_basic_paths(project_name): - project_settings = get_project_settings(project_name) - folder_structure = ( - project_settings["global"]["project_folder_structure"] - ) - if not folder_structure: - return [] + """ + Deprecated: + Function will be removed after release version 3.16.* + """ - if isinstance(folder_structure, str): - folder_structure = json.loads(folder_structure) - return _list_path_items(folder_structure) + from openpype.pipeline.project_folders import get_project_basic_paths + + return get_project_basic_paths(project_name) +@deprecated("openpype.pipeline.workfile.create_workdir_extra_folders") def create_workdir_extra_folders( workdir, host_name, task_type, task_name, project_name, project_settings=None @@ -259,192 +350,18 @@ def create_workdir_extra_folders( project_name (str): Name of project on which task is. project_settings (dict): Prepared project settings. Are loaded if not passed. 
- """ - # Load project settings if not set - if not project_settings: - project_settings = get_project_settings(project_name) - # Load extra folders profiles - extra_folders_profiles = ( - project_settings["global"]["tools"]["Workfiles"]["extra_folders"] + Deprecated: + Function will be removed after release version 3.16.* + """ + + from openpype.pipeline.project_folders import create_workdir_extra_folders + + return create_workdir_extra_folders( + workdir, + host_name, + task_type, + task_name, + project_name, + project_settings ) - # Skip if are empty - if not extra_folders_profiles: - return - - # Prepare profiles filters - filter_data = { - "task_types": task_type, - "task_names": task_name, - "hosts": host_name - } - profile = filter_profiles(extra_folders_profiles, filter_data) - if profile is None: - return - - for subfolder in profile["folders"]: - # Make sure backslashes are converted to forwards slashes - # and does not start with slash - subfolder = subfolder.replace("\\", "/").lstrip("/") - # Skip empty strings - if not subfolder: - continue - - fullpath = os.path.join(workdir, subfolder) - if not os.path.exists(fullpath): - os.makedirs(fullpath) - - -@six.add_metaclass(abc.ABCMeta) -class HostDirmap: - """ - Abstract class for running dirmap on a workfile in a host. - - Dirmap is used to translate paths inside of host workfile from one - OS to another. (Eg. arstist created workfile on Win, different artists - opens same file on Linux.) - - Expects methods to be implemented inside of host: - on_dirmap_enabled: run host code for enabling dirmap - do_dirmap: run host code to do actual remapping - """ - def __init__(self, host_name, project_settings, sync_module=None): - self.host_name = host_name - self.project_settings = project_settings - self.sync_module = sync_module # to limit reinit of Modules - - self._mapping = None # cache mapping - - @abc.abstractmethod - def on_enable_dirmap(self): - """ - Run host dependent operation for enabling dirmap if necessary. - """ - - @abc.abstractmethod - def dirmap_routine(self, source_path, destination_path): - """ - Run host dependent remapping from source_path to destination_path - """ - - def process_dirmap(self): - # type: (dict) -> None - """Go through all paths in Settings and set them using `dirmap`. - - If artists has Site Sync enabled, take dirmap mapping directly from - Local Settings when artist is syncing workfile locally. - - Args: - project_settings (dict): Settings for current project. - - """ - if not self._mapping: - self._mapping = self.get_mappings(self.project_settings) - if not self._mapping: - return - - log.info("Processing directory mapping ...") - self.on_enable_dirmap() - log.info("mapping:: {}".format(self._mapping)) - - for k, sp in enumerate(self._mapping["source-path"]): - try: - print("{} -> {}".format(sp, - self._mapping["destination-path"][k])) - self.dirmap_routine(sp, - self._mapping["destination-path"][k]) - except IndexError: - # missing corresponding destination path - log.error(("invalid dirmap mapping, missing corresponding" - " destination directory.")) - break - except RuntimeError: - log.error("invalid path {} -> {}, mapping not registered".format( # noqa: E501 - sp, self._mapping["destination-path"][k] - )) - continue - - def get_mappings(self, project_settings): - """Get translation from source-path to destination-path. 
- - It checks if Site Sync is enabled and user chose to use local - site, in that case configuration in Local Settings takes precedence - """ - local_mapping = self._get_local_sync_dirmap(project_settings) - dirmap_label = "{}-dirmap".format(self.host_name) - if not self.project_settings[self.host_name].get(dirmap_label) and \ - not local_mapping: - return [] - mapping = local_mapping or \ - self.project_settings[self.host_name][dirmap_label]["paths"] or {} - enbled = self.project_settings[self.host_name][dirmap_label]["enabled"] - mapping_enabled = enbled or bool(local_mapping) - - if not mapping or not mapping_enabled or \ - not mapping.get("destination-path") or \ - not mapping.get("source-path"): - return [] - return mapping - - def _get_local_sync_dirmap(self, project_settings): - """ - Returns dirmap if synch to local project is enabled. - - Only valid mapping is from roots of remote site to local site set - in Local Settings. - - Args: - project_settings (dict) - Returns: - dict : { "source-path": [XXX], "destination-path": [YYYY]} - """ - import json - mapping = {} - - if not project_settings["global"]["sync_server"]["enabled"]: - return mapping - - from openpype.settings.lib import get_site_local_overrides - - if not self.sync_module: - from openpype.modules import ModulesManager - manager = ModulesManager() - self.sync_module = manager.modules_by_name["sync_server"] - - project_name = os.getenv("AVALON_PROJECT") - - active_site = self.sync_module.get_local_normalized_site( - self.sync_module.get_active_site(project_name)) - remote_site = self.sync_module.get_local_normalized_site( - self.sync_module.get_remote_site(project_name)) - log.debug("active {} - remote {}".format(active_site, remote_site)) - - if active_site == "local" \ - and project_name in self.sync_module.get_enabled_projects()\ - and active_site != remote_site: - - sync_settings = self.sync_module.get_sync_project_setting( - os.getenv("AVALON_PROJECT"), exclude_locals=False, - cached=False) - - active_overrides = get_site_local_overrides( - os.getenv("AVALON_PROJECT"), active_site) - remote_overrides = get_site_local_overrides( - os.getenv("AVALON_PROJECT"), remote_site) - - log.debug("local overrides".format(active_overrides)) - log.debug("remote overrides".format(remote_overrides)) - for root_name, active_site_dir in active_overrides.items(): - remote_site_dir = remote_overrides.get(root_name) or\ - sync_settings["sites"][remote_site]["root"][root_name] - if os.path.isdir(active_site_dir): - if not mapping.get("destination-path"): - mapping["destination-path"] = [] - mapping["destination-path"].append(active_site_dir) - - if not mapping.get("source-path"): - mapping["source-path"] = [] - mapping["source-path"].append(remote_site_dir) - - log.debug("local sync mapping:: {}".format(mapping)) - return mapping diff --git a/openpype/lib/plugin_tools.py b/openpype/lib/plugin_tools.py index f11ba56865..10fd3940b8 100644 --- a/openpype/lib/plugin_tools.py +++ b/openpype/lib/plugin_tools.py @@ -1,29 +1,70 @@ # -*- coding: utf-8 -*- """Avalon/Pyblish plugin tools.""" import os -import inspect import logging import re -import json -from .profiles_filtering import filter_profiles - -from openpype.settings import get_project_settings +import warnings +import functools +from openpype.client import get_asset_by_id log = logging.getLogger(__name__) -# Subset name template used when plugin does not have defined any -DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" + +class PluginToolsDeprecatedWarning(DeprecationWarning): + pass 
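All of these deprecation wrappers call `warnings.simplefilter("always", ...)` before warning because Python normally suppresses repeated (and, outside `__main__`, most) `DeprecationWarning`s; without the filter the message would usually never reach users. A minimal demonstration of that behavior:

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("ignore")  # typical default treatment in libraries
    warnings.warn("hidden", DeprecationWarning)
assert not caught

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", DeprecationWarning)
    for _ in range(2):
        warnings.warn("visible", DeprecationWarning)
assert len(caught) == 2  # "always" also defeats duplicate suppression
```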
-class TaskNotSetError(KeyError): - def __init__(self, msg=None): - if not msg: - msg = "Creator's subset name template requires task name." - super(TaskNotSetError, self).__init__(msg) +def deprecated(new_destination): + """Mark functions as deprecated. + + It will result in a warning being emitted when the function is used. + """ + + func = None + if callable(new_destination): + func = new_destination + new_destination = None + + def _decorator(decorated_func): + if new_destination is None: + warning_message = ( + " Please check content of deprecated function to figure out" + " possible replacement." + ) + else: + warning_message = " Please replace your usage with '{}'.".format( + new_destination + ) + + @functools.wraps(decorated_func) + def wrapper(*args, **kwargs): + warnings.simplefilter("always", PluginToolsDeprecatedWarning) + warnings.warn( + ( + "Call to deprecated function '{}'" + "\nFunction was moved or removed.{}" + ).format(decorated_func.__name__, warning_message), + category=PluginToolsDeprecatedWarning, + stacklevel=4 + ) + return decorated_func(*args, **kwargs) + return wrapper + + if func is None: + return _decorator + return _decorator(func) +@deprecated("openpype.pipeline.create.TaskNotSetError") +def TaskNotSetError(*args, **kwargs): + from openpype.pipeline.create import TaskNotSetError + + return TaskNotSetError(*args, **kwargs) + + +@deprecated("openpype.pipeline.create.get_subset_name") def get_subset_name_with_asset_doc( family, variant, @@ -59,64 +100,23 @@ def get_subset_name_with_asset_doc( is not passed. dynamic_data (dict): Dynamic data specific for a creator which creates instance. - dbcon (AvalonMongoDB): Mongo connection to be able query asset document - if 'asset_doc' is not passed. """ - if not family: - return "" - if not host_name: - host_name = os.environ["AVALON_APP"] + from openpype.pipeline.create import get_subset_name - # Use only last part of class family value split by dot (`.`) - family = family.rsplit(".", 1)[-1] - - if project_name is None: - import avalon.api - - project_name = avalon.api.Session["AVALON_PROJECT"] - - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_info = asset_tasks.get(task_name) or {} - task_type = task_info.get("type") - - # Get settings - tools_settings = get_project_settings(project_name)["global"]["tools"] - profiles = tools_settings["creator"]["subset_name_profiles"] - filtering_criteria = { - "families": family, - "hosts": host_name, - "tasks": task_name, - "task_types": task_type - } - - matching_profile = filter_profiles(profiles, filtering_criteria) - template = None - if matching_profile: - template = matching_profile["template"] - - # Make sure template is set (matching may have empty string) - if not template: - template = default_template or DEFAULT_SUBSET_TEMPLATE - - # Simple check of task name existence for template with {task} in - # - missing task should be possible only in Standalone publisher - if not task_name and "{task" in template.lower(): - raise TaskNotSetError() - - fill_pairs = { - "variant": variant, - "family": family, - "task": task_name - } - if dynamic_data: - # Dynamic data may override default values - for key, value in dynamic_data.items(): - fill_pairs[key] = value - - return template.format(**prepare_template_data(fill_pairs)) + return get_subset_name( + family, + variant, + task_name, + asset_doc, + project_name, + host_name, + default_template, + dynamic_data + ) +@deprecated def get_subset_name( family, variant, @@ -135,20 +135,15 @@ def get_subset_name( This 
is legacy function should be replaced with `get_subset_name_with_asset_doc` where asset document is expected. """ - if dbcon is None: - from avalon.api import AvalonMongoDB - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name + from openpype.pipeline.create import get_subset_name - dbcon.install() + if project_name is None: + project_name = dbcon.project_name - asset_doc = dbcon.find_one( - {"_id": asset_id}, - {"data.tasks": True} - ) or {} + asset_doc = get_asset_by_id(project_name, asset_id, fields=["data.tasks"]) - return get_subset_name_with_asset_doc( + return get_subset_name( family, variant, task_name, @@ -204,156 +199,6 @@ def prepare_template_data(fill_pairs): return fill_data -def filter_pyblish_plugins(plugins): - """Filter pyblish plugins by presets. - - This servers as plugin filter / modifier for pyblish. It will load plugin - definitions from presets and filter those needed to be excluded. - - Args: - plugins (dict): Dictionary of plugins produced by :mod:`pyblish-base` - `discover()` method. - - """ - from pyblish import api - - host = api.current_host() - - presets = get_project_settings(os.environ['AVALON_PROJECT']) or {} - # skip if there are no presets to process - if not presets: - return - - # iterate over plugins - for plugin in plugins[:]: - - try: - config_data = presets[host]["publish"][plugin.__name__] - except KeyError: - # host determined from path - file = os.path.normpath(inspect.getsourcefile(plugin)) - file = os.path.normpath(file) - - split_path = file.split(os.path.sep) - if len(split_path) < 4: - log.warning( - 'plugin path too short to extract host {}'.format(file) - ) - continue - - host_from_file = split_path[-4] - plugin_kind = split_path[-2] - - # TODO: change after all plugins are moved one level up - if host_from_file == "openpype": - host_from_file = "global" - - try: - config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501 - except KeyError: - continue - - for option, value in config_data.items(): - if option == "enabled" and value is False: - log.info('removing plugin {}'.format(plugin.__name__)) - plugins.remove(plugin) - else: - log.info('setting {}:{} on plugin {}'.format( - option, value, plugin.__name__)) - - setattr(plugin, option, value) - - -def set_plugin_attributes_from_settings( - plugins, superclass, host_name=None, project_name=None -): - """Change attribute values on Avalon plugins by project settings. - - This function should be used only in host context. Modify - behavior of plugins. - - Args: - plugins (list): Plugins discovered by origin avalon discover method. - superclass (object): Superclass of plugin type (e.g. Cretor, Loader). - host_name (str): Name of host for which plugins are loaded and from. - Value from environment `AVALON_APP` is used if not entered. - project_name (str): Name of project for which settings will be loaded. - Value from environment `AVALON_PROJECT` is used if not entered. - """ - from openpype.pipeline import LegacyCreator, LoaderPlugin - - # determine host application to use for finding presets - if host_name is None: - host_name = os.environ.get("AVALON_APP") - - if project_name is None: - project_name = os.environ.get("AVALON_PROJECT") - - # map plugin superclass to preset json. 
Currently supported is load and - # create (LoaderPlugin and LegacyCreator) - plugin_type = None - if superclass is LoaderPlugin or issubclass(superclass, LoaderPlugin): - plugin_type = "load" - elif superclass is LegacyCreator or issubclass(superclass, LegacyCreator): - plugin_type = "create" - - if not host_name or not project_name or plugin_type is None: - msg = "Skipped attributes override from settings." - if not host_name: - msg += " Host name is not defined." - - if not project_name: - msg += " Project name is not defined." - - if plugin_type is None: - msg += " Plugin type is unsupported for class {}.".format( - superclass.__name__ - ) - - print(msg) - return - - print(">>> Finding presets for {}:{} ...".format(host_name, plugin_type)) - - project_settings = get_project_settings(project_name) - plugin_type_settings = ( - project_settings - .get(host_name, {}) - .get(plugin_type, {}) - ) - global_type_settings = ( - project_settings - .get("global", {}) - .get(plugin_type, {}) - ) - if not global_type_settings and not plugin_type_settings: - return - - for plugin in plugins: - plugin_name = plugin.__name__ - - plugin_settings = None - # Look for plugin settings in host specific settings - if plugin_name in plugin_type_settings: - plugin_settings = plugin_type_settings[plugin_name] - - # Look for plugin settings in global settings - elif plugin_name in global_type_settings: - plugin_settings = global_type_settings[plugin_name] - - if not plugin_settings: - continue - - print(">>> We have preset for {}".format(plugin_name)) - for option, value in plugin_settings.items(): - if option == "enabled" and value is False: - setattr(plugin, "active", False) - print(" - is disabled by preset") - else: - setattr(plugin, option, value) - print(" - setting `{}`: `{}`".format(option, value)) - - def source_hash(filepath, *args): """Generate simple identifier for a source file. This is used to identify whether a source file has previously been @@ -373,102 +218,3 @@ def source_hash(filepath, *args): time = str(os.path.getmtime(filepath)) size = str(os.path.getsize(filepath)) return "|".join([file_name, time, size] + list(args)).replace(".", ",") - - -def get_unique_layer_name(layers, name): - """ - Gets all layer names and if 'name' is present in them, increases - suffix by 1 (eg. creates unique layer name - for Loader) - Args: - layers (list): of strings, names only - name (string): checked value - - Returns: - (string): name_00X (without version) - """ - names = {} - for layer in layers: - layer_name = re.sub(r'_\d{3}$', '', layer) - if layer_name in names.keys(): - names[layer_name] = names[layer_name] + 1 - else: - names[layer_name] = 1 - occurrences = names.get(name, 0) - - return "{}_{:0>3d}".format(name, occurrences + 1) - - -def get_background_layers(file_url): - """ - Pulls file name from background json file, enrich with folder url for - AE to be able import files. - - Order is important, follows order in json. - - Args: - file_url (str): abs url of background json - - Returns: - (list): of abs paths to images - """ - with open(file_url) as json_file: - data = json.load(json_file) - - layers = list() - bg_folder = os.path.dirname(file_url) - for child in data['children']: - if child.get("filename"): - layers.append(os.path.join(bg_folder, child.get("filename")). - replace("\\", "/")) - else: - for layer in child['children']: - if layer.get("filename"): - layers.append(os.path.join(bg_folder, - layer.get("filename")). 
- replace("\\", "/")) - return layers - - -def parse_json(path): - """Parses json file at 'path' location - - Returns: - (dict) or None if unparsable - Raises: - AsssertionError if 'path' doesn't exist - """ - path = path.strip('\"') - assert os.path.isfile(path), ( - "Path to json file doesn't exist. \"{}\"".format(path) - ) - data = None - with open(path, "r") as json_file: - try: - data = json.load(json_file) - except Exception as exc: - log.error( - "Error loading json: " - "{} - Exception: {}".format(path, exc) - ) - return data - - -def get_batch_asset_task_info(ctx): - """Parses context data from webpublisher's batch metadata - - Returns: - (tuple): asset, task_name (Optional), task_type - """ - task_type = "default_task_type" - task_name = None - asset = None - - if ctx["type"] == "task": - items = ctx["path"].split('/') - asset = items[-2] - task_name = ctx["name"] - task_type = ctx["attributes"]["type"] - else: - asset = ctx["name"] - - return asset, task_name, task_type diff --git a/openpype/lib/profiles_filtering.py b/openpype/lib/profiles_filtering.py index 0bb901aff8..370703a68b 100644 --- a/openpype/lib/profiles_filtering.py +++ b/openpype/lib/profiles_filtering.py @@ -44,12 +44,6 @@ def _profile_exclusion(matching_profiles, logger): Returns: dict: Most matching profile. """ - - logger.info( - "Search for first most matching profile in match order:" - " Host name -> Task name -> Family." - ) - if not matching_profiles: return None @@ -168,6 +162,15 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): _keys_order.append(key) keys_order = tuple(_keys_order) + log_parts = " | ".join([ + "{}: \"{}\"".format(*item) + for item in key_values.items() + ]) + + logger.info( + "Looking for matching profile for: {}".format(log_parts) + ) + matching_profiles = None highest_profile_points = -1 # Each profile get 1 point for each matching filter. Profile with most @@ -205,11 +208,6 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): if profile_points == highest_profile_points: matching_profiles.append((profile, profile_scores)) - log_parts = " | ".join([ - "{}: \"{}\"".format(*item) - for item in key_values.items() - ]) - if not matching_profiles: logger.info( "None of profiles match your setup. {}".format(log_parts) @@ -221,4 +219,9 @@ def filter_profiles(profiles_data, key_values, keys_order=None, logger=None): "More than one profile match your setup. {}".format(log_parts) ) - return _profile_exclusion(matching_profiles, logger) + profile = _profile_exclusion(matching_profiles, logger) + if profile: + logger.info( + "Profile selected: {}".format(profile) + ) + return profile diff --git a/openpype/lib/project_backpack.py b/openpype/lib/project_backpack.py index 11fd0c0c3e..ff2f1d4b88 100644 --- a/openpype/lib/project_backpack.py +++ b/openpype/lib/project_backpack.py @@ -24,8 +24,11 @@ from bson.json_util import ( dumps, CANONICAL_JSON_OPTIONS ) - -from avalon.api import AvalonMongoDB +from openpype.client import ( + get_project, + get_whole_project, +) +from openpype.pipeline import AvalonMongoDB DOCUMENTS_FILE_NAME = "database" METADATA_FILE_NAME = "metadata" @@ -50,14 +53,12 @@ def pack_project(project_name, destination_dir=None): Args: project_name(str): Project that should be packaged. - destination_dir(str): Optinal path where zip will be stored. Project's + destination_dir(str): Optional path where zip will be stored. Project's root is used if not passed. 
""" print("Creating package of project \"{}\"".format(project_name)) # Validate existence of project - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) if not project_doc: raise ValueError("Project \"{}\" was not found in database".format( project_name @@ -118,7 +119,7 @@ def pack_project(project_name, destination_dir=None): temp_docs_json = s.name # Query all project documents and store them to temp json - docs = list(dbcon.find({})) + docs = list(get_whole_project(project_name)) data = dumps( docs, json_options=CANONICAL_JSON_OPTIONS ) @@ -147,7 +148,7 @@ def pack_project(project_name, destination_dir=None): # Cleanup os.remove(temp_docs_json) os.remove(temp_metadata_json) - dbcon.uninstall() + print("*** Packing finished ***") @@ -207,7 +208,7 @@ def unpack_project(path_to_zip, new_root=None): print("Using different root path {}".format(new_root)) root_path = new_root - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) roots = project_doc["config"]["roots"] key = tuple(roots.keys())[0] update_key = "config.roots.{}.{}".format(key, low_platform) diff --git a/openpype/lib/python_module_tools.py b/openpype/lib/python_module_tools.py index f62c848e4a..6fad3b547f 100644 --- a/openpype/lib/python_module_tools.py +++ b/openpype/lib/python_module_tools.py @@ -5,8 +5,9 @@ import importlib import inspect import logging +import six + log = logging.getLogger(__name__) -PY3 = sys.version_info[0] == 3 def import_filepath(filepath, module_name=None): @@ -28,7 +29,7 @@ def import_filepath(filepath, module_name=None): # Prepare module object where content of file will be parsed module = types.ModuleType(module_name) - if PY3: + if six.PY3: # Use loader so module has full specs module_loader = importlib.machinery.SourceFileLoader( module_name, filepath @@ -38,7 +39,7 @@ def import_filepath(filepath, module_name=None): # Execute module code and store content to module with open(filepath) as _stream: # Execute content and store it to module object - exec(_stream.read(), module.__dict__) + six.exec_(_stream.read(), module.__dict__) module.__file__ = filepath return module @@ -129,20 +130,12 @@ def classes_from_module(superclass, module): for name in dir(module): # It could be anything at this point obj = getattr(module, name) - if not inspect.isclass(obj): + if not inspect.isclass(obj) or obj is superclass: continue - # These are subclassed from nothing, not even `object` - if not len(obj.__bases__) > 0: - continue + if issubclass(obj, superclass): + classes.append(obj) - # Use string comparison rather than `issubclass` - # in order to support reloading of this module. - bases = recursive_bases_from_class(obj) - if not any(base.__name__ == superclass.__name__ for base in bases): - continue - - classes.append(obj) return classes @@ -228,7 +221,7 @@ def import_module_from_dirpath(dirpath, folder_name, dst_module_name=None): dst_module_name(str): Parent module name under which can be loaded module added. 
""" - if PY3: + if six.PY3: module = _import_module_from_dirpath_py3( dirpath, folder_name, dst_module_name ) diff --git a/openpype/lib/terminal.py b/openpype/lib/terminal.py index 5121b6ec26..f6072ed209 100644 --- a/openpype/lib/terminal.py +++ b/openpype/lib/terminal.py @@ -98,7 +98,7 @@ class Terminal: r"\*\*\* WRN": _SB + _LY + r"*** WRN" + _RST, r" \- ": _SB + _LY + r" - " + _RST, r"\[ ": _SB + _LG + r"[ " + _RST, - r"\]": _SB + _LG + r"]" + _RST, + r" \]": _SB + _LG + r" ]" + _RST, r"{": _LG + r"{", r"}": r"}" + _RST, r"\(": _LY + r"(", diff --git a/openpype/lib/transcoding.py b/openpype/lib/transcoding.py index 6bab6a8160..57279d0380 100644 --- a/openpype/lib/transcoding.py +++ b/openpype/lib/transcoding.py @@ -17,6 +17,9 @@ from .vendor_bin_utils import ( # Max length of string that is supported by ffmpeg MAX_FFMPEG_STRING_LEN = 8196 +# Not allowed symbols in attributes for ffmpeg +NOT_ALLOWED_FFMPEG_CHARS = ("\"", ) + # OIIO known xml tags STRING_TAGS = { "format" @@ -39,6 +42,28 @@ XML_CHAR_REF_REGEX_HEX = re.compile(r"&#x?[0-9a-fA-F]+;") # Regex to parse array attributes ARRAY_TYPE_REGEX = re.compile(r"^(int|float|string)\[\d+\]$") +IMAGE_EXTENSIONS = { + ".ani", ".anim", ".apng", ".art", ".bmp", ".bpg", ".bsave", ".cal", + ".cin", ".cpc", ".cpt", ".dds", ".dpx", ".ecw", ".exr", ".fits", + ".flic", ".flif", ".fpx", ".gif", ".hdri", ".hevc", ".icer", + ".icns", ".ico", ".cur", ".ics", ".ilbm", ".jbig", ".jbig2", + ".jng", ".jpeg", ".jpeg-ls", ".jpeg", ".2000", ".jpg", ".xr", + ".jpeg", ".xt", ".jpeg-hdr", ".kra", ".mng", ".miff", ".nrrd", + ".ora", ".pam", ".pbm", ".pgm", ".ppm", ".pnm", ".pcx", ".pgf", + ".pictor", ".png", ".psb", ".psp", ".qtvr", ".ras", + ".rgbe", ".logluv", ".tiff", ".sgi", ".tga", ".tiff", ".tiff/ep", + ".tiff/it", ".ufo", ".ufp", ".wbmp", ".webp", ".xbm", ".xcf", + ".xpm", ".xwd" +} + +VIDEO_EXTENSIONS = { + ".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", + ".f4p", ".f4v", ".flv", ".gif", ".gifv", ".m2v", ".m4p", ".m4v", + ".mkv", ".mng", ".mov", ".mp2", ".mp4", ".mpe", ".mpeg", ".mpg", + ".mpv", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm", ".rmvb", + ".roq", ".svi", ".vob", ".webm", ".wmv", ".yuv" +} + def get_transcode_temp_directory(): """Creates temporary folder for transcoding. @@ -52,26 +77,38 @@ def get_transcode_temp_directory(): ) -def get_oiio_info_for_input(filepath, logger=None): +def get_oiio_info_for_input(filepath, logger=None, subimages=False): """Call oiiotool to get information about input and return stdout. Stdout should contain xml format string. 
""" args = [ - get_oiio_tools_path(), "--info", "-v", "-i:infoformat=xml", filepath + get_oiio_tools_path(), + "--info", + "-v" ] + if subimages: + args.append("-a") + + args.extend(["-i:infoformat=xml", filepath]) + output = run_subprocess(args, logger=logger) output = output.replace("\r\n", "\n") xml_started = False + subimages_lines = [] lines = [] for line in output.split("\n"): if not xml_started: if not line.startswith("<"): continue xml_started = True + if xml_started: lines.append(line) + if line == "": + subimages_lines.append(lines) + lines = [] if not xml_started: raise ValueError( @@ -80,12 +117,19 @@ def get_oiio_info_for_input(filepath, logger=None): ) ) - xml_text = "\n".join(lines) - return parse_oiio_xml_output(xml_text, logger=logger) + output = [] + for subimage_lines in subimages_lines: + xml_text = "\n".join(subimage_lines) + output.append(parse_oiio_xml_output(xml_text, logger=logger)) + + if subimages: + return output + return output[0] class RationalToInt: """Rational value stored as division of 2 integers using string.""" + def __init__(self, string_value): parts = string_value.split("/") top = float(parts[0]) @@ -132,16 +176,16 @@ def convert_value_by_type_name(value_type, value, logger=None): if value_type == "int": return int(value) - if value_type == "float": + if value_type in ("float", "double"): return float(value) # Vectors will probably have more types - if value_type == "vec2f": + if value_type in ("vec2f", "float2", "float2d"): return [float(item) for item in value.split(",")] # Matrix should be always have square size of element 3x3, 4x4 # - are returned as list of lists - if value_type == "matrix": + if value_type in ("matrix", "matrixd"): output = [] current_index = -1 parts = value.split(",") @@ -151,7 +195,7 @@ def convert_value_by_type_name(value_type, value, logger=None): elif parts_len == 4: divisor = 2 elif parts_len == 9: - divisor == 3 + divisor = 3 elif parts_len == 16: divisor = 4 else: @@ -173,7 +217,7 @@ def convert_value_by_type_name(value_type, value, logger=None): if value_type == "rational2i": return RationalToInt(value) - if value_type == "vector": + if value_type in ("vector", "vectord"): parts = [part.strip() for part in value.split(",")] output = [] for part in parts: @@ -201,8 +245,8 @@ def convert_value_by_type_name(value_type, value, logger=None): ) return output - logger.info(( - "MISSING IMPLEMENTATION:" + logger.debug(( + "Dev note (missing implementation):" " Unknown attrib type \"{}\". Value: {}" ).format(value_type, value)) return value @@ -260,8 +304,8 @@ def parse_oiio_xml_output(xml_string, logger=None): # - feel free to add more tags else: value = child.text - logger.info(( - "MISSING IMPLEMENTATION:" + logger.debug(( + "Dev note (missing implementation):" " Unknown tag \"{}\". 
Value \"{}\"" ).format(tag_name, value)) @@ -355,6 +399,10 @@ def should_convert_for_ffmpeg(src_filepath): if not input_info: return None + subimages = input_info.get("subimages") + if subimages is not None and subimages > 1: + return True + # Check compression compression = input_info["attribs"].get("compression") if compression in ("dwaa", "dwab"): @@ -367,14 +415,23 @@ def should_convert_for_ffmpeg(src_filepath): return None for attr_value in input_info["attribs"].values(): - if ( - isinstance(attr_value, str) - and len(attr_value) > MAX_FFMPEG_STRING_LEN - ): + if not isinstance(attr_value, str): + continue + + if len(attr_value) > MAX_FFMPEG_STRING_LEN: return True + + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + return True return False +# Deprecated since 2022 4 20 +# - Reason - Doesn't convert sequences right way: Can't handle gaps, reuse +# first frame for all frames and changes filenames when input +# is sequence. +# - use 'convert_input_paths_for_ffmpeg' instead def convert_for_ffmpeg( first_input_path, output_dir, @@ -402,6 +459,12 @@ def convert_for_ffmpeg( if logger is None: logger = logging.getLogger(__name__) + logger.warning(( + "DEPRECATED: 'openpype.lib.transcoding.convert_for_ffmpeg' is" + " deprecated function of conversion for FFMpeg. Please replace usage" + " with 'openpype.lib.transcoding.convert_input_paths_for_ffmpeg'" + )) + ext = os.path.splitext(first_input_path)[1].lower() if ext != ".exr": raise ValueError(( @@ -413,7 +476,7 @@ def convert_for_ffmpeg( if input_frame_start is not None and input_frame_end is not None: is_sequence = int(input_frame_end) != int(input_frame_start) - input_info = get_oiio_info_for_input(first_input_path) + input_info = get_oiio_info_for_input(first_input_path, logger=logger) # Change compression only if source compression is "dwaa" or "dwab" # - they're not supported in ffmpeg @@ -422,7 +485,12 @@ def convert_for_ffmpeg( compression = "none" # Prepare subprocess arguments - oiio_cmd = [get_oiio_tools_path()] + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] # Add input compression if available if compression: oiio_cmd.extend(["--compression", compression]) @@ -443,13 +511,21 @@ def convert_for_ffmpeg( input_channels.append(alpha) input_channels_str = ",".join(input_channels) - oiio_cmd.extend([ + subimages = input_info.get("subimages") + input_arg = "-i" + if subimages is None or subimages == 1: # Tell oiiotool which channels should be loaded # - other channels are not loaded to memory so helps to avoid memory # leak issues - "-i:ch={}".format(input_channels_str), first_input_path, + # - this option is crashing if used on multipart/subimages exrs + input_arg += ":ch={}".format(input_channels_str) + + oiio_cmd.extend([ + input_arg, first_input_path, # Tell oiiotool which channels should be put to top stack (and output) - "--ch", channels_arg + "--ch", channels_arg, + # Use first subimage + "--subimage", "0" ]) # Add frame definitions to arguments @@ -458,28 +534,45 @@ def convert_for_ffmpeg( "--frames", "{}-{}".format(input_frame_start, input_frame_end) ]) - ignore_attr_changes_added = False for attr_name, attr_value in input_info["attribs"].items(): if not isinstance(attr_value, str): continue # Remove attributes that have string value longer than allowed length - # for ffmpeg + # for ffmpeg or when containt unallowed symbols + erase_reason = "Missing reason" + erase_attribute = False if len(attr_value) > MAX_FFMPEG_STRING_LEN: - if not 
ignore_attr_changes_added: - # Attrite changes won't be added to attributes itself - ignore_attr_changes_added = True - oiio_cmd.append("--sansattrib") + erase_reason = "has a value that is too long ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: # Set attribute to empty string logger.info(( - "Removed attribute \"{}\" from metadata" - " because has too long value ({} chars)." - ).format(attr_name, len(attr_value))) + "Removed attribute \"{}\" from metadata because it {}" + ).format(attr_name, erase_reason)) oiio_cmd.extend(["--eraseattrib", attr_name]) # Add last argument - path to output - base_file_name = os.path.basename(first_input_path) - output_path = os.path.join(output_dir, base_file_name) + if is_sequence: + ext = os.path.splitext(first_input_path)[1] + base_filename = "tmp.%{:0>2}d{}".format( + len(str(input_frame_end)), ext + ) + else: + base_filename = os.path.basename(first_input_path) + output_path = os.path.join(output_dir, base_filename) oiio_cmd.extend([ "-o", output_path ]) @@ -488,6 +581,140 @@ def convert_for_ffmpeg( run_subprocess(oiio_cmd, logger=logger) +def convert_input_paths_for_ffmpeg( + input_paths, + output_dir, + logger=None +): + """Convert source files to a format supported by ffmpeg. + + Currently can convert only exrs. The input filepaths should be files + of the same type. Information about the input is loaded only from the + first found file. + + Filenames of input files are kept, so make sure that the output + directory is not the same directory as the input files have. + - This way it can handle gaps and can keep input filenames without + handling a frame template + + Args: + input_paths (list): Paths that should be converted. It is expected + to contain a single file or an image sequence of the same type. + output_dir (str): Path to directory where output will be rendered. + Must not be the same as the input files' directory. + logger (logging.Logger): Logger used for logging. + + Raises: + ValueError: If input filepath has extension not supported by function. + Currently only the ".exr" extension is supported. + """ + if logger is None: + logger = logging.getLogger(__name__) + + first_input_path = input_paths[0] + ext = os.path.splitext(first_input_path)[1].lower() + if ext != ".exr": + raise ValueError(( + "Function 'convert_input_paths_for_ffmpeg' currently supports" + " only \".exr\" extension. Got \"{}\"." + ).format(ext)) + + input_info = get_oiio_info_for_input(first_input_path, logger=logger) + + # Change compression only if source compression is "dwaa" or "dwab" + # - they're not supported in ffmpeg + compression = input_info["attribs"].get("compression") + if compression in ("dwaa", "dwab"): + compression = "none" + + # Collect channels to export + channel_names = input_info["channelnames"] + review_channels = get_convert_rgb_channels(channel_names) + if review_channels is None: + raise ValueError( + "Couldn't find channels that can be used for conversion." 
+ ) + + red, green, blue, alpha = review_channels + input_channels = [red, green, blue] + # TODO find subimage index where rgba is available for multipart exrs + channels_arg = "R={},G={},B={}".format(red, green, blue) + if alpha is not None: + channels_arg += ",A={}".format(alpha) + input_channels.append(alpha) + input_channels_str = ",".join(input_channels) + + subimages = input_info.get("subimages") + input_arg = "-i" + if subimages is None or subimages == 1: + # Tell oiiotool which channels should be loaded + # - other channels are not loaded to memory so helps to avoid memory + # leak issues + # - this option crashes if used on multipart exrs + input_arg += ":ch={}".format(input_channels_str) + + for input_path in input_paths: + # Prepare subprocess arguments + oiio_cmd = [ + get_oiio_tools_path(), + + # Don't add any additional attributes + "--nosoftwareattrib", + ] + # Add input compression if available + if compression: + oiio_cmd.extend(["--compression", compression]) + + oiio_cmd.extend([ + input_arg, input_path, + # Tell oiiotool which channels should be put to top stack + # (and output) + "--ch", channels_arg, + # Use first subimage + "--subimage", "0" + ]) + + for attr_name, attr_value in input_info["attribs"].items(): + if not isinstance(attr_value, str): + continue + + # Remove attributes that have string value longer than allowed + # length for ffmpeg or when they contain disallowed symbols + erase_reason = "Missing reason" + erase_attribute = False + if len(attr_value) > MAX_FFMPEG_STRING_LEN: + erase_reason = "has a value that is too long ({} chars).".format( + len(attr_value) + ) + erase_attribute = True + + if not erase_attribute: + for char in NOT_ALLOWED_FFMPEG_CHARS: + if char in attr_value: + erase_attribute = True + erase_reason = ( + "contains unsupported character \"{}\"." + ).format(char) + break + + if erase_attribute: + # Set attribute to empty string + logger.info(( + "Removed attribute \"{}\" from metadata because it {}" + ).format(attr_name, erase_reason)) + oiio_cmd.extend(["--eraseattrib", attr_name]) + + # Add last argument - path to output + base_filename = os.path.basename(input_path) + output_path = os.path.join(output_dir, base_filename) + oiio_cmd.extend([ + "-o", output_path + ]) + + logger.debug("Conversion command: {}".format(" ".join(oiio_cmd))) + run_subprocess(oiio_cmd, logger=logger) + + # FFMPEG functions def get_ffprobe_data(path_to_file, logger=None): """Load data about entered filepath via ffprobe. @@ -564,9 +791,9 @@ def get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd=None): def _ffmpeg_mxf_format_args(ffprobe_data, source_ffmpeg_cmd): input_format = ffprobe_data["format"] format_tags = input_format.get("tags") or {} - product_name = format_tags.get("product_name") or "" + operational_pattern_ul = format_tags.get("operational_pattern_ul") or "" output = [] - if "opatom" in product_name.lower(): + if operational_pattern_ul == "060e2b34.04010102.0d010201.10030000": output.extend(["-f", "mxf_opatom"]) return output @@ -773,3 +1000,40 @@ def convert_ffprobe_fps_value(str_value): fps = int(fps) return str(fps) + + +def convert_ffprobe_fps_to_float(value): + """Convert string value of frame rate to float. + + Copy of 'convert_ffprobe_fps_value' which raises exceptions on invalid + value, does not convert value to string and does not return "Unknown" + string. + + Args: + value (str): Value to be converted. + + Returns: + float: Converted frame rate as float. If the divisor in value is '0' + then '0.0' is returned. 
+ + Raises: + ValueError: Passed value is invalid for conversion. + """ + + if not value: + raise ValueError("Got empty value.") + + items = value.split("/") + if len(items) == 1: + return float(items[0]) + + if len(items) > 2: + raise ValueError(( + "FPS expression contains multiple dividers \"{}\"." + ).format(value)) + + dividend = float(items.pop(0)) + divisor = float(items.pop(0)) + if divisor == 0.0: + return 0.0 + return dividend / divisor diff --git a/openpype/lib/usdlib.py b/openpype/lib/usdlib.py index 89021156b4..20703ee308 100644 --- a/openpype/lib/usdlib.py +++ b/openpype/lib/usdlib.py @@ -8,7 +8,8 @@ except ImportError: # Allow to fall back on Multiverse 6.3.0+ pxr usd library from mvpxr import Usd, UsdGeom, Sdf, Kind -from avalon import io, api +from openpype.client import get_project, get_asset_by_name +from openpype.pipeline import legacy_io, Anatomy log = logging.getLogger(__name__) @@ -125,7 +126,8 @@ def create_model(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -175,7 +177,8 @@ def create_shade(filename, asset, variant_subsets): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -210,7 +213,8 @@ def create_shade_variation(filename, asset, model_variant, shade_variants): """ - asset_doc = io.find_one({"name": asset, "type": "asset"}) + project_name = legacy_io.active_project() + asset_doc = get_asset_by_name(project_name, asset) assert asset_doc, "Asset not found: %s" % asset variants = [] @@ -310,21 +314,25 @@ def get_usd_master_path(asset, subset, representation): """ - project = io.find_one( - {"type": "project"}, projection={"config.template.publish": True} + project_name = legacy_io.active_project() + anatomy = Anatomy(project_name) + project_doc = get_project( + project_name, + fields=["name", "data.code"] ) - template = project["config"]["template"]["publish"] if isinstance(asset, dict) and "name" in asset: # Allow explicitly passing asset document asset_doc = asset else: - asset_doc = io.find_one({"name": asset, "type": "asset"}) + asset_doc = get_asset_by_name(project_name, asset, fields=["name"]) - path = template.format( - **{ - "root": api.registered_root(), - "project": api.Session["AVALON_PROJECT"], + formatted_result = anatomy.format( + { + "project": { + "name": project_name, + "code": project_doc.get("data", {}).get("code") + }, "asset": asset_doc["name"], "subset": subset, "representation": representation, @@ -332,6 +340,7 @@ def get_usd_master_path(asset, subset, representation): } ) + path = formatted_result["publish"]["path"] # Remove the version folder subset_folder = os.path.dirname(os.path.dirname(path)) master_folder = os.path.join(subset_folder, "master") diff --git a/openpype/lib/vendor_bin_utils.py b/openpype/lib/vendor_bin_utils.py index 23e28ea304..b6797dbba0 100644 --- a/openpype/lib/vendor_bin_utils.py +++ b/openpype/lib/vendor_bin_utils.py @@ -1,10 +1,33 @@ import os import logging import platform +import subprocess log = logging.getLogger("Vendor utils") +class CachedToolPaths: + """Cache already used and discovered tools and their executables. 
+ + Discovering path can take some time and can trigger subprocesses so it's + better to cache the paths on first get. + """ + + _cached_paths = {} + + @classmethod + def is_tool_cached(cls, tool): + return tool in cls._cached_paths + + @classmethod + def get_executable_path(cls, tool): + return cls._cached_paths.get(tool) + + @classmethod + def cache_executable_path(cls, tool, path): + cls._cached_paths[tool] = path + + def is_file_executable(filepath): """Filepath lead to executable file. @@ -37,9 +60,10 @@ def find_executable(executable): path to file. Returns: - str: Full path to executable with extension (is file). - None: When the executable was not found. + Union[str, None]: Full path to executable with extension which was + found otherwise None. """ + # Skip if passed path is file if is_file_executable(executable): return executable @@ -47,24 +71,36 @@ def find_executable(executable): low_platform = platform.system().lower() _, ext = os.path.splitext(executable) - # Prepare variants for which it will be looked - variants = [executable] - # Add other extension variants only if passed executable does not have one - if not ext: - if low_platform == "windows": - exts = [".exe", ".ps1", ".bat"] - for ext in os.getenv("PATHEXT", "").split(os.pathsep): - ext = ext.lower() - if ext and ext not in exts: - exts.append(ext) - else: - exts = [".sh"] + # Prepare extensions to check + exts = set() + if ext: + exts.add(ext.lower()) - for ext in exts: - variant = executable + ext - if is_file_executable(variant): - return variant - variants.append(variant) + else: + # Add other possible extension variants only if passed executable + # does not have any + if low_platform == "windows": + exts |= {".exe", ".ps1", ".bat"} + for ext in os.getenv("PATHEXT", "").split(os.pathsep): + exts.add(ext.lower()) + + else: + exts |= {".sh"} + + # Executable is a path but there may be missing extension + # - this can happen primarily on windows where + # e.g. "ffmpeg" should be "ffmpeg.exe" + exe_dir, exe_filename = os.path.split(executable) + if exe_dir and os.path.isdir(exe_dir): + for filename in os.listdir(exe_dir): + filepath = os.path.join(exe_dir, filename) + basename, ext = os.path.splitext(filename) + if ( + basename == exe_filename + and ext.lower() in exts + and is_file_executable(filepath) + ): + return filepath # Get paths where to look for executable path_str = os.environ.get("PATH", None) @@ -74,13 +110,27 @@ def find_executable(executable): elif hasattr(os, "defpath"): path_str = os.defpath - if path_str: - paths = path_str.split(os.pathsep) - for path in paths: - for variant in variants: - filepath = os.path.abspath(os.path.join(path, variant)) - if is_file_executable(filepath): - return filepath + if not path_str: + return None + + paths = path_str.split(os.pathsep) + for path in paths: + if not os.path.isdir(path): + continue + for filename in os.listdir(path): + filepath = os.path.abspath(os.path.join(path, filename)) + # Filename matches executable exactly + if filename == executable and is_file_executable(filepath): + return filepath + + basename, ext = os.path.splitext(filename) + if ( + basename == executable + and ext.lower() in exts + and is_file_executable(filepath) + ): + return filepath + return None @@ -98,6 +148,7 @@ def get_vendor_bin_path(bin_app): Returns: str: Path to vendorized binaries folder. 
""" + return os.path.join( os.environ["OPENPYPE_ROOT"], "vendor", @@ -107,6 +158,123 @@ def get_vendor_bin_path(bin_app): ) +def find_tool_in_custom_paths(paths, tool, validation_func=None): + """Find a tool executable in custom paths. + + Args: + paths (Iterable[str]): Iterable of paths where to look for tool. + tool (str): Name of tool (binary file) to find in passed paths. + validation_func (Function): Custom validation function of path. + Function must expect one argument which is path to executable. + If not passed only 'find_executable' is used to be able identify + if path is valid. + + Reuturns: + Union[str, None]: Path to validated executable or None if was not + found. + """ + + for path in paths: + # Skip empty strings + if not path: + continue + + # Handle cases when path is just an executable + # - it allows to use executable from PATH + # - basename must match 'tool' value (without extension) + extless_path, ext = os.path.splitext(path) + if extless_path == tool: + executable_path = find_executable(tool) + if executable_path and ( + validation_func is None + or validation_func(executable_path) + ): + return executable_path + continue + + # Normalize path because it should be a path and check if exists + normalized = os.path.normpath(path) + if not os.path.exists(normalized): + continue + + # Note: Path can be both file and directory + + # If path is a file validate it + if os.path.isfile(normalized): + basename, ext = os.path.splitext(os.path.basename(path)) + # Check if the filename has actually the sane bane as 'tool' + if basename == tool: + executable_path = find_executable(normalized) + if executable_path and ( + validation_func is None + or validation_func(executable_path) + ): + return executable_path + + # Check if path is a directory and look for tool inside the dir + if os.path.isdir(normalized): + executable_path = find_executable(os.path.join(normalized, tool)) + if executable_path and ( + validation_func is None + or validation_func(executable_path) + ): + return executable_path + return None + + +def _check_args_returncode(args): + try: + # Python 2 compatibility where DEVNULL is not available + if hasattr(subprocess, "DEVNULL"): + proc = subprocess.Popen( + args, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + proc.wait() + else: + with open(os.devnull, "w") as devnull: + proc = subprocess.Popen( + args, stdout=devnull, stderr=devnull, + ) + proc.wait() + + except Exception: + return False + return proc.returncode == 0 + + +def _oiio_executable_validation(filepath): + """Validate oiio tool executable if can be executed. + + Validation has 2 steps. First is using 'find_executable' to fill possible + missing extension or fill directory then launch executable and validate + that it can be executed. For that is used '--help' argument which is fast + and does not need any other inputs. + + Any possible crash of missing libraries or invalid build should be catched. + + Main reason is to validate if executable can be executed on OS just running + which can be issue ob linux machines. + + Note: + It does not validate if the executable is really a oiio tool which + should be used. + + Args: + filepath (str): Path to executable. + + Returns: + bool: Filepath is valid executable. + """ + + filepath = find_executable(filepath) + if not filepath: + return False + + return _check_args_returncode([filepath, "--help"]) + + def get_oiio_tools_path(tool="oiiotool"): """Path to vendorized OpenImageIO tool executables. 
+ + def get_oiio_tools_path(tool="oiiotool"): """Path to vendorized OpenImageIO tool executables. @@ -116,8 +284,63 @@ tool (string): Tool name (oiiotool, maketx, ...). Default is "oiiotool". """ - oiio_dir = get_vendor_bin_path("oiio") - return find_executable(os.path.join(oiio_dir, tool)) + + if CachedToolPaths.is_tool_cached(tool): + return CachedToolPaths.get_executable_path(tool) + + custom_paths_str = os.environ.get("OPENPYPE_OIIO_PATHS") or "" + tool_executable_path = find_tool_in_custom_paths( + custom_paths_str.split(os.pathsep), + tool, + _oiio_executable_validation + ) + + if not tool_executable_path: + oiio_dir = get_vendor_bin_path("oiio") + if platform.system().lower() == "linux": + oiio_dir = os.path.join(oiio_dir, "bin") + default_path = find_executable(os.path.join(oiio_dir, tool)) + if default_path and _oiio_executable_validation(default_path): + tool_executable_path = default_path + + # Look to PATH for the tool + if not tool_executable_path: + from_path = find_executable(tool) + if from_path and _oiio_executable_validation(from_path): + tool_executable_path = from_path + + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path + + +def _ffmpeg_executable_validation(filepath): + """Validate that the ffmpeg tool executable can be executed. + + Validation has 2 steps. First, 'find_executable' is used to fill a + possibly missing extension or directory, then the executable is launched + to validate that it can be executed. For that the '-version' argument is + used, which is fast and does not need any other inputs. + + Any possible crash from missing libraries or an invalid build should be + caught. + + The main reason is to validate that the executable can be executed on + the OS at all, which can be an issue on linux machines. + + Note: + It does not validate if the executable is really an ffmpeg tool. + + Args: + filepath (str): Path to executable. + + Returns: + bool: Filepath is a valid executable. + """ + + filepath = find_executable(filepath) + if not filepath: + return False + + return _check_args_returncode([filepath, "-version"]) def get_ffmpeg_tool_path(tool="ffmpeg"): """Path to vendorized ffmpeg executable. @@ -130,10 +353,33 @@ Args: tool (string): Tool name (ffmpeg, ffprobe, ...). Returns: str: Full path to ffmpeg executable. 
""" - ffmpeg_dir = get_vendor_bin_path("ffmpeg") - if platform.system().lower() == "windows": - ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") - return find_executable(os.path.join(ffmpeg_dir, tool)) + + if CachedToolPaths.is_tool_cached(tool): + return CachedToolPaths.get_executable_path(tool) + + custom_paths_str = os.environ.get("OPENPYPE_FFMPEG_PATHS") or "" + tool_executable_path = find_tool_in_custom_paths( + custom_paths_str.split(os.pathsep), + tool, + _ffmpeg_executable_validation + ) + + if not tool_executable_path: + ffmpeg_dir = get_vendor_bin_path("ffmpeg") + if platform.system().lower() == "windows": + ffmpeg_dir = os.path.join(ffmpeg_dir, "bin") + tool_path = find_executable(os.path.join(ffmpeg_dir, tool)) + if tool_path and _ffmpeg_executable_validation(tool_path): + tool_executable_path = tool_path + + # Look to PATH for the tool + if not tool_executable_path: + from_path = find_executable(tool) + if from_path and _oiio_executable_validation(from_path): + tool_executable_path = from_path + + CachedToolPaths.cache_executable_path(tool, tool_executable_path) + return tool_executable_path def is_oiio_supported(): diff --git a/openpype/modules/__init__.py b/openpype/modules/__init__.py index 68b5f6c247..1f345feea9 100644 --- a/openpype/modules/__init__.py +++ b/openpype/modules/__init__.py @@ -1,8 +1,17 @@ # -*- coding: utf-8 -*- +from .interfaces import ( + ILaunchHookPaths, + IPluginPaths, + ITrayModule, + ITrayAction, + ITrayService, + ISettingsChangeListener, + IHostAddon, +) + from .base import ( OpenPypeModule, OpenPypeAddOn, - OpenPypeInterface, load_modules, @@ -18,9 +27,16 @@ from .base import ( __all__ = ( + "ILaunchHookPaths", + "IPluginPaths", + "ITrayModule", + "ITrayAction", + "ITrayService", + "ISettingsChangeListener", + "IHostAddon", + "OpenPypeModule", "OpenPypeAddOn", - "OpenPypeInterface", "load_modules", diff --git a/openpype/modules/avalon_apps/avalon_app.py b/openpype/modules/avalon_apps/avalon_app.py index 51a22323f1..a0226ecc5c 100644 --- a/openpype/modules/avalon_apps/avalon_app.py +++ b/openpype/modules/avalon_apps/avalon_app.py @@ -1,7 +1,6 @@ import os -import openpype -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule + +from openpype.modules import OpenPypeModule, ITrayModule class AvalonModule(OpenPypeModule, ITrayModule): @@ -26,7 +25,8 @@ class AvalonModule(OpenPypeModule, ITrayModule): self.avalon_mongo_timeout = avalon_mongo_timeout # Tray attributes - self.libraryloader = None + self._library_loader_imported = None + self._library_loader_window = None self.rest_api_obj = None def get_global_environments(self): @@ -41,21 +41,11 @@ class AvalonModule(OpenPypeModule, ITrayModule): def tray_init(self): # Add library tool + self._library_loader_imported = False try: - from Qt import QtCore from openpype.tools.libraryloader import LibraryLoaderWindow - libraryloader = LibraryLoaderWindow( - show_projects=True, - show_libraries=True - ) - # Remove always on top flag for tray - window_flags = libraryloader.windowFlags() - if window_flags | QtCore.Qt.WindowStaysOnTopHint: - window_flags ^= QtCore.Qt.WindowStaysOnTopHint - libraryloader.setWindowFlags(window_flags) - self.libraryloader = libraryloader - + self._library_loader_imported = True except Exception: self.log.warning( "Couldn't load Library loader tool for tray.", @@ -64,10 +54,10 @@ class AvalonModule(OpenPypeModule, ITrayModule): # Definition of Tray menu def tray_menu(self, tray_menu): - if self.libraryloader is None: + if not 
self._library_loader_imported: return - from Qt import QtWidgets + from qtpy import QtWidgets # Actions action_library_loader = QtWidgets.QAction( "Loader", tray_menu @@ -84,17 +74,31 @@ class AvalonModule(OpenPypeModule, ITrayModule): return def show_library_loader(self): - if self.libraryloader is None: - return + if self._library_loader_window is None: + from qtpy import QtCore + from openpype.tools.libraryloader import LibraryLoaderWindow + from openpype.pipeline import install_openpype_plugins - self.libraryloader.show() + libraryloader = LibraryLoaderWindow( + show_projects=True, + show_libraries=True + ) + # Remove always on top flag for tray + window_flags = libraryloader.windowFlags() + if window_flags | QtCore.Qt.WindowStaysOnTopHint: + window_flags ^= QtCore.Qt.WindowStaysOnTopHint + libraryloader.setWindowFlags(window_flags) + self._library_loader_window = libraryloader + + install_openpype_plugins() + + self._library_loader_window.show() # Raise and activate the window # for MacOS - self.libraryloader.raise_() + self._library_loader_window.raise_() # for Windows - self.libraryloader.activateWindow() - self.libraryloader.refresh() + self._library_loader_window.activateWindow() # Webserver module implementation def webserver_initialization(self, server_manager): diff --git a/openpype/modules/avalon_apps/rest_api.py b/openpype/modules/avalon_apps/rest_api.py index 533050fc0c..a52ce1b6df 100644 --- a/openpype/modules/avalon_apps/rest_api.py +++ b/openpype/modules/avalon_apps/rest_api.py @@ -1,4 +1,3 @@ -import os import json import datetime @@ -6,7 +5,12 @@ from bson.objectid import ObjectId from aiohttp.web_response import Response -from avalon.api import AvalonMongoDB +from openpype.client import ( + get_projects, + get_project, + get_assets, + get_asset_by_name, +) from openpype_modules.webserver.base_routes import RestApiEndpoint @@ -15,19 +19,13 @@ class _RestApiEndpoint(RestApiEndpoint): self.resource = resource super(_RestApiEndpoint, self).__init__() - @property - def dbcon(self): - return self.resource.dbcon - class AvalonProjectsEndpoint(_RestApiEndpoint): async def get(self) -> Response: - output = [] - for project_name in self.dbcon.database.collection_names(): - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) - output.append(project_doc) + output = [ + project_doc + for project_doc in get_projects() + ] return Response( status=200, body=self.resource.encode(output), @@ -37,9 +35,7 @@ class AvalonProjectsEndpoint(_RestApiEndpoint): class AvalonProjectEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - project_doc = self.dbcon.database[project_name].find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if project_doc: return Response( status=200, @@ -54,9 +50,7 @@ class AvalonProjectEndpoint(_RestApiEndpoint): class AvalonAssetsEndpoint(_RestApiEndpoint): async def get(self, project_name) -> Response: - asset_docs = list(self.dbcon.database[project_name].find({ - "type": "asset" - })) + asset_docs = list(get_assets(project_name)) return Response( status=200, body=self.resource.encode(asset_docs), @@ -66,10 +60,7 @@ class AvalonAssetsEndpoint(_RestApiEndpoint): class AvalonAssetEndpoint(_RestApiEndpoint): async def get(self, project_name, asset_name) -> Response: - asset_doc = self.dbcon.database[project_name].find_one({ - "type": "asset", - "name": asset_name - }) + asset_doc = get_asset_by_name(project_name, asset_name) if asset_doc: return Response( status=200, @@ -89,9 +80,6 @@ class 
AvalonRestApiResource: self.module = avalon_module self.server_manager = server_manager - self.dbcon = AvalonMongoDB() - self.dbcon.install() - self.prefix = "/avalon" self.endpoint_defs = ( diff --git a/openpype/modules/base.py b/openpype/modules/base.py index 5cdeb86087..0fd21492e8 100644 --- a/openpype/modules/base.py +++ b/openpype/modules/base.py @@ -9,11 +9,11 @@ import logging import platform import threading import collections +import traceback from uuid import uuid4 from abc import ABCMeta, abstractmethod import six -import openpype from openpype.settings import ( get_system_settings, SYSTEM_SETTINGS_KEY, @@ -26,7 +26,20 @@ from openpype.settings.lib import ( get_studio_system_settings_overrides, load_json_file ) -from openpype.lib import PypeLogger + +from openpype.lib import ( + Logger, + import_filepath, + import_module_from_dirpath +) + +from .interfaces import ( + OpenPypeInterface, + IPluginPaths, + IHostAddon, + ITrayModule, + ITrayService +) # Files that will be always ignored on modules import IGNORED_FILENAMES = ( @@ -37,6 +50,8 @@ IGNORED_DEFAULT_FILENAMES = ( "__init__.py", "base.py", "interfaces.py", + "example_addons", + "default_modules", ) @@ -47,6 +62,7 @@ class _ModuleClass(object): Object of this class can be stored to `sys.modules` and used for storing dynamically imported modules. """ + def __init__(self, name): # Call setattr on super class super(_ModuleClass, self).__setattr__("name", name) @@ -90,7 +106,7 @@ class _ModuleClass(object): def log(self): if self._log is None: super(_ModuleClass, self).__setattr__( - "_log", PypeLogger.get_logger(self.name) + "_log", Logger.get_logger(self.name) ) return self._log @@ -114,15 +130,25 @@ class _InterfacesClass(_ModuleClass): - this is because interfaces must be available even if are missing implementation """ + def __getattr__(self, attr_name): if attr_name not in self.__attributes__: if attr_name in ("__path__", "__file__"): return None - raise ImportError(( + raise AttributeError(( "cannot import name '{}' from 'openpype_interfaces'" ).format(attr_name)) + if _LoadCache.interfaces_loaded and attr_name != "log": + stack = list(traceback.extract_stack()) + stack.pop(-1) + self.log.warning(( + "Using deprecated import of \"{}\" from 'openpype_interfaces'." 
+ " Please switch to use import" + " from 'openpype.modules.interfaces'" + " (will be removed after 3.16.x).{}" + ).format(attr_name, "".join(traceback.format_list(stack)))) return self.__attributes__[attr_name] @@ -136,7 +162,7 @@ class _LoadCache: def get_default_modules_dir(): """Path to default OpenPype modules.""" - current_dir = os.path.abspath(os.path.dirname(__file__)) + current_dir = os.path.dirname(os.path.abspath(__file__)) output = [] for folder_name in ("default_modules", ): @@ -274,54 +300,26 @@ def load_modules(force=False): def _load_modules(): - # Import helper functions from lib - from openpype.lib import ( - import_filepath, - import_module_from_dirpath - ) - # Key under which will be modules imported in `sys.modules` modules_key = "openpype_modules" # Change `sys.modules` sys.modules[modules_key] = openpype_modules = _ModuleClass(modules_key) - log = PypeLogger.get_logger("ModulesLoader") - - current_dir = os.path.abspath(os.path.dirname(__file__)) - processed_paths = set() - processed_paths.add(current_dir) - # Import default modules imported from 'openpype.modules' - for filename in os.listdir(current_dir): - # Ignore filenames - if ( - filename in IGNORED_FILENAMES - or filename in IGNORED_DEFAULT_FILENAMES - ): - continue - - fullpath = os.path.join(current_dir, filename) - basename, ext = os.path.splitext(filename) - - if not os.path.isdir(fullpath) and ext not in (".py", ): - continue - - try: - import_str = "openpype.modules.{}".format(basename) - new_import_str = "{}.{}".format(modules_key, basename) - default_module = __import__(import_str, fromlist=("", )) - sys.modules[new_import_str] = default_module - setattr(openpype_modules, basename, default_module) - - except Exception: - msg = ( - "Failed to import default module '{}'." 
- ).format(basename) - log.error(msg, exc_info=True) + log = Logger.get_logger("ModulesLoader") # Look for OpenPype modules in paths defined with `get_module_dirs` # - dynamically imported OpenPype modules and addons - for dirpath in get_module_dirs(): + module_dirs = get_module_dirs() + # Add current directory at first place + # - has small differences in import logic + current_dir = os.path.abspath(os.path.dirname(__file__)) + hosts_dir = os.path.join(os.path.dirname(current_dir), "hosts") + module_dirs.insert(0, hosts_dir) + module_dirs.insert(0, current_dir) + + processed_paths = set() + for dirpath in module_dirs: # Skip already processed paths if dirpath in processed_paths: continue @@ -333,64 +331,80 @@ def _load_modules(): ).format(dirpath)) continue + is_in_current_dir = dirpath == current_dir + is_in_host_dir = dirpath == hosts_dir for filename in os.listdir(dirpath): # Ignore filenames if filename in IGNORED_FILENAMES: continue + if ( + is_in_current_dir + and filename in IGNORED_DEFAULT_FILENAMES + ): + continue + fullpath = os.path.join(dirpath, filename) basename, ext = os.path.splitext(filename) - if not os.path.isdir(fullpath) and ext not in (".py", ): + # Validations + if os.path.isdir(fullpath): + # Check existence of init file + init_path = os.path.join(fullpath, "__init__.py") + if not os.path.exists(init_path): + log.debug(( + "Module directory does not contain __init__.py" + " file {}" + ).format(fullpath)) + continue + + elif ext not in (".py", ): continue # TODO add more logic how to define if folder is module or not # - check manifest and content of manifest try: - if os.path.isdir(fullpath): - # Module without init file can't be used as OpenPype module - # because the module class could not be imported - init_file = os.path.join(fullpath, "__init__.py") - if not os.path.exists(init_file): - log.info(( - "Skipping module directory because of" - " missing \"__init__.py\" file. \"{}\"" - ).format(fullpath)) - continue + # Don't import dynamically current directory modules + if is_in_current_dir: + import_str = "openpype.modules.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) + default_module = __import__(import_str, fromlist=("", )) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + elif is_in_host_dir: + import_str = "openpype.hosts.{}".format(basename) + new_import_str = "{}.{}".format(modules_key, basename) + # Until all hosts are converted to be able use them as + # modules is this error check needed + try: + default_module = __import__( + import_str, fromlist=("", ) + ) + sys.modules[new_import_str] = default_module + setattr(openpype_modules, basename, default_module) + + except Exception: + log.warning( + "Failed to import host folder {}".format(basename), + exc_info=True + ) + + elif os.path.isdir(fullpath): import_module_from_dirpath(dirpath, filename, modules_key) - elif ext in (".py", ): + else: module = import_filepath(fullpath) setattr(openpype_modules, basename, module) except Exception: - log.error( - "Failed to import '{}'.".format(fullpath), - exc_info=True - ) - - -class _OpenPypeInterfaceMeta(ABCMeta): - """OpenPypeInterface meta class to print proper string.""" - def __str__(self): - return "<'OpenPypeInterface.{}'>".format(self.__name__) - - def __repr__(self): - return str(self) - - -@six.add_metaclass(_OpenPypeInterfaceMeta) -class OpenPypeInterface: - """Base class of Interface that can be used as Mixin with abstract parts. 
- - This is way how OpenPype module or addon can tell that has implementation - for specific part or for other module/addon. - - Child classes of OpenPypeInterface may be used as mixin in different - OpenPype modules which means they have to have implemented methods defined - in the interface. By default interface does not have any abstract parts. - """ - pass + if is_in_current_dir: + msg = "Failed to import default module '{}'.".format( + basename + ) + else: + msg = "Failed to import module '{}'.".format(fullpath) + log.error(msg, exc_info=True) @@ -417,7 +431,7 @@ class OpenPypeModule: def __init__(self, manager, settings): self.manager = manager - self.log = PypeLogger.get_logger(self.name) + self.log = Logger.get_logger(self.name) self.initialize(settings) @@ -434,10 +448,12 @@ class OpenPypeModule: It is not recommended to override __init__ that's why specific method was implemented. """ + pass def connect_with_modules(self, enabled_modules): """Connect with other enabled modules.""" + pass def get_global_environments(self): @@ -445,8 +461,41 @@ class OpenPypeModule: Environment variables that can be get only from system settings. """ + return {} + def modify_application_launch_arguments(self, application, env): + """Give option to modify launch environments before application launch. + + Implementation is optional. To change environments modify passed + dictionary of environments. + + Args: + application (Application): Application that is launched. + env (dict): Current environment variables. + """ + + pass + + def on_host_install(self, host, host_name, project_name): + """Host was installed which gives option to handle in-host logic. + + It is a good option to register in-host event callbacks which are + specific for the module. The module is kept in memory for the rest of + the process. + + Arguments may change in the future. E.g. 'host_name' should be possible + to receive from 'host' object. + + Args: + host (ModuleType): Access to installed/registered host object. + host_name (str): Name of host. + project_name (str): Project name which is main part of host + context. + """ + + pass + def cli(self, module_click_group): """Add commands to click group. @@ -467,6 +516,7 @@ class OpenPypeModule: def mycommand(): print("my_command") """ + pass @@ -503,6 +553,40 @@ class ModulesManager: self.initialize_modules() self.connect_modules() + def __getitem__(self, module_name): + return self.modules_by_name[module_name] + + def get(self, module_name, default=None): + """Access module by name. + + Args: + module_name (str): Name of module which should be returned. + default (Any): Default output if module is not available. + + Returns: + Union[OpenPypeModule, None]: Module found by name or None. + """ + return self.modules_by_name.get(module_name, default) + + def get_enabled_module(self, module_name, default=None): + """Fast access to enabled module. + + If the module is available but not enabled, the default value is + returned. + + Args: + module_name (str): Name of module which should be returned. + default (Any): Default output if module is not available or is + not enabled. + + Returns: + Union[OpenPypeModule, None]: Enabled module found by name or None. + """ + + module = self.get(module_name) + if module is not None and module.enabled: + return module + return default + def initialize_modules(self): """Import and initialize modules.""" # Make sure modules are loaded @@ -656,8 +740,6 @@ and "actions" each containing list of paths. 
""" # Output structure - from openpype_interfaces import IPluginPaths - output = { "publish": [], "create": [], @@ -704,42 +786,104 @@ class ModulesManager: ).format(expected_keys, " | ".join(msg_items))) return output - def collect_launch_hook_paths(self): - """Helper to collect hooks from modules inherited ILaunchHookPaths. - - Returns: - list: Paths to launch hook directories. - """ - from openpype_interfaces import ILaunchHookPaths - - str_type = type("") - expected_types = (list, tuple, set) - + def _collect_plugin_paths(self, method_name, *args, **kwargs): output = [] for module in self.get_enabled_modules(): - # Skip module that do not inherit from `ILaunchHookPaths` - if not isinstance(module, ILaunchHookPaths): + # Skip module that do not inherit from `IPluginPaths` + if not isinstance(module, IPluginPaths): continue - hook_paths = module.get_launch_hook_paths() - if not hook_paths: - continue - - # Convert string to list - if isinstance(hook_paths, str_type): - hook_paths = [hook_paths] - - # Skip invalid types - if not isinstance(hook_paths, expected_types): - self.log.warning(( - "Result of `get_launch_hook_paths`" - " has invalid type {}. Expected {}" - ).format(type(hook_paths), expected_types)) - continue - - output.extend(hook_paths) + method = getattr(module, method_name) + paths = method(*args, **kwargs) + if paths: + # Convert to list if value is not list + if not isinstance(paths, (list, tuple, set)): + paths = [paths] + output.extend(paths) return output + def collect_create_plugin_paths(self, host_name): + """Helper to collect creator plugin paths from modules. + + Args: + host_name (str): For which host are creators meant. + + Returns: + list: List of creator plugin paths. + """ + + return self._collect_plugin_paths( + "get_create_plugin_paths", + host_name + ) + + collect_creator_plugin_paths = collect_create_plugin_paths + + def collect_load_plugin_paths(self, host_name): + """Helper to collect load plugin paths from modules. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of load plugin paths. + """ + + return self._collect_plugin_paths( + "get_load_plugin_paths", + host_name + ) + + def collect_publish_plugin_paths(self, host_name): + """Helper to collect load plugin paths from modules. + + Args: + host_name (str): For which host are load plugins meant. + + Returns: + list: List of pyblish plugin paths. + """ + + return self._collect_plugin_paths( + "get_publish_plugin_paths", + host_name + ) + + def get_host_module(self, host_name): + """Find host module by host name. + + Args: + host_name (str): Host name for which is found host module. + + Returns: + OpenPypeModule: Found host module by name. + None: There was not found module inheriting IHostAddon which has + host name set to passed 'host_name'. + """ + + for module in self.get_enabled_modules(): + if ( + isinstance(module, IHostAddon) + and module.host_name == host_name + ): + return module + return None + + def get_host_names(self): + """List of available host names based on host modules. + + Returns: + Iterable[str]: All available host names based on enabled modules + inheriting 'IHostAddon'. + """ + + host_names = { + module.host_name + for module in self.get_enabled_modules() + if isinstance(module, IHostAddon) + } + return host_names + def print_report(self): """Print out report of time spent on modules initialization parts. 
@@ -862,6 +1006,7 @@ class TrayModulesManager(ModulesManager): modules_menu_order = ( "user", "ftrack", + "kitsu", "muster", "launcher_tool", "avalon", @@ -874,7 +1019,7 @@ class TrayModulesManager(ModulesManager): ) def __init__(self): - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.modules = [] self.modules_by_id = {} @@ -913,8 +1058,6 @@ class TrayModulesManager(ModulesManager): self.tray_menu(tray_menu) def get_enabled_tray_modules(self): - from openpype_interfaces import ITrayModule - output = [] for module in self.modules: if module.enabled and isinstance(module, ITrayModule): @@ -990,8 +1133,6 @@ class TrayModulesManager(ModulesManager): self._report["Tray menu"] = report def start_modules(self): - from openpype_interfaces import ITrayService - report = {} time_start = time.time() prev_start_time = time_start @@ -1050,7 +1191,7 @@ def get_module_settings_defs(): settings_defs = [] - log = PypeLogger.get_logger("ModuleSettingsLoad") + log = Logger.get_logger("ModuleSettingsLoad") for raw_module in openpype_modules: for attr_name in dir(raw_module): diff --git a/openpype/modules/clockify/clockify_module.py b/openpype/modules/clockify/clockify_module.py index 932ce87c36..300d5576e2 100644 --- a/openpype/modules/clockify/clockify_module.py +++ b/openpype/modules/clockify/clockify_module.py @@ -2,16 +2,17 @@ import os import threading import time +from openpype.modules import ( + OpenPypeModule, + ITrayModule, + IPluginPaths +) + from .clockify_api import ClockifyAPI from .constants import ( CLOCKIFY_FTRACK_USER_PATH, CLOCKIFY_FTRACK_SERVER_PATH ) -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( - ITrayModule, - IPluginPaths -) class ClockifyModule( @@ -182,7 +183,7 @@ class ClockifyModule( # Definition of Tray menu def tray_menu(self, parent_menu): # Menu for Tray App - from Qt import QtWidgets + from qtpy import QtWidgets menu = QtWidgets.QMenu("Clockify", parent_menu) menu.setProperty("submenu", "on") diff --git a/openpype/modules/clockify/launcher_actions/ClockifyStart.py b/openpype/modules/clockify/launcher_actions/ClockifyStart.py index db51964eb7..7663aecc31 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifyStart.py +++ b/openpype/modules/clockify/launcher_actions/ClockifyStart.py @@ -1,13 +1,9 @@ -from avalon import api, io -from openpype.api import Logger +from openpype.client import get_asset_by_name +from openpype.pipeline import LauncherAction from openpype_modules.clockify.clockify_api import ClockifyAPI -log = Logger().get_logger(__name__) - - -class ClockifyStart(api.Action): - +class ClockifyStart(LauncherAction): name = "clockify_start_timer" label = "Clockify - Start Timer" icon = "clockify_icon" @@ -21,20 +17,19 @@ class ClockifyStart(api.Action): return False def process(self, session, **kwargs): - project_name = session['AVALON_PROJECT'] - asset_name = session['AVALON_ASSET'] - task_name = session['AVALON_TASK'] + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] description = asset_name - asset = io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - if asset is not None: - desc_items = asset.get('data', {}).get('parents', []) + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["data.parents"] + ) + if asset_doc is not None: + desc_items = asset_doc.get("data", {}).get("parents", []) desc_items.append(asset_name) desc_items.append(task_name) - description = 
'/'.join(desc_items) + description = "/".join(desc_items) project_id = self.clockapi.get_project_id(project_name) tag_ids = [] diff --git a/openpype/modules/clockify/launcher_actions/ClockifySync.py b/openpype/modules/clockify/launcher_actions/ClockifySync.py index 02982d373a..c346a1b4f6 100644 --- a/openpype/modules/clockify/launcher_actions/ClockifySync.py +++ b/openpype/modules/clockify/launcher_actions/ClockifySync.py @@ -1,10 +1,9 @@ -from avalon import api, io +from openpype.client import get_projects, get_project from openpype_modules.clockify.clockify_api import ClockifyAPI -from openpype.api import Logger -log = Logger().get_logger(__name__) +from openpype.pipeline import LauncherAction -class ClockifySync(api.Action): +class ClockifySync(LauncherAction): name = "sync_to_clockify" label = "Sync to Clockify" @@ -18,39 +17,36 @@ class ClockifySync(api.Action): return self.have_permissions def process(self, session, **kwargs): - project_name = session.get('AVALON_PROJECT', None) + project_name = session.get("AVALON_PROJECT") or "" projects_to_sync = [] - if project_name.strip() == '' or project_name is None: - for project in io.projects(): - projects_to_sync.append(project) + if project_name.strip(): + projects_to_sync = [get_project(project_name)] else: - project = io.find_one({'type': 'project'}) - projects_to_sync.append(project) + projects_to_sync = get_projects() projects_info = {} for project in projects_to_sync: - task_types = project['config']['tasks'].keys() - projects_info[project['name']] = task_types + task_types = project["config"]["tasks"].keys() + projects_info[project["name"]] = task_types clockify_projects = self.clockapi.get_projects() for project_name, task_types in projects_info.items(): - if project_name not in clockify_projects: - response = self.clockapi.add_project(project_name) - if 'id' not in response: - self.log.error('Project {} can\'t be created'.format( - project_name - )) - continue - project_id = response['id'] - else: - project_id = clockify_projects[project_name] + if project_name in clockify_projects: + continue + + response = self.clockapi.add_project(project_name) + if "id" not in response: + self.log.error("Project {} can't be created".format( + project_name + )) + continue clockify_workspace_tags = self.clockapi.get_tags() for task_type in task_types: if task_type not in clockify_workspace_tags: response = self.clockapi.add_tag(task_type) - if 'id' not in response: + if "id" not in response: self.log.error('Task {} can\'t be created'.format( task_type )) diff --git a/openpype/modules/clockify/widgets.py b/openpype/modules/clockify/widgets.py index d58df3c067..122b6212c0 100644 --- a/openpype/modules/clockify/widgets.py +++ b/openpype/modules/clockify/widgets.py @@ -1,4 +1,4 @@ -from Qt import QtCore, QtGui, QtWidgets +from qtpy import QtCore, QtGui, QtWidgets from openpype import resources, style diff --git a/openpype/modules/deadline/abstract_submit_deadline.py b/openpype/modules/deadline/abstract_submit_deadline.py index 22902d79ea..648eb77007 100644 --- a/openpype/modules/deadline/abstract_submit_deadline.py +++ b/openpype/modules/deadline/abstract_submit_deadline.py @@ -4,10 +4,12 @@ It provides Deadline JobInfo data class. 
""" +import json.decoder import os from abc import abstractmethod import platform import getpass +from functools import partial from collections import OrderedDict import six @@ -15,7 +17,12 @@ import attr import requests import pyblish.api -from openpype.lib.abstract_metaplugins import AbstractMetaInstancePlugin +from openpype.pipeline.publish import ( + AbstractMetaInstancePlugin, + KnownPublishError +) + +JSONDecodeError = getattr(json.decoder, "JSONDecodeError", ValueError) def requests_post(*args, **kwargs): @@ -60,6 +67,96 @@ def requests_get(*args, **kwargs): return requests.get(*args, **kwargs) +class DeadlineKeyValueVar(dict): + """ + + Serializes dictionary key values as "{key}={value}" like Deadline uses + for EnvironmentKeyValue. + + As an example: + EnvironmentKeyValue0="A_KEY=VALUE_A" + EnvironmentKeyValue1="OTHER_KEY=VALUE_B" + + The keys are serialized in alphabetical order (sorted). + + Example: + >>> var = DeadlineKeyValueVar("EnvironmentKeyValue") + >>> var["my_var"] = "hello" + >>> var["my_other_var"] = "hello2" + >>> var.serialize() + + + """ + def __init__(self, key): + super(DeadlineKeyValueVar, self).__init__() + self.__key = key + + def serialize(self): + key = self.__key + + # Allow custom location for index in serialized string + if "{}" not in key: + key = key + "{}" + + return { + key.format(index): "{}={}".format(var_key, var_value) + for index, (var_key, var_value) in enumerate(sorted(self.items())) + } + + +class DeadlineIndexedVar(dict): + """ + + Allows to set and query values by integer indices: + Query: var[1] or var.get(1) + Set: var[1] = "my_value" + Append: var += "value" + + Note: Iterating the instance is not guarantueed to be the order of the + indices. To do so iterate with `sorted()` + + """ + def __init__(self, key): + super(DeadlineIndexedVar, self).__init__() + self.__key = key + + def serialize(self): + key = self.__key + + # Allow custom location for index in serialized string + if "{}" not in key: + key = key + "{}" + + return { + key.format(index): value for index, value in sorted(self.items()) + } + + def next_available_index(self): + # Add as first unused entry + i = 0 + while i in self.keys(): + i += 1 + return i + + def update(self, data): + # Force the integer key check + for key, value in data.items(): + self.__setitem__(key, value) + + def __iadd__(self, other): + index = self.next_available_index() + self[index] = other + return self + + def __setitem__(self, key, value): + if not isinstance(key, int): + raise TypeError("Key must be an integer: {}".format(key)) + + if key < 0: + raise ValueError("Negative index can't be set: {}".format(key)) + dict.__setitem__(self, key, value) + + @attr.s class DeadlineJobInfo(object): """Mapping of all Deadline *JobInfo* attributes. @@ -212,24 +309,8 @@ class DeadlineJobInfo(object): # Environment # ---------------------------------------------- - _environmentKeyValue = attr.ib(factory=list) - - @property - def EnvironmentKeyValue(self): # noqa: N802 - """Return all environment key values formatted for Deadline. 
- - Returns: - dict: as `{'EnvironmentKeyValue0', 'key=value'}` - - """ - out = {} - for index, v in enumerate(self._environmentKeyValue): - out["EnvironmentKeyValue{}".format(index)] = v - return out - - @EnvironmentKeyValue.setter - def EnvironmentKeyValue(self, val): # noqa: N802 - self._environmentKeyValue.append(val) + EnvironmentKeyValue = attr.ib(factory=partial(DeadlineKeyValueVar, + "EnvironmentKeyValue")) IncludeEnvironment = attr.ib(default=None) # Default: false UseJobEnvironmentOnly = attr.ib(default=None) # Default: false @@ -237,121 +318,29 @@ class DeadlineJobInfo(object): # Job Extra Info # ---------------------------------------------- - _extraInfos = attr.ib(factory=list) - _extraInfoKeyValues = attr.ib(factory=list) - - @property - def ExtraInfo(self): # noqa: N802 - """Return all ExtraInfo values formatted for Deadline. - - Returns: - dict: as `{'ExtraInfo0': 'value'}` - - """ - out = {} - for index, v in enumerate(self._extraInfos): - out["ExtraInfo{}".format(index)] = v - return out - - @ExtraInfo.setter - def ExtraInfo(self, val): # noqa: N802 - self._extraInfos.append(val) - - @property - def ExtraInfoKeyValue(self): # noqa: N802 - """Return all ExtraInfoKeyValue values formatted for Deadline. - - Returns: - dict: as {'ExtraInfoKeyValue0': 'key=value'}` - - """ - out = {} - for index, v in enumerate(self._extraInfoKeyValues): - out["ExtraInfoKeyValue{}".format(index)] = v - return out - - @ExtraInfoKeyValue.setter - def ExtraInfoKeyValue(self, val): # noqa: N802 - self._extraInfoKeyValues.append(val) + ExtraInfo = attr.ib(factory=partial(DeadlineIndexedVar, "ExtraInfo")) + ExtraInfoKeyValue = attr.ib(factory=partial(DeadlineKeyValueVar, + "ExtraInfoKeyValue")) # Task Extra Info Names # ---------------------------------------------- OverrideTaskExtraInfoNames = attr.ib(default=None) # Default: false - _taskExtraInfos = attr.ib(factory=list) - - @property - def TaskExtraInfoName(self): # noqa: N802 - """Return all TaskExtraInfoName values formatted for Deadline. - - Returns: - dict: as `{'TaskExtraInfoName0': 'value'}` - - """ - out = {} - for index, v in enumerate(self._taskExtraInfos): - out["TaskExtraInfoName{}".format(index)] = v - return out - - @TaskExtraInfoName.setter - def TaskExtraInfoName(self, val): # noqa: N802 - self._taskExtraInfos.append(val) + TaskExtraInfoName = attr.ib(factory=partial(DeadlineIndexedVar, + "TaskExtraInfoName")) # Output # ---------------------------------------------- - _outputFilename = attr.ib(factory=list) - _outputFilenameTile = attr.ib(factory=list) - _outputDirectory = attr.ib(factory=list) + OutputFilename = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputFilename")) + OutputFilenameTile = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputFilename{}Tile")) + OutputDirectory = attr.ib(factory=partial(DeadlineIndexedVar, + "OutputDirectory")) - @property - def OutputFilename(self): # noqa: N802 - """Return all OutputFilename values formatted for Deadline. - - Returns: - dict: as `{'OutputFilename0': 'filename'}` - - """ - out = {} - for index, v in enumerate(self._outputFilename): - out["OutputFilename{}".format(index)] = v - return out - - @OutputFilename.setter - def OutputFilename(self, val): # noqa: N802 - self._outputFilename.append(val) - - @property - def OutputFilenameTile(self): # noqa: N802 - """Return all OutputFilename#Tile values formatted for Deadline. 
- - Returns: - dict: as `{'OutputFilenme#Tile': 'tile'}` - - """ - out = {} - for index, v in enumerate(self._outputFilenameTile): - out["OutputFilename{}Tile".format(index)] = v - return out - - @OutputFilenameTile.setter - def OutputFilenameTile(self, val): # noqa: N802 - self._outputFilenameTile.append(val) - - @property - def OutputDirectory(self): # noqa: N802 - """Return all OutputDirectory values formatted for Deadline. - - Returns: - dict: as `{'OutputDirectory0': 'dir'}` - - """ - out = {} - for index, v in enumerate(self._outputDirectory): - out["OutputDirectory{}".format(index)] = v - return out - - @OutputDirectory.setter - def OutputDirectory(self, val): # noqa: N802 - self._outputDirectory.append(val) + # Asset Dependency + # ---------------------------------------------- + AssetDependency = attr.ib(factory=partial(DeadlineIndexedVar, + "AssetDependency")) # Tile Job # ---------------------------------------------- @@ -375,7 +364,7 @@ class DeadlineJobInfo(object): """ def filter_data(a, v): - if a.name.startswith("_"): + if isinstance(v, (DeadlineIndexedVar, DeadlineKeyValueVar)): return False if v is None: return False @@ -383,15 +372,27 @@ class DeadlineJobInfo(object): serialized = attr.asdict( self, dict_factory=OrderedDict, filter=filter_data) - serialized.update(self.EnvironmentKeyValue) - serialized.update(self.ExtraInfo) - serialized.update(self.ExtraInfoKeyValue) - serialized.update(self.TaskExtraInfoName) - serialized.update(self.OutputFilename) - serialized.update(self.OutputFilenameTile) - serialized.update(self.OutputDirectory) + + # Custom serialize these attributes + for attribute in [ + self.EnvironmentKeyValue, + self.ExtraInfo, + self.ExtraInfoKeyValue, + self.TaskExtraInfoName, + self.OutputFilename, + self.OutputFilenameTile, + self.OutputDirectory, + self.AssetDependency + ]: + serialized.update(attribute.serialize()) + return serialized + def update(self, data): + """Update instance with data dict""" + for key, value in data.items(): + setattr(self, key, value) + @six.add_metaclass(AbstractMetaInstancePlugin) class AbstractSubmitDeadline(pyblish.api.InstancePlugin): @@ -399,6 +400,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): label = "Submit to Deadline" order = pyblish.api.IntegratorOrder + 0.1 + import_reference = False use_published = True asset_dependencies = False @@ -423,7 +425,11 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): file_path = None if self.use_published: - file_path = self.from_published_scene() + if not self.import_reference: + file_path = self.from_published_scene() + else: + self.log.info("use the scene with imported reference for rendering") # noqa + file_path = context.data["currentFile"] # fallback if nothing was set if not file_path: @@ -515,68 +521,71 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): published. """ - anatomy = self._instance.context.data['anatomy'] - file_path = None - for i in self._instance.context: - if "workfile" in i.data["families"] \ - or i.data["family"] == "workfile": - # test if there is instance of workfile waiting - # to be published. - assert i.data["publish"] is True, ( - "Workfile (scene) must be published along") - # determine published path from Anatomy. 
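For context, the attrs-based DeadlineJobInfo now serializes in a single pass; a minimal sketch with hypothetical values:

    job_info = DeadlineJobInfo(Plugin="MayaBatch")
    job_info.EnvironmentKeyValue["AVALON_PROJECT"] = "demo"
    job_info.OutputFilename += "renders/beauty.####.exr"
    payload = job_info.serialize()
    # Plain attributes pass through while the helper vars expand to
    # indexed keys:
    #   payload["Plugin"] == "MayaBatch"
    #   payload["EnvironmentKeyValue0"] == "AVALON_PROJECT=demo"
    #   payload["OutputFilename0"] == "renders/beauty.####.exr"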
- template_data = i.data.get("anatomyData") - rep = i.data.get("representations")[0].get("ext") - template_data["representation"] = rep - template_data["ext"] = rep - template_data["comment"] = None - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] - file_path = os.path.normpath(template_filled) + instance = self._instance + workfile_instance = self._get_workfile_instance(instance.context) + if workfile_instance is None: + return - self.log.info("Using published scene for render {}".format( - file_path)) + # determine published path from Anatomy. + template_data = workfile_instance.data.get("anatomyData") + rep = workfile_instance.data["representations"][0] + template_data["representation"] = rep.get("name") + template_data["ext"] = rep.get("ext") + template_data["comment"] = None - if not os.path.exists(file_path): - self.log.error("published scene does not exist!") - raise + anatomy = instance.context.data['anatomy'] + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + file_path = os.path.normpath(template_filled) - if not replace_in_path: - return file_path + self.log.info("Using published scene for render {}".format(file_path)) - # now we need to switch scene in expected files - # because token will now point to published - # scene file and that might differ from current one - new_scene = os.path.splitext( - os.path.basename(file_path))[0] - orig_scene = os.path.splitext( - os.path.basename( - self._instance.context.data["currentFile"]))[0] - exp = self._instance.data.get("expectedFiles") + if not os.path.exists(file_path): + self.log.error("published scene does not exist!") + raise - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - new_exp = {} - for aov, files in exp[0].items(): - replaced_files = [] - for f in files: - replaced_files.append( - str(f).replace(orig_scene, new_scene) - ) - new_exp[aov] = replaced_files - # [] might be too much here, TODO - self._instance.data["expectedFiles"] = [new_exp] - else: - new_exp = [] - for f in exp: - new_exp.append( - str(f).replace(orig_scene, new_scene) - ) - self._instance.data["expectedFiles"] = new_exp + if not replace_in_path: + return file_path - self.log.info("Scene name was switched {} -> {}".format( - orig_scene, new_scene - )) + # now we need to switch scene in expected files + # because token will now point to published + # scene file and that might differ from current one + def _clean_name(path): + return os.path.splitext(os.path.basename(path))[0] + + new_scene = _clean_name(file_path) + orig_scene = _clean_name(instance.context.data["currentFile"]) + expected_files = instance.data.get("expectedFiles") + + if isinstance(expected_files[0], dict): + # we have aovs and we need to iterate over them + new_exp = {} + for aov, files in expected_files[0].items(): + replaced_files = [] + for f in files: + replaced_files.append( + str(f).replace(orig_scene, new_scene) + ) + new_exp[aov] = replaced_files + # [] might be too much here, TODO + instance.data["expectedFiles"] = [new_exp] + else: + new_exp = [] + for f in expected_files: + new_exp.append( + str(f).replace(orig_scene, new_scene) + ) + instance.data["expectedFiles"] = new_exp + + metadata_folder = instance.data.get("publishRenderMetadataFolder") + if metadata_folder: + metadata_folder = metadata_folder.replace(orig_scene, + new_scene) + instance.data["publishRenderMetadataFolder"] = metadata_folder + + self.log.info("Scene name was switched {} 
-> {}".format( + orig_scene, new_scene + )) return file_path @@ -615,7 +624,7 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): str: resulting Deadline job id. Throws: - RuntimeError: if submission fails. + KnownPublishError: if submission fails. """ url = "{}/api/jobs".format(self._deadline_url) @@ -625,10 +634,36 @@ class AbstractSubmitDeadline(pyblish.api.InstancePlugin): self.log.error(response.status_code) self.log.error(response.content) self.log.debug(payload) - raise RuntimeError(response.text) + raise KnownPublishError(response.text) + + try: + result = response.json() + except JSONDecodeError: + msg = "Broken response {}. ".format(response) + msg += "Try restarting the Deadline Webservice." + self.log.warning(msg, exc_info=True) + raise KnownPublishError("Broken response from DL") - result = response.json() # for submit publish job self._instance.data["deadlineSubmissionJob"] = result return result["_id"] + + @staticmethod + def _get_workfile_instance(context): + """Find workfile instance in context""" + for i in context: + + is_workfile = ( + "workfile" in i.data.get("families", []) or + i.data["family"] == "workfile" + ) + if not is_workfile: + continue + + # test if there is instance of workfile waiting + # to be published. + assert i.data["publish"] is True, ( + "Workfile (scene) must be published along") + + return i diff --git a/openpype/modules/deadline/deadline_module.py b/openpype/modules/deadline/deadline_module.py index 1a179e9aaf..9855f8c1b1 100644 --- a/openpype/modules/deadline/deadline_module.py +++ b/openpype/modules/deadline/deadline_module.py @@ -1,6 +1,16 @@ import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +import requests +import six +import sys + +from openpype.lib import requests_get, Logger +from openpype.modules import OpenPypeModule, IPluginPaths + + +class DeadlineWebserviceError(Exception): + """ + Exception to throw when connection to Deadline server fails. + """ class DeadlineModule(OpenPypeModule, IPluginPaths): @@ -32,3 +42,35 @@ class DeadlineModule(OpenPypeModule, IPluginPaths): return { "publish": [os.path.join(current_dir, "plugins", "publish")] } + + @staticmethod + def get_deadline_pools(webservice, log=None): + # type: (str) -> list + """Get pools from Deadline. + Args: + webservice (str): Server url. + log (Logger) + Returns: + list: Pools. + Throws: + RuntimeError: If deadline webservice is unreachable. 
+ + """ + if not log: + log = Logger.get_logger(__name__) + + argument = "{}/api/pools?NamesOnly=true".format(webservice) + try: + response = requests_get(argument) + except requests.exceptions.ConnectionError as exc: + msg = 'Cannot connect to DL web service {}'.format(webservice) + log.error(msg) + six.reraise( + DeadlineWebserviceError, + DeadlineWebserviceError('{} - {}'.format(msg, exc)), + sys.exc_info()[2]) + if not response.ok: + log.warning("No pools retrieved") + return [] + + return response.json() diff --git a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py index 1bc4eaa067..9981bead3e 100644 --- a/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py +++ b/openpype/modules/deadline/plugins/publish/collect_deadline_server_from_instance.py @@ -11,9 +11,9 @@ import pyblish.api class CollectDeadlineServerFromInstance(pyblish.api.InstancePlugin): """Collect Deadline Webservice URL from instance.""" - order = pyblish.api.CollectorOrder + 0.02 + order = pyblish.api.CollectorOrder + 0.415 label = "Deadline Webservice from the Instance" - families = ["rendering"] + families = ["rendering", "renderlayer"] def process(self, instance): instance.data["deadlineUrl"] = self._collect_deadline_url(instance) diff --git a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py index fc056342a8..e6ad6a9aa1 100644 --- a/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py +++ b/openpype/modules/deadline/plugins/publish/collect_default_deadline_server.py @@ -6,7 +6,7 @@ import pyblish.api class CollectDefaultDeadlineServer(pyblish.api.ContextPlugin): """Collect default Deadline Webservice URL.""" - order = pyblish.api.CollectorOrder + 0.01 + order = pyblish.api.CollectorOrder + 0.410 label = "Default Deadline Webservice" pass_mongo_url = False diff --git a/openpype/modules/deadline/plugins/publish/collect_pools.py b/openpype/modules/deadline/plugins/publish/collect_pools.py new file mode 100644 index 0000000000..48130848d5 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_pools.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +"""Collect Deadline pools. Choose default one from Settings + +""" +import pyblish.api + + +class CollectDeadlinePools(pyblish.api.InstancePlugin): + """Collect pools from instance if present, from Setting otherwise.""" + + order = pyblish.api.CollectorOrder + 0.420 + label = "Collect Deadline Pools" + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + + primary_pool = None + secondary_pool = None + + def process(self, instance): + if not instance.data.get("primaryPool"): + instance.data["primaryPool"] = self.primary_pool or "none" + + if not instance.data.get("secondaryPool"): + instance.data["secondaryPool"] = self.secondary_pool or "none" diff --git a/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py b/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py new file mode 100644 index 0000000000..b00381b6cf --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/collect_publishable_instances.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +"""Collect instances that should be processed and published on DL. 
+ +""" +import os + +import pyblish.api +from openpype.pipeline import PublishValidationError + + +class CollectDeadlinePublishableInstances(pyblish.api.InstancePlugin): + """Collect instances that should be processed and published on DL. + + Some long running publishes (not just renders) could be offloaded to DL, + this plugin compares theirs name against env variable, marks only + publishable by farm. + + Triggered only when running only in headless mode, eg on a farm. + """ + + order = pyblish.api.CollectorOrder + 0.499 + label = "Collect Deadline Publishable Instance" + targets = ["remote"] + + def process(self, instance): + self.log.debug("CollectDeadlinePublishableInstances") + publish_inst = os.environ.get("OPENPYPE_PUBLISH_SUBSET", '') + if not publish_inst: + raise PublishValidationError("OPENPYPE_PUBLISH_SUBSET env var " + "required for remote publishing") + + subset_name = instance.data["subset"] + if subset_name == publish_inst: + self.log.debug("Publish {}".format(subset_name)) + instance.data["publish"] = True + instance.data["farm"] = False + else: + self.log.debug("Skipping {}".format(subset_name)) + instance.data["publish"] = False diff --git a/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml new file mode 100644 index 0000000000..0e7d72910e --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/help/validate_deadline_pools.xml @@ -0,0 +1,31 @@ + + + + Scene setting + + ## Invalid Deadline pools found + + Configured pools don't match what is set in Deadline. + + {invalid_value_str} + + ### How to repair? + + If your instance had deadline pools set on creation, remove or + change them. + + In other cases inform admin to change them in Settings. + + Available deadline pools {pools_str}. + + + ### __Detailed Info__ + + This error is shown when deadline pool is not on Deadline anymore. It + could happen in case of republish old workfile which was created with + previous deadline pools, + or someone changed pools on Deadline side, but didn't modify Openpype + Settings. 
+ + + \ No newline at end of file diff --git a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py index c499c14d40..f26047bb9d 100644 --- a/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_aftereffects_deadline.py @@ -2,13 +2,16 @@ import os import attr import getpass import pyblish.api +from datetime import datetime -from avalon import api - -from openpype.lib import env_value_to_bool -from openpype.lib.delivery import collect_frames +from openpype.lib import ( + env_value_to_bool, + collect_frames, +) +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.tests.lib import is_in_tests @attr.s @@ -34,11 +37,10 @@ class AfterEffectsSubmitDeadline( hosts = ["aftereffects"] families = ["render.farm"] # cannot be "render' as that is integrated use_published = True + targets = ["local"] priority = 50 chunk_size = 1000000 - primary_pool = None - secondary_pool = None group = None department = None multiprocess = True @@ -48,9 +50,11 @@ class AfterEffectsSubmitDeadline( context = self._instance.context + batch_name = os.path.basename(self._instance.data["source"]) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") dln_job_info.Name = self._instance.data["name"] - dln_job_info.BatchName = os.path.basename(self._instance. - data["source"]) + dln_job_info.BatchName = batch_name dln_job_info.Plugin = "AfterEffects" dln_job_info.UserName = context.data.get( "deadlineUser", getpass.getuser()) @@ -62,14 +66,14 @@ class AfterEffectsSubmitDeadline( dln_job_info.Frames = frame_range dln_job_info.Priority = self.priority - dln_job_info.Pool = self.primary_pool - dln_job_info.SecondaryPool = self.secondary_pool + dln_job_info.Pool = self._instance.data.get("primaryPool") + dln_job_info.SecondaryPool = self._instance.data.get("secondaryPool") dln_job_info.Group = self.group dln_job_info.Department = self.department dln_job_info.ChunkSize = self.chunk_size - dln_job_info.OutputFilename = \ + dln_job_info.OutputFilename += \ os.path.basename(self._instance.data["expectedFiles"][0]) - dln_job_info.OutputDirectory = \ + dln_job_info.OutputDirectory += \ os.path.dirname(self._instance.data["expectedFiles"][0]) dln_job_info.JobDelay = "00:00:00" @@ -82,22 +86,23 @@ class AfterEffectsSubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION", + "IS_TEST" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: - val = environment.get(key) - if val: - dln_job_info.EnvironmentKeyValue = "{key}={value}".format( - key=key, - value=val) + value = environment.get(key) + if value: + dln_job_info.EnvironmentKeyValue[key] = value + # to recognize job from PYPE for turning Event On/Off - dln_job_info.EnvironmentKeyValue = "OPENPYPE_RENDER_JOB=1" + dln_job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" return dln_job_info diff --git a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py 
similarity index 73% rename from openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py rename to openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py index ea109e9445..038ee4fc03 100644 --- a/openpype/hosts/celaction/plugins/publish/submit_celaction_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_celaction_deadline.py @@ -2,16 +2,14 @@ import os import re import json import getpass - import requests import pyblish.api -class ExtractCelactionDeadline(pyblish.api.InstancePlugin): +class CelactionSubmitDeadline(pyblish.api.InstancePlugin): """Submit CelAction2D scene to Deadline - Renders are submitted to a Deadline Web Service as - supplied via settings key "DEADLINE_REST_URL". + Renders are submitted to a Deadline Web Service. """ @@ -26,27 +24,21 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): deadline_pool_secondary = "" deadline_group = "" deadline_chunk_size = 1 - - enviro_filter = [ - "FTRACK_API_USER", - "FTRACK_API_KEY", - "FTRACK_SERVER" - ] + deadline_job_delay = "00:00:08:00" def process(self, instance): instance.data["toBeRenderedOn"] = "deadline" context = instance.context - deadline_url = ( - context.data["system_settings"] - ["modules"] - ["deadline"] - ["DEADLINE_REST_URL"] - ) - assert deadline_url, "Requires DEADLINE_REST_URL" + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + # if custom one is set in instance, use that + if instance.data.get("deadlineUrl"): + deadline_url = instance.data.get("deadlineUrl") + assert deadline_url, "Requires Deadline Webservice URL" self.deadline_url = "{}/api/jobs".format(deadline_url) - self._comment = context.data.get("comment", "") + self._comment = instance.data["comment"] self._deadline_user = context.data.get( "deadlineUser", getpass.getuser()) self._frame_start = int(instance.data["frameStart"]) @@ -82,6 +74,26 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): render_dir = os.path.normpath(os.path.dirname(render_path)) render_path = os.path.normpath(render_path) script_name = os.path.basename(script_path) + + for item in instance.context: + if "workfile" in item.data["family"]: + msg = "Workfile (scene) must be published along" + assert item.data["publish"] is True, msg + + template_data = item.data.get("anatomyData") + rep = item.data.get("representations")[0].get("name") + template_data["representation"] = rep + template_data["ext"] = rep + template_data["comment"] = None + anatomy_filled = instance.context.data["anatomy"].format( + template_data) + template_filled = anatomy_filled["publish"]["path"] + script_path = os.path.normpath(template_filled) + + self.log.info( + "Using published scene for render {}".format(script_path) + ) + jobname = "%s - %s" % (script_name, instance.name) output_filename_0 = self.preview_fname(render_path) @@ -98,7 +110,7 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): chunk_size = self.deadline_chunk_size # search for %02d pattern in name, and padding number - search_results = re.search(r"(.%0)(\d)(d)[._]", render_path).groups() + search_results = re.search(r"(%0)(\d)(d)[._]", render_path).groups() split_patern = "".join(search_results) padding_number = int(search_results[1]) @@ -145,10 +157,11 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): # frames from Deadline Monitor "OutputFilename0": output_filename_0.replace("\\", "/"), - # # Asset dependency to wait for at least the scene file to sync. 
+ # # Asset dependency to wait for at least + # the scene file to sync. # "AssetDependency0": script_path "ScheduledType": "Once", - "JobDelay": "00:00:08:00" + "JobDelay": self.deadline_job_delay }, "PluginInfo": { # Input @@ -173,19 +186,6 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): plugin = payload["JobInfo"]["Plugin"] self.log.info("using render plugin : {}".format(plugin)) - i = 0 - for key, values in dict(os.environ).items(): - if key.upper() in self.enviro_filter: - payload["JobInfo"].update( - { - "EnvironmentKeyValue%d" - % i: "{key}={value}".format( - key=key, value=values - ) - } - ) - i += 1 - self.log.info("Submitting..") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) @@ -193,10 +193,15 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): self.expected_files(instance, render_path) self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) + response = requests.post(self.deadline_url, json=payload) if not response.ok: - raise Exception(response.text) + self.log.error( + "Submission failed! [{}] {}".format( + response.status_code, response.content)) + self.log.debug(payload) + raise SystemExit(response.text) return response @@ -234,32 +239,29 @@ class ExtractCelactionDeadline(pyblish.api.InstancePlugin): split_path = path.split(split_patern) hashes = "#" * int(search_results[1]) return "".join([split_path[0], hashes, split_path[-1]]) - if "#" in path: - self.log.debug("_ path: `{}`".format(path)) - return path - else: - return path - def expected_files(self, - instance, - path): + self.log.debug("_ path: `{}`".format(path)) + return path + + def expected_files(self, instance, filepath): """ Create expected files in instance data """ if not instance.data.get("expectedFiles"): - instance.data["expectedFiles"] = list() + instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) - file = os.path.basename(path) + dirpath = os.path.dirname(filepath) + filename = os.path.basename(filepath) - if "#" in file: - pparts = file.split("#") + if "#" in filename: + pparts = filename.split("#") padding = "%0{}d".format(len(pparts) - 1) - file = pparts[0] + padding + pparts[-1] + filename = pparts[0] + padding + pparts[-1] - if "%" not in file: - instance.data["expectedFiles"].append(path) + if "%" not in filename: + instance.data["expectedFiles"].append(filepath) return for i in range(self._frame_start, (self._frame_end + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirpath, (filename % i)).replace("\\", "/") + ) diff --git a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py index 918efb6630..425883393f 100644 --- a/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_harmony_deadline.py @@ -5,13 +5,15 @@ from pathlib import Path from collections import OrderedDict from zipfile import ZipFile, is_zipfile import re +from datetime import datetime import attr import pyblish.api -from avalon import api +from openpype.pipeline import legacy_io from openpype_modules.deadline import abstract_submit_deadline from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.tests.lib import is_in_tests class _ZipFile(ZipFile): @@ -238,11 +240,10 @@ class HarmonySubmitDeadline( order = pyblish.api.IntegratorOrder + 0.1 hosts = ["harmony"] families = ["render.farm"] + targets = 
["local"] optional = True use_published = False - primary_pool = "" - secondary_pool = "" priority = 50 chunk_size = 1000000 group = "none" @@ -259,10 +260,13 @@ class HarmonySubmitDeadline( # for now, get those from presets. Later on it should be # configurable in Harmony UI directly. job_info.Priority = self.priority - job_info.Pool = self.primary_pool - job_info.SecondaryPool = self.secondary_pool + job_info.Pool = self._instance.data.get("primaryPool") + job_info.SecondaryPool = self._instance.data.get("secondaryPool") job_info.ChunkSize = self.chunk_size - job_info.BatchName = os.path.basename(self._instance.data["source"]) + batch_name = os.path.basename(self._instance.data["source"]) + if is_in_tests: + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + job_info.BatchName = batch_name job_info.Department = self.department job_info.Group = self.group @@ -275,23 +279,23 @@ class HarmonySubmitDeadline( "AVALON_TASK", "AVALON_APP_NAME", "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" + "OPENPYPE_LOG_NO_COLORS", + "OPENPYPE_VERSION", + "IS_TEST" ] # Add mongo url if it's enabled if self._instance.context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for key in keys: - val = environment.get(key) - if val: - job_info.EnvironmentKeyValue = "{key}={value}".format( - key=key, - value=val) + value = environment.get(key) + if value: + job_info.EnvironmentKeyValue[key] = value # to recognize job from PYPE for turning Event On/Off - job_info.EnvironmentKeyValue = "OPENPYPE_RENDER_JOB=1" + job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" return job_info @@ -323,7 +327,9 @@ class HarmonySubmitDeadline( ) unzip_dir = (published_scene.parent / published_scene.stem) with _ZipFile(published_scene, "r") as zip_ref: - zip_ref.extractall(unzip_dir.as_posix()) + # UNC path (//?/) added to minimalize risk with extracting + # to large file paths + zip_ref.extractall("//?/" + str(unzip_dir.as_posix())) # find any xstage files in directory, prefer the one with the same name # as directory (plus extension) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py index c683eb68a8..6a62f83cae 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_remote_publish.py @@ -1,13 +1,15 @@ import os import json +from datetime import datetime import requests import hou -from avalon import api, io - import pyblish.api +from openpype.pipeline import legacy_io +from openpype.tests.lib import is_in_tests + class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): """Submit Houdini scene to perform a local publish in Deadline. @@ -35,7 +37,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): ), "Errors found, aborting integration.." 
# Deadline connection - AVALON_DEADLINE = api.Session.get( + AVALON_DEADLINE = legacy_io.Session.get( "AVALON_DEADLINE", "http://localhost:8082" ) assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" @@ -55,11 +57,13 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): scenename = os.path.basename(scene) # Get project code - project = io.find_one({"type": "project"}) + project = context.data["projectEntity"] code = project["data"].get("code", project["name"]) job_name = "{scene} [PUBLISH]".format(scene=scenename) batch_name = "{code} - {scene}".format(code=code, scene=scenename) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") deadline_user = "roy" # todo: get deadline user dynamically # Get only major.minor version of Houdini, ignore patch version @@ -130,6 +134,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. "houdini17.5;pluginx2.3" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): @@ -137,7 +142,7 @@ class HoudiniSubmitPublishDeadline(pyblish.api.ContextPlugin): environment = dict( {key: os.environ[key] for key in keys if key in os.environ}, - **api.Session + **legacy_io.Session ) environment["PYBLISH_ACTIVE_INSTANCES"] = ",".join(instances) diff --git a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py index 59aeb68b79..2b17b644b8 100644 --- a/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_houdini_render_deadline.py @@ -1,13 +1,15 @@ import os import json import getpass +from datetime import datetime import requests -from avalon import api - import pyblish.api -import hou +# import hou ??? + +from openpype.pipeline import legacy_io +from openpype.tests.lib import is_in_tests class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): @@ -45,6 +47,9 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): if code: batch_name = "{0} - {1}".format(code, batch_name) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + # Output driver to render driver = instance[0] @@ -71,7 +76,8 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): "UserName": deadline_user, "Plugin": "Houdini", - "Pool": "houdini_redshift", # todo: remove hardcoded pool + "Pool": instance.data.get("primaryPool"), + "secondaryPool": instance.data.get("secondaryPool"), "Frames": frames, "ChunkSize": instance.data.get("chunkSize", 10), @@ -100,13 +106,14 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): # this application with so the Render Slave can build its own # similar environment using it, e.g. 
"maya2018;vray4.x;yeti3.1.9" "AVALON_TOOLS", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if context.data.get("deadlinePassMongoUrl"): keys.append("OPENPYPE_MONGO") environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -140,7 +147,7 @@ class HoudiniSubmitRenderDeadline(pyblish.api.InstancePlugin): def submit(self, instance, payload): - AVALON_DEADLINE = api.Session.get("AVALON_DEADLINE", + AVALON_DEADLINE = legacy_io.Session.get("AVALON_DEADLINE", "http://localhost:8082") assert AVALON_DEADLINE, "Requires AVALON_DEADLINE" diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py index 15a6f8d828..ed37ff1897 100644 --- a/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_maya_deadline.py @@ -18,7 +18,6 @@ Attributes: from __future__ import print_function import os -import json import getpass import copy import re @@ -27,43 +26,731 @@ from datetime import datetime import itertools from collections import OrderedDict -import clique -import requests +import attr from maya import cmds -from avalon import api -import pyblish.api +from openpype.pipeline import legacy_io -from openpype.hosts.maya.api import lib +from openpype.hosts.maya.api.lib_rendersettings import RenderSettings +from openpype.hosts.maya.api.lib import get_attr_in_layer -# Documentation for keys available at: -# https://docs.thinkboxsoftware.com -# /products/deadline/8.0/1_User%20Manual/manual -# /manual-submission.html#job-info-file-options +from openpype_modules.deadline import abstract_submit_deadline +from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo +from openpype.tests.lib import is_in_tests -payload_skeleton_template = { - "JobInfo": { - "BatchName": None, # Top-level group name - "Name": None, # Job name, as seen in Monitor - "UserName": None, - "Plugin": "MayaBatch", - "Frames": "{start}-{end}x{step}", - "Comment": None, - "Priority": 50, - }, - "PluginInfo": { - "SceneFile": None, # Input - "OutputFilePath": None, # Output directory and filename - "OutputFilePrefix": None, - "Version": cmds.about(version=True), # Mandatory for Deadline - "UsingRenderLayers": True, - "RenderLayer": None, # Render only this layer - "Renderer": None, - "ProjectPath": None, # Resolve relative references - }, - "AuxFiles": [] # Mandatory for Deadline, may be empty -} + +def _validate_deadline_bool_value(instance, attribute, value): + if not isinstance(value, (str, bool)): + raise TypeError( + "Attribute {} must be str or bool.".format(attribute)) + if value not in {"1", "0", True, False}: + raise ValueError( + ("Value of {} must be one of " + "'0', '1', True, False").format(attribute) + ) + + +@attr.s +class MayaPluginInfo(object): + SceneFile = attr.ib(default=None) # Input + OutputFilePath = attr.ib(default=None) # Output directory and filename + OutputFilePrefix = attr.ib(default=None) + Version = attr.ib(default=None) # Mandatory for Deadline + UsingRenderLayers = attr.ib(default=True) + RenderLayer = attr.ib(default=None) # Render only this layer + Renderer = attr.ib(default=None) + ProjectPath = attr.ib(default=None) # Resolve relative references + # Include all lights flag + RenderSetupIncludeLights = attr.ib( + default="1", validator=_validate_deadline_bool_value) 
+ StrictErrorChecking = attr.ib(default=True) + + +@attr.s +class PythonPluginInfo(object): + ScriptFile = attr.ib() + Version = attr.ib(default="3.6") + Arguments = attr.ib(default=None) + SingleFrameOnly = attr.ib(default=None) + + +@attr.s +class VRayPluginInfo(object): + InputFilename = attr.ib(default=None) # Input + SeparateFilesPerFrame = attr.ib(default=None) + VRayEngine = attr.ib(default="V-Ray") + Width = attr.ib(default=None) + Height = attr.ib(default=None) # Mandatory for Deadline + OutputFilePath = attr.ib(default=True) + OutputFileName = attr.ib(default=None) # Render only this layer + + +@attr.s +class ArnoldPluginInfo(object): + ArnoldFile = attr.ib(default=None) + + +class MayaSubmitDeadline(abstract_submit_deadline.AbstractSubmitDeadline): + + label = "Submit Render to Deadline" + hosts = ["maya"] + families = ["renderlayer"] + targets = ["local"] + + tile_assembler_plugin = "OpenPypeTileAssembler" + priority = 50 + tile_priority = 50 + limit = [] # limit groups + jobInfo = {} + pluginInfo = {} + group = "none" + + def get_job_info(self): + job_info = DeadlineJobInfo(Plugin="MayaBatch") + + # todo: test whether this works for existing production cases + # where custom jobInfo was stored in the project settings + job_info.update(self.jobInfo) + + instance = self._instance + context = instance.context + + # Always use the original work file name for the Job name even when + # rendering is done from the published Work File. The original work + # file name is clearer because it can also have subversion strings, + # etc. which are stripped for the published file. + src_filepath = context.data["currentFile"] + src_filename = os.path.basename(src_filepath) + + if is_in_tests(): + src_filename += datetime.now().strftime("%d%m%Y%H%M%S") + + job_info.Name = "%s - %s" % (src_filename, instance.name) + job_info.BatchName = src_filename + job_info.Plugin = instance.data.get("mayaRenderPlugin", "MayaBatch") + job_info.UserName = context.data.get("deadlineUser", getpass.getuser()) + + # Deadline requires integers in frame range + frames = "{start}-{end}x{step}".format( + start=int(instance.data["frameStartHandle"]), + end=int(instance.data["frameEndHandle"]), + step=int(instance.data["byFrameStep"]), + ) + job_info.Frames = frames + + job_info.Pool = instance.data.get("primaryPool") + job_info.SecondaryPool = instance.data.get("secondaryPool") + job_info.ChunkSize = instance.data.get("chunkSize", 10) + job_info.Comment = context.data.get("comment") + job_info.Priority = instance.data.get("priority", self.priority) + job_info.FramesPerTask = instance.data.get("framesPerTask", 1) + + if self.group != "none" and self.group: + job_info.Group = self.group + + if self.limit: + job_info.LimitGroups = ",".join(self.limit) + + # Add options from RenderGlobals + render_globals = instance.data.get("renderGlobals", {}) + job_info.update(render_globals) + + keys = [ + "FTRACK_API_KEY", + "FTRACK_API_USER", + "FTRACK_SERVER", + "OPENPYPE_SG_USER", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "AVALON_APP_NAME", + "OPENPYPE_DEV", + "OPENPYPE_VERSION", + "IS_TEST" + ] + # Add mongo url if it's enabled + if self._instance.context.data.get("deadlinePassMongoUrl"): + keys.append("OPENPYPE_MONGO") + + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + for key in keys: + value = environment.get(key) + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + # to recognize job from PYPE for turning Event On/Off + 
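+        # (once serialized this becomes
+        # "EnvironmentKeyValue<n>": "OPENPYPE_RENDER_JOB=1" in the payload,
+        # which the event plugin mentioned above can match against)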
job_info.EnvironmentKeyValue["OPENPYPE_RENDER_JOB"] = "1" + job_info.EnvironmentKeyValue["OPENPYPE_LOG_NO_COLORS"] = "1" + + # Adding file dependencies. + if self.asset_dependencies: + dependencies = instance.context.data["fileDependencies"] + for dependency in dependencies: + job_info.AssetDependency += dependency + + # Add list of expected files to job + # --------------------------------- + exp = instance.data.get("expectedFiles") + for filepath in self._iter_expected_files(exp): + job_info.OutputDirectory += os.path.dirname(filepath) + job_info.OutputFilename += os.path.basename(filepath) + + return job_info + + def get_plugin_info(self): + + instance = self._instance + context = instance.context + + # Set it to default Maya behaviour if it cannot be determined + # from instance (but it should be, by the Collector). + + default_rs_include_lights = ( + instance.context.data['project_settings'] + ['maya'] + ['RenderSettings'] + ['enable_all_lights'] + ) + + rs_include_lights = instance.data.get( + "renderSetupIncludeLights", default_rs_include_lights) + if rs_include_lights not in {"1", "0", True, False}: + rs_include_lights = default_rs_include_lights + strict_error_checking = instance.data.get("strict_error_checking", + True) + plugin_info = MayaPluginInfo( + SceneFile=self.scene_path, + Version=cmds.about(version=True), + RenderLayer=instance.data['setMembers'], + Renderer=instance.data["renderer"], + RenderSetupIncludeLights=rs_include_lights, # noqa + ProjectPath=context.data["workspaceDir"], + UsingRenderLayers=True, + StrictErrorChecking=strict_error_checking + ) + + plugin_payload = attr.asdict(plugin_info) + + # Patching with pluginInfo from settings + for key, value in self.pluginInfo.items(): + plugin_payload[key] = value + + return plugin_payload + + def process_submission(self): + + instance = self._instance + context = instance.context + + filepath = self.scene_path # publish if `use_publish` else workfile + + # TODO: Avoid the need for this logic here, needed for submit publish + # Store output dir for unified publisher (filesequence) + expected_files = instance.data["expectedFiles"] + first_file = next(self._iter_expected_files(expected_files)) + output_dir = os.path.dirname(first_file) + instance.data["outputDir"] = output_dir + instance.data["toBeRenderedOn"] = "deadline" + + # Patch workfile (only when use_published is enabled) + if self.use_published: + self._patch_workfile() + + # Gather needed data ------------------------------------------------ + workspace = context.data["workspaceDir"] + default_render_file = instance.context.data.get('project_settings')\ + .get('maya')\ + .get('RenderSettings')\ + .get('default_render_image_folder') + filename = os.path.basename(filepath) + dirname = os.path.join(workspace, default_render_file) + + # Fill in common data to payload ------------------------------------ + # TODO: Replace these with collected data from CollectRender + payload_data = { + "filename": filename, + "dirname": dirname, + } + + # Submit preceding export jobs ------------------------------------- + export_job = None + assert not all(x in instance.data["families"] + for x in ['vrayscene', 'assscene']), ( + "Vray Scene and Ass Scene options are mutually exclusive") + + if "vrayscene" in instance.data["families"]: + self.log.debug("Submitting V-Ray scene render..") + vray_export_payload = self._get_vray_export_payload(payload_data) + export_job = self.submit(vray_export_payload) + + payload = self._get_vray_render_payload(payload_data) + + elif "assscene" in 
instance.data["families"]: + self.log.debug("Submitting Arnold .ass standalone render..") + ass_export_payload = self._get_arnold_export_payload(payload_data) + export_job = self.submit(ass_export_payload) + + payload = self._get_arnold_render_payload(payload_data) + else: + self.log.debug("Submitting MayaBatch render..") + payload = self._get_maya_payload(payload_data) + + # Add export job as dependency -------------------------------------- + if export_job: + job_info, _ = payload + job_info.JobDependencies = export_job + + if instance.data.get("tileRendering"): + # Prepare tiles data + self._tile_render(payload) + else: + # Submit main render job + job_info, plugin_info = payload + self.submit(self.assemble_payload(job_info, plugin_info)) + + def _tile_render(self, payload): + """Submit as tile render per frame with dependent assembly jobs.""" + + # As collected by super process() + instance = self._instance + + payload_job_info, payload_plugin_info = payload + job_info = copy.deepcopy(payload_job_info) + plugin_info = copy.deepcopy(payload_plugin_info) + + # if we have sequence of files, we need to create tile job for + # every frame + job_info.TileJob = True + job_info.TileJobTilesInX = instance.data.get("tilesX") + job_info.TileJobTilesInY = instance.data.get("tilesY") + + tiles_count = job_info.TileJobTilesInX * job_info.TileJobTilesInY + + plugin_info["ImageHeight"] = instance.data.get("resolutionHeight") + plugin_info["ImageWidth"] = instance.data.get("resolutionWidth") + plugin_info["RegionRendering"] = True + + R_FRAME_NUMBER = re.compile( + r".+\.(?P[0-9]+)\..+") # noqa: N806, E501 + REPL_FRAME_NUMBER = re.compile( + r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501 + + exp = instance.data["expectedFiles"] + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + # get files from `beauty` + files = exp[0].get("beauty") + # assembly files are used for assembly jobs as we need to put + # together all AOVs + assembly_files = list( + itertools.chain.from_iterable( + [f for _, f in exp[0].items()])) + if not files: + # if beauty doesn't exist, use first aov we found + files = exp[0].get(list(exp[0].keys())[0]) + else: + files = exp + assembly_files = files + + # Define frame tile jobs + frame_file_hash = {} + frame_payloads = {} + file_index = 1 + for file in files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + + new_job_info = copy.deepcopy(job_info) + new_job_info.Name += " (Frame {} - {} tiles)".format(frame, + tiles_count) + new_job_info.TileJobFrame = frame + + new_plugin_info = copy.deepcopy(plugin_info) + + # Add tile data into job info and plugin info + tiles_data = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload_plugin_info["OutputFilePrefix"] + )[0] + + new_job_info.update(tiles_data["JobInfo"]) + new_plugin_info.update(tiles_data["PluginInfo"]) + + self.log.info("hashing {} - {}".format(file_index, file)) + job_hash = hashlib.sha256( + ("{}_{}".format(file_index, file)).encode("utf-8")) + + file_hash = job_hash.hexdigest() + frame_file_hash[frame] = file_hash + + new_job_info.ExtraInfo[0] = file_hash + new_job_info.ExtraInfo[1] = file + + frame_payloads[frame] = self.assemble_payload( + job_info=new_job_info, + plugin_info=new_plugin_info + ) + file_index += 1 + + self.log.info( + "Submitting tile job(s) [{}] ...".format(len(frame_payloads))) + + # Submit frame tile jobs + frame_tile_job_id = {} + for frame, 
tile_job_payload in frame_payloads.items(): + job_id = self.submit(tile_job_payload) + frame_tile_job_id[frame] = job_id + + # Define assembly payloads + assembly_job_info = copy.deepcopy(job_info) + assembly_job_info.Plugin = self.tile_assembler_plugin + assembly_job_info.Name += " - Tile Assembly Job" + assembly_job_info.Frames = 1 + assembly_job_info.MachineLimit = 1 + assembly_job_info.Priority = instance.data.get("tile_priority", + self.tile_priority) + + assembly_plugin_info = { + "CleanupTiles": 1, + "ErrorOnMissing": True, + "Renderer": self._instance.data["renderer"] + } + + assembly_payloads = [] + output_dir = self.job_info.OutputDirectory[0] + for file in assembly_files: + frame = re.search(R_FRAME_NUMBER, file).group("frame") + + frame_assembly_job_info = copy.deepcopy(assembly_job_info) + frame_assembly_job_info.Name += " (Frame {})".format(frame) + frame_assembly_job_info.OutputFilename[0] = re.sub( + REPL_FRAME_NUMBER, + "\\1{}\\3".format("#" * len(frame)), file) + + file_hash = frame_file_hash[frame] + tile_job_id = frame_tile_job_id[frame] + + frame_assembly_job_info.ExtraInfo[0] = file_hash + frame_assembly_job_info.ExtraInfo[1] = file + frame_assembly_job_info.JobDependencies = tile_job_id + + # write assembly job config files + now = datetime.now() + + config_file = os.path.join( + output_dir, + "{}_config_{}.txt".format( + os.path.splitext(file)[0], + now.strftime("%Y_%m_%d_%H_%M_%S") + ) + ) + try: + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + except OSError: + # directory is not available + self.log.warning("Path is unreachable: " + "`{}`".format(output_dir)) + + with open(config_file, "w") as cf: + print("TileCount={}".format(tiles_count), file=cf) + print("ImageFileName={}".format(file), file=cf) + print("ImageWidth={}".format( + instance.data.get("resolutionWidth")), file=cf) + print("ImageHeight={}".format( + instance.data.get("resolutionHeight")), file=cf) + + tiles = _format_tiles( + file, 0, + instance.data.get("tilesX"), + instance.data.get("tilesY"), + instance.data.get("resolutionWidth"), + instance.data.get("resolutionHeight"), + payload_plugin_info["OutputFilePrefix"] + )[1] + for k, v in sorted(tiles.items()): + print("{}={}".format(k, v), file=cf) + + payload = self.assemble_payload( + job_info=frame_assembly_job_info, + plugin_info=assembly_plugin_info.copy(), + # todo: aux file transfers don't work with deadline webservice + # add config file as job auxFile + # aux_files=[config_file] + ) + assembly_payloads.append(payload) + + # Submit assembly jobs + assembly_job_ids = [] + num_assemblies = len(assembly_payloads) + for i, payload in enumerate(assembly_payloads): + self.log.info( + "submitting assembly job {} of {}".format(i + 1, + num_assemblies) + ) + assembly_job_id = self.submit(payload) + assembly_job_ids.append(assembly_job_id) + + instance.data["assemblySubmissionJobs"] = assembly_job_ids + + def _get_maya_payload(self, data): + + job_info = copy.deepcopy(self.job_info) + + if self.asset_dependencies: + # Asset dependency to wait for at least the scene file to sync. 
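+            # (AssetDependency is a DeadlineIndexedVar, so `+=` appends at
+            # the next free index; a single dependency serializes as
+            # "AssetDependency0": "<path to scene>")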
+ job_info.AssetDependency += self.scene_path + + # Get layer prefix + renderlayer = self._instance.data["setMembers"] + renderer = self._instance.data["renderer"] + layer_prefix_attr = RenderSettings.get_image_prefix_attr(renderer) + layer_prefix = get_attr_in_layer(layer_prefix_attr, layer=renderlayer) + + plugin_info = copy.deepcopy(self.plugin_info) + plugin_info.update({ + # Output directory and filename + "OutputFilePath": data["dirname"].replace("\\", "/"), + "OutputFilePrefix": layer_prefix, + }) + + # This hack is here because of how Deadline handles Renderman version. + # it considers everything with `renderman` set as version older than + # Renderman 22, and so if we are using renderman > 21 we need to set + # renderer string on the job to `renderman22`. We will have to change + # this when Deadline releases new version handling this. + renderer = self._instance.data["renderer"] + if renderer == "renderman": + try: + from rfm2.config import cfg # noqa + except ImportError: + raise Exception("Cannot determine renderman version") + + rman_version = cfg().build_info.version() # type: str + if int(rman_version.split(".")[0]) > 22: + renderer = "renderman22" + + plugin_info["Renderer"] = renderer + + # this is needed because renderman plugin in Deadline + # handles directory and file prefixes separately + plugin_info["OutputFilePath"] = job_info.OutputDirectory[0] + + return job_info, plugin_info + + def _get_vray_export_payload(self, data): + + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Export") + + # Get V-Ray settings info to compute output path + vray_scene = self.format_vray_output_filename() + + plugin_info = { + "Renderer": "vray", + "SkipExistingFrames": True, + "UseLegacyRenderLayers": True, + "OutputFilePath": os.path.dirname(vray_scene) + } + + return job_info, attr.asdict(plugin_info) + + def _get_arnold_export_payload(self, data): + + try: + from openpype.scripts import export_maya_ass_job + except Exception: + raise AssertionError( + "Expected module 'export_maya_ass_job' to be available") + + module_path = export_maya_ass_job.__file__ + if module_path.endswith(".pyc"): + module_path = module_path[: -len(".pyc")] + ".py" + + script = os.path.normpath(module_path) + + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Export") + + # Force a single frame Python job + job_info.Plugin = "Python" + job_info.Frames = 1 + + renderlayer = self._instance.data["setMembers"] + + # add required env vars for the export script + envs = { + "AVALON_APP_NAME": os.environ.get("AVALON_APP_NAME"), + "OPENPYPE_ASS_EXPORT_RENDER_LAYER": renderlayer, + "OPENPYPE_ASS_EXPORT_SCENE_FILE": self.scene_path, + "OPENPYPE_ASS_EXPORT_OUTPUT": job_info.OutputFilename[0], + "OPENPYPE_ASS_EXPORT_START": int(self._instance.data["frameStartHandle"]), # noqa + "OPENPYPE_ASS_EXPORT_END": int(self._instance.data["frameEndHandle"]), # noqa + "OPENPYPE_ASS_EXPORT_STEP": 1 + } + for key, value in envs.items(): + if not value: + continue + job_info.EnvironmentKeyValue[key] = value + + plugin_info = PythonPluginInfo( + ScriptFile=script, + Version="3.6", + Arguments="", + SingleFrameOnly="True" + ) + + return job_info, attr.asdict(plugin_info) + + def _get_vray_render_payload(self, data): + + # Job Info + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Render") + job_info.Plugin = "Vray" + job_info.OverrideTaskExtraInfoNames = False + + # Plugin Info + plugin_info = VRayPluginInfo( + 
InputFilename=self.format_vray_output_filename(), + SeparateFilesPerFrame=False, + VRayEngine="V-Ray", + Width=self._instance.data["resolutionWidth"], + Height=self._instance.data["resolutionHeight"], + OutputFilePath=job_info.OutputDirectory[0], + OutputFileName=job_info.OutputFilename[0] + ) + + return job_info, attr.asdict(plugin_info) + + def _get_arnold_render_payload(self, data): + + # Job Info + job_info = copy.deepcopy(self.job_info) + job_info.Name = self._job_info_label("Render") + job_info.Plugin = "Arnold" + job_info.OverrideTaskExtraInfoNames = False + + # Plugin Info + ass_file, _ = os.path.splitext(data["output_filename_0"]) + ass_filepath = ass_file + ".ass" + + plugin_info = ArnoldPluginInfo( + ArnoldFile=ass_filepath + ) + + return job_info, attr.asdict(plugin_info) + + def format_vray_output_filename(self): + """Format the expected output file of the Export job. + + Example: + /_/ + "shot010_v006/shot010_v006_CHARS/CHARS_0001.vrscene" + Returns: + str + + """ + + # "vrayscene//_/" + vray_settings = cmds.ls(type="VRaySettingsNode") + node = vray_settings[0] + template = cmds.getAttr("{}.vrscene_filename".format(node)) + scene, _ = os.path.splitext(self.scene_path) + + def smart_replace(string, key_values): + new_string = string + for key, value in key_values.items(): + new_string = new_string.replace(key, value) + return new_string + + # Get workfile scene path without extension to format vrscene_filename + scene_filename = os.path.basename(self.scene_path) + scene_filename_no_ext, _ = os.path.splitext(scene_filename) + + layer = self._instance.data['setMembers'] + + # Reformat without tokens + output_path = smart_replace( + template, + {"": scene_filename_no_ext, + "": layer}) + + start_frame = int(self._instance.data["frameStartHandle"]) + workspace = self._instance.context.data["workspace"] + filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame) + filepath_zero = os.path.join(workspace, filename_zero) + + return filepath_zero.replace("\\", "/") + + def _patch_workfile(self): + """Patch Maya scene. + + This will take list of patches (lines to add) and apply them to + *published* Maya scene file (that is used later for rendering). 
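+        Patching only makes sense for ASCII ``.ma`` scenes, which can be
+        edited as plain text; binary ``.mb`` workfiles cannot be patched
+        this way.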
+ + Patches are dict with following structure:: + { + "name": "Name of patch", + "regex": "regex of line before patch", + "line": "line to insert" + } + + """ + project_settings = self._instance.context.data["project_settings"] + patches = ( + project_settings.get( + "deadline", {}).get( + "publish", {}).get( + "MayaSubmitDeadline", {}).get( + "scene_patches", {}) + ) + if not patches: + return + + if not os.path.splitext(self.scene_path)[1].lower() != ".ma": + self.log.debug("Skipping workfile patch since workfile is not " + ".ma file") + return + + compiled_regex = [re.compile(p["regex"]) for p in patches] + with open(self.scene_path, "r+") as pf: + scene_data = pf.readlines() + for ln, line in enumerate(scene_data): + for i, r in enumerate(compiled_regex): + if re.match(r, line): + scene_data.insert(ln + 1, patches[i]["line"]) + pf.seek(0) + pf.writelines(scene_data) + pf.truncate() + self.log.info("Applied {} patch to scene.".format( + patches[i]["name"] + )) + + def _job_info_label(self, label): + return "{label} {job.Name} [{start}-{end}]".format( + label=label, + job=self.job_info, + start=int(self._instance.data["frameStartHandle"]), + end=int(self._instance.data["frameEndHandle"]), + ) + + @staticmethod + def _iter_expected_files(exp): + if isinstance(exp[0], dict): + for _aov, files in exp[0].items(): + for file in files: + yield file + else: + for file in exp: + yield file def _format_tiles( @@ -87,12 +774,12 @@ def _format_tiles( Example:: Image prefix is: - `maya///_` + `//_` Result for tile 0 for 4x4 will be: - `maya///_tile_1x1_4x4__` + `//_tile_1x1_4x4__` - Calculating coordinates is tricky as in Job they are defined as top, + Calculating coordinates is tricky as in Job they are defined as top, left, bottom, right with zero being in top-left corner. But Assembler configuration file takes tile coordinates as X, Y, Width and Height and zero is bottom left corner. @@ -101,25 +788,32 @@ def _format_tiles( filename (str): Filename to process as tiles. index (int): Index of that file if it is sequence. tiles_x (int): Number of tiles in X. - tiles_y (int): Number if tikes in Y. + tiles_y (int): Number of tiles in Y. width (int): Width resolution of final image. height (int): Height resolution of final image. prefix (str): Image prefix. Returns: - (dict, dict): Tuple of two dictionaires - first can be used to + (dict, dict): Tuple of two dictionaries - first can be used to extend JobInfo, second has tiles x, y, width and height used for assembler configuration. """ - tile = 0 + # Math used requires integers for correct output - as such + # we ensure our inputs are correct. 
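+    # Worked sketch with assumed inputs width=1920, height=1080,
+    # tiles_x=2, tiles_y=2: w_space=960, h_space=540. The first tile
+    # (tile_x=1, tile_y=2) then yields RegionTop0=0, RegionBottom0=539,
+    # RegionLeft0=0, RegionRight0=959 for the Deadline job and Tile0X=0,
+    # Tile0Y=0, Tile0Width=960, Tile0Height=540 for the assembler config.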
+ assert type(tiles_x) is int, "tiles_x must be an integer" + assert type(tiles_y) is int, "tiles_y must be an integer" + assert type(width) is int, "width must be an integer" + assert type(height) is int, "height must be an integer" + out = {"JobInfo": {}, "PluginInfo": {}} cfg = OrderedDict() - w_space = width / tiles_x - h_space = height / tiles_y + w_space = width // tiles_x + h_space = height // tiles_y cfg["TilesCropped"] = "False" + tile = 0 for tile_x in range(1, tiles_x + 1): for tile_y in reversed(range(1, tiles_y + 1)): tile_prefix = "_tile_{}x{}_{}x{}_".format( @@ -127,1030 +821,38 @@ def _format_tiles( tiles_x, tiles_y ) - out_tile_index = "OutputFilename{}Tile{}".format( - str(index), tile - ) + new_filename = "{}/{}{}".format( os.path.dirname(filename), tile_prefix, os.path.basename(filename) ) - out["JobInfo"][out_tile_index] = new_filename + + top = height - (tile_y * h_space) + bottom = height - ((tile_y - 1) * h_space) - 1 + left = (tile_x - 1) * w_space + right = (tile_x * w_space) - 1 + + # Job info + out["JobInfo"]["OutputFilename{}Tile{}".format(index, tile)] = new_filename # noqa: E501 + + # Plugin Info out["PluginInfo"]["RegionPrefix{}".format(str(tile))] = \ "/{}".format(tile_prefix).join(prefix.rsplit("/", 1)) + out["PluginInfo"]["RegionTop{}".format(tile)] = top + out["PluginInfo"]["RegionBottom{}".format(tile)] = bottom + out["PluginInfo"]["RegionLeft{}".format(tile)] = left + out["PluginInfo"]["RegionRight{}".format(tile)] = right - out["PluginInfo"]["RegionTop{}".format(tile)] = int(height) - (tile_y * h_space) # noqa: E501 - out["PluginInfo"]["RegionBottom{}".format(tile)] = int(height) - ((tile_y - 1) * h_space) - 1 # noqa: E501 - out["PluginInfo"]["RegionLeft{}".format(tile)] = (tile_x - 1) * w_space # noqa: E501 - out["PluginInfo"]["RegionRight{}".format(tile)] = (tile_x * w_space) - 1 # noqa: E501 - + # Tile config cfg["Tile{}".format(tile)] = new_filename cfg["Tile{}Tile".format(tile)] = new_filename cfg["Tile{}FileName".format(tile)] = new_filename - cfg["Tile{}X".format(tile)] = (tile_x - 1) * w_space - - cfg["Tile{}Y".format(tile)] = int(height) - (tile_y * h_space) - + cfg["Tile{}X".format(tile)] = left + cfg["Tile{}Y".format(tile)] = top cfg["Tile{}Width".format(tile)] = w_space cfg["Tile{}Height".format(tile)] = h_space tile += 1 + return out, cfg - - -def get_renderer_variables(renderlayer, root): - """Retrieve the extension which has been set in the VRay settings. - - Will return None if the current renderer is not VRay - For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which - start with `rs`. Use the actual node name, do NOT use the `nice name` - - Args: - renderlayer (str): the node name of the renderlayer. 
-        root (str): base path to render
-
-    Returns:
-        dict
-
-    """
-    renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer())
-    render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"])
-
-    padding = cmds.getAttr("{}.{}".format(render_attrs["node"],
-                                          render_attrs["padding"]))
-
-    filename_0 = cmds.renderSettings(
-        fullPath=True,
-        gin="#" * int(padding),
-        lut=True,
-        layer=renderlayer or lib.get_current_renderlayer())[0]
-    filename_0 = re.sub('_<RenderPass>', '_beauty',
-                        filename_0, flags=re.IGNORECASE)
-    prefix_attr = "defaultRenderGlobals.imageFilePrefix"
-    if renderer == "vray":
-        renderlayer = renderlayer.split("_")[-1]
-        # Maya's renderSettings function does not return V-Ray file extension
-        # so we get the extension from vraySettings
-        extension = cmds.getAttr("vraySettings.imageFormatStr")
-
-        # When V-Ray image format has not been switched once from default .png
-        # the getAttr command above returns None. As such we explicitly set
-        # it to `.png`
-        if extension is None:
-            extension = "png"
-
-        if extension in ["exr (multichannel)", "exr (deep)"]:
-            extension = "exr"
-
-        prefix_attr = "vraySettings.fileNamePrefix"
-        filename_prefix = cmds.getAttr(prefix_attr)
-        # we need to determine path for vray as maya `renderSettings` query
-        # does not work for vray.
-        scene = cmds.file(query=True, sceneName=True)
-        scene, _ = os.path.splitext(os.path.basename(scene))
-        filename_0 = re.sub('<scene>', scene, filename_prefix, flags=re.IGNORECASE)  # noqa: E501
-        filename_0 = re.sub('<layer>', renderlayer, filename_0, flags=re.IGNORECASE)  # noqa: E501
-        filename_0 = "{}.{}.{}".format(
-            filename_0, "#" * int(padding), extension)
-        filename_0 = os.path.normpath(os.path.join(root, filename_0))
-    elif renderer == "renderman":
-        prefix_attr = "rmanGlobals.imageFileFormat"
-    elif renderer == "redshift":
-        # mapping redshift extension dropdown values to strings
-        ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"]
-        extension = ext_mapping[
-            cmds.getAttr("redshiftOptions.imageFormat")
-        ]
-    else:
-        # Get the extension, getAttr defaultRenderGlobals.imageFormat
-        # returns an index number.
-        filename_base = os.path.basename(filename_0)
-        extension = os.path.splitext(filename_base)[-1].strip(".")
-
-    filename_prefix = cmds.getAttr(prefix_attr)
-    return {"ext": extension,
-            "filename_prefix": filename_prefix,
-            "padding": padding,
-            "filename_0": filename_0}
-
-
-class MayaSubmitDeadline(pyblish.api.InstancePlugin):
-    """Submit available render layers to Deadline.
-
-    Renders are submitted to a Deadline Web Service as
-    supplied via settings key "DEADLINE_REST_URL".
-
-    Attributes:
-        use_published (bool): Use published scene to render instead of the
-            one in work area.
- - """ - - label = "Submit to Deadline" - order = pyblish.api.IntegratorOrder + 0.1 - hosts = ["maya"] - families = ["renderlayer"] - - use_published = True - tile_assembler_plugin = "OpenPypeTileAssembler" - asset_dependencies = False - limit_groups = [] - group = "none" - - def process(self, instance): - """Plugin entry point.""" - instance.data["toBeRenderedOn"] = "deadline" - context = instance.context - - self._instance = instance - self.payload_skeleton = copy.deepcopy(payload_skeleton_template) - - # get default deadline webservice url from deadline module - self.deadline_url = instance.context.data.get("defaultDeadline") - # if custom one is set in instance, use that - if instance.data.get("deadlineUrl"): - self.deadline_url = instance.data.get("deadlineUrl") - assert self.deadline_url, "Requires Deadline Webservice URL" - - self._job_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "jobInfo", {}) - ) - - self._plugin_info = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "pluginInfo", {}) - ) - - self.limit_groups = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "limit", []) - ) - - self.group = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "group", "none") - ) - - context = instance.context - workspace = context.data["workspaceDir"] - anatomy = context.data['anatomy'] - instance.data["toBeRenderedOn"] = "deadline" - - filepath = None - patches = ( - context.data["project_settings"].get( - "deadline", {}).get( - "publish", {}).get( - "MayaSubmitDeadline", {}).get( - "scene_patches", {}) - ) - - # Handle render/export from published scene or not ------------------ - if self.use_published: - patched_files = [] - for i in context: - if "workfile" not in i.data["families"]: - continue - assert i.data["publish"] is True, ( - "Workfile (scene) must be published along") - template_data = i.data.get("anatomyData") - rep = i.data.get("representations")[0].get("name") - template_data["representation"] = rep - template_data["ext"] = rep - template_data["comment"] = None - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["path"] - filepath = os.path.normpath(template_filled) - self.log.info("Using published scene for render {}".format( - filepath)) - - if not os.path.exists(filepath): - self.log.error("published scene does not exist!") - raise - # now we need to switch scene in expected files - # because token will now point to published - # scene file and that might differ from current one - new_scene = os.path.splitext( - os.path.basename(filepath))[0] - orig_scene = os.path.splitext( - os.path.basename(context.data["currentFile"]))[0] - exp = instance.data.get("expectedFiles") - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - new_exp = {} - for aov, files in exp[0].items(): - replaced_files = [] - for f in files: - replaced_files.append( - f.replace(orig_scene, new_scene) - ) - new_exp[aov] = replaced_files - instance.data["expectedFiles"] = [new_exp] - else: - new_exp = [] - for f in exp: - new_exp.append( - f.replace(orig_scene, new_scene) - ) - instance.data["expectedFiles"] = [new_exp] - - if instance.data.get("publishRenderMetadataFolder"): - instance.data["publishRenderMetadataFolder"] = \ - 
instance.data["publishRenderMetadataFolder"].replace( - orig_scene, new_scene) - self.log.info("Scene name was switched {} -> {}".format( - orig_scene, new_scene - )) - # patch workfile is needed - if filepath not in patched_files: - patched_file = self._patch_workfile(filepath, patches) - patched_files.append(patched_file) - - all_instances = [] - for result in context.data["results"]: - if (result["instance"] is not None and - result["instance"] not in all_instances): # noqa: E128 - all_instances.append(result["instance"]) - - # fallback if nothing was set - if not filepath: - self.log.warning("Falling back to workfile") - filepath = context.data["currentFile"] - - self.log.debug(filepath) - - # Gather needed data ------------------------------------------------ - default_render_file = instance.context.data.get('project_settings')\ - .get('maya')\ - .get('create')\ - .get('CreateRender')\ - .get('default_render_image_folder') - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - dirname = os.path.join(workspace, default_render_file) - renderlayer = instance.data['setMembers'] # rs_beauty - deadline_user = context.data.get("user", getpass.getuser()) - - # Always use the original work file name for the Job name even when - # rendering is done from the published Work File. The original work - # file name is clearer because it can also have subversion strings, - # etc. which are stripped for the published file. - src_filename = os.path.basename(context.data["currentFile"]) - jobname = "%s - %s" % (src_filename, instance.name) - - # Get the variables depending on the renderer - render_variables = get_renderer_variables(renderlayer, dirname) - filename_0 = render_variables["filename_0"] - if self.use_published: - new_scene = os.path.splitext(filename)[0] - orig_scene = os.path.splitext( - os.path.basename(context.data["currentFile"]))[0] - filename_0 = render_variables["filename_0"].replace( - orig_scene, new_scene) - - output_filename_0 = filename_0 - - # Create render folder ---------------------------------------------- - try: - # Ensure render folder exists - os.makedirs(dirname) - except OSError: - pass - - # Fill in common data to payload ------------------------------------ - payload_data = {} - payload_data["filename"] = filename - payload_data["filepath"] = filepath - payload_data["jobname"] = jobname - payload_data["deadline_user"] = deadline_user - payload_data["comment"] = comment - payload_data["output_filename_0"] = output_filename_0 - payload_data["render_variables"] = render_variables - payload_data["renderlayer"] = renderlayer - payload_data["workspace"] = workspace - payload_data["dirname"] = dirname - - self.log.info("--- Submission data:") - for k, v in payload_data.items(): - self.log.info("- {}: {}".format(k, v)) - self.log.info("-" * 20) - - frame_pattern = self.payload_skeleton["JobInfo"]["Frames"] - self.payload_skeleton["JobInfo"]["Frames"] = frame_pattern.format( - start=int(self._instance.data["frameStartHandle"]), - end=int(self._instance.data["frameEndHandle"]), - step=int(self._instance.data["byFrameStep"])) - - self.payload_skeleton["JobInfo"]["Plugin"] = self._instance.data.get( - "mayaRenderPlugin", "MayaBatch") - - self.payload_skeleton["JobInfo"]["BatchName"] = src_filename - # Job name, as seen in Monitor - self.payload_skeleton["JobInfo"]["Name"] = jobname - # Arbitrary username, for visualisation in Monitor - self.payload_skeleton["JobInfo"]["UserName"] = deadline_user - # Set job priority - 
self.payload_skeleton["JobInfo"]["Priority"] = \ - self._instance.data.get("priority", 50) - - if self.group != "none" and self.group: - self.payload_skeleton["JobInfo"]["Group"] = self.group - - if self.limit_groups: - self.payload_skeleton["JobInfo"]["LimitGroups"] = \ - ",".join(self.limit_groups) - # Optional, enable double-click to preview rendered - # frames from Deadline Monitor - self.payload_skeleton["JobInfo"]["OutputDirectory0"] = \ - os.path.dirname(output_filename_0).replace("\\", "/") - self.payload_skeleton["JobInfo"]["OutputFilename0"] = \ - output_filename_0.replace("\\", "/") - - self.payload_skeleton["JobInfo"]["Comment"] = comment - self.payload_skeleton["PluginInfo"]["RenderLayer"] = renderlayer - - # Adding file dependencies. - dependencies = instance.context.data["fileDependencies"] - dependencies.append(filepath) - if self.asset_dependencies: - for dependency in dependencies: - key = "AssetDependency" + str(dependencies.index(dependency)) - self.payload_skeleton["JobInfo"][key] = dependency - - # Handle environments ----------------------------------------------- - # We need those to pass them to pype for it to set correct context - keys = [ - "FTRACK_API_KEY", - "FTRACK_API_USER", - "FTRACK_SERVER", - "AVALON_PROJECT", - "AVALON_ASSET", - "AVALON_TASK", - "AVALON_APP_NAME", - "OPENPYPE_DEV", - "OPENPYPE_LOG_NO_COLORS" - ] - # Add mongo url if it's enabled - if instance.context.data.get("deadlinePassMongoUrl"): - keys.append("OPENPYPE_MONGO") - - environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) - environment["OPENPYPE_LOG_NO_COLORS"] = "1" - environment["OPENPYPE_MAYA_VERSION"] = cmds.about(v=True) - # to recognize job from PYPE for turning Event On/Off - environment["OPENPYPE_RENDER_JOB"] = "1" - self.payload_skeleton["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - # Add options from RenderGlobals------------------------------------- - render_globals = instance.data.get("renderGlobals", {}) - self.payload_skeleton["JobInfo"].update(render_globals) - - # Submit preceding export jobs ------------------------------------- - export_job = None - assert not all(x in instance.data["families"] - for x in ['vrayscene', 'assscene']), ( - "Vray Scene and Ass Scene options are mutually exclusive") - if "vrayscene" in instance.data["families"]: - export_job = self._submit_export(payload_data, "vray") - - if "assscene" in instance.data["families"]: - export_job = self._submit_export(payload_data, "arnold") - - # Prepare main render job ------------------------------------------- - if "vrayscene" in instance.data["families"]: - payload = self._get_vray_render_payload(payload_data) - elif "assscene" in instance.data["families"]: - payload = self._get_arnold_render_payload(payload_data) - else: - payload = self._get_maya_payload(payload_data) - - # Add export job as dependency -------------------------------------- - if export_job: - payload["JobInfo"]["JobDependency0"] = export_job - - # Add list of expected files to job --------------------------------- - exp = instance.data.get("expectedFiles") - exp_index = 0 - output_filenames = {} - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - for _aov, files in exp[0].items(): - col, rem = clique.assemble(files) - if not col and rem: - # we couldn't find any collections but have - # individual files. 
- assert len(rem) == 1, ("Found multiple non related files " - "to render, don't know what to do " - "with them.") - output_file = rem[0] - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - else: - output_file = col[0].format('{head}{padding}{tail}') - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - - output_filenames['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - exp_index += 1 - else: - col, rem = clique.assemble(exp) - if not col and rem: - # we couldn't find any collections but have - # individual files. - assert len(rem) == 1, ("Found multiple non related files " - "to render, don't know what to do " - "with them.") - - output_file = rem[0] - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - else: - output_file = col[0].format('{head}{padding}{tail}') - if not instance.data.get("tileRendering"): - payload['JobInfo']['OutputFilename' + str(exp_index)] = output_file # noqa: E501 - - output_filenames['OutputFilename' + str(exp_index)] = output_file - - plugin = payload["JobInfo"]["Plugin"] - self.log.info("using render plugin : {}".format(plugin)) - - # Store output dir for unified publisher (filesequence) - instance.data["outputDir"] = os.path.dirname(output_filename_0) - - self.preflight_check(instance) - - # add jobInfo and pluginInfo variables from Settings - payload["JobInfo"].update(self._job_info) - payload["PluginInfo"].update(self._plugin_info) - - # Prepare tiles data ------------------------------------------------ - if instance.data.get("tileRendering"): - # if we have sequence of files, we need to create tile job for - # every frame - - payload["JobInfo"]["TileJob"] = True - payload["JobInfo"]["TileJobTilesInX"] = instance.data.get("tilesX") - payload["JobInfo"]["TileJobTilesInY"] = instance.data.get("tilesY") - payload["PluginInfo"]["ImageHeight"] = instance.data.get("resolutionHeight") # noqa: E501 - payload["PluginInfo"]["ImageWidth"] = instance.data.get("resolutionWidth") # noqa: E501 - payload["PluginInfo"]["RegionRendering"] = True - - assembly_payload = { - "AuxFiles": [], - "JobInfo": { - "BatchName": payload["JobInfo"]["BatchName"], - "Frames": 1, - "Name": "{} - Tile Assembly Job".format( - payload["JobInfo"]["Name"]), - "OutputDirectory0": - payload["JobInfo"]["OutputDirectory0"].replace( - "\\", "/"), - "Plugin": self.tile_assembler_plugin, - "MachineLimit": 1 - }, - "PluginInfo": { - "CleanupTiles": 1, - "ErrorOnMissing": True - } - } - assembly_payload["JobInfo"].update(output_filenames) - assembly_payload["JobInfo"]["Priority"] = self._instance.data.get( - "priority", 50) - assembly_payload["JobInfo"]["UserName"] = deadline_user - - frame_payloads = [] - assembly_payloads = [] - - R_FRAME_NUMBER = re.compile(r".+\.(?P[0-9]+)\..+") # noqa: N806, E501 - REPL_FRAME_NUMBER = re.compile(r"(.+\.)([0-9]+)(\..+)") # noqa: N806, E501 - - if isinstance(exp[0], dict): - # we have aovs and we need to iterate over them - # get files from `beauty` - files = exp[0].get("beauty") - # assembly files are used for assembly jobs as we need to put - # together all AOVs - assembly_files = list( - itertools.chain.from_iterable( - [f for _, f in exp[0].items()])) - if not files: - # if beauty doesn't exists, use first aov we found - files = exp[0].get(list(exp[0].keys())[0]) - else: - files = exp - assembly_files = files - - frame_jobs 
= {} - - file_index = 1 - for file in files: - frame = re.search(R_FRAME_NUMBER, file).group("frame") - new_payload = copy.deepcopy(payload) - new_payload["JobInfo"]["Name"] = \ - "{} (Frame {} - {} tiles)".format( - payload["JobInfo"]["Name"], - frame, - instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 - ) - self.log.info( - "... preparing job {}".format( - new_payload["JobInfo"]["Name"])) - new_payload["JobInfo"]["TileJobFrame"] = frame - - tiles_data = _format_tiles( - file, 0, - instance.data.get("tilesX"), - instance.data.get("tilesY"), - instance.data.get("resolutionWidth"), - instance.data.get("resolutionHeight"), - payload["PluginInfo"]["OutputFilePrefix"] - )[0] - new_payload["JobInfo"].update(tiles_data["JobInfo"]) - new_payload["PluginInfo"].update(tiles_data["PluginInfo"]) - - job_hash = hashlib.sha256("{}_{}".format(file_index, file)) - frame_jobs[frame] = job_hash.hexdigest() - new_payload["JobInfo"]["ExtraInfo0"] = job_hash.hexdigest() - new_payload["JobInfo"]["ExtraInfo1"] = file - - frame_payloads.append(new_payload) - file_index += 1 - - file_index = 1 - for file in assembly_files: - frame = re.search(R_FRAME_NUMBER, file).group("frame") - new_assembly_payload = copy.deepcopy(assembly_payload) - new_assembly_payload["JobInfo"]["Name"] = \ - "{} (Frame {})".format( - assembly_payload["JobInfo"]["Name"], - frame) - new_assembly_payload["JobInfo"]["OutputFilename0"] = re.sub( - REPL_FRAME_NUMBER, - "\\1{}\\3".format("#" * len(frame)), file) - - new_assembly_payload["PluginInfo"]["Renderer"] = self._instance.data["renderer"] # noqa: E501 - new_assembly_payload["JobInfo"]["ExtraInfo0"] = frame_jobs[frame] # noqa: E501 - new_assembly_payload["JobInfo"]["ExtraInfo1"] = file - assembly_payloads.append(new_assembly_payload) - file_index += 1 - - self.log.info( - "Submitting tile job(s) [{}] ...".format(len(frame_payloads))) - - url = "{}/api/jobs".format(self.deadline_url) - tiles_count = instance.data.get("tilesX") * instance.data.get("tilesY") # noqa: E501 - - for tile_job in frame_payloads: - response = self._requests_post(url, json=tile_job) - if not response.ok: - raise Exception(response.text) - - job_id = response.json()["_id"] - hash = response.json()["Props"]["Ex0"] - - for assembly_job in assembly_payloads: - if assembly_job["JobInfo"]["ExtraInfo0"] == hash: - assembly_job["JobInfo"]["JobDependency0"] = job_id - - for assembly_job in assembly_payloads: - file = assembly_job["JobInfo"]["ExtraInfo1"] - # write assembly job config files - now = datetime.now() - - config_file = os.path.join( - os.path.dirname(output_filename_0), - "{}_config_{}.txt".format( - os.path.splitext(file)[0], - now.strftime("%Y_%m_%d_%H_%M_%S") - ) - ) - - try: - if not os.path.isdir(os.path.dirname(config_file)): - os.makedirs(os.path.dirname(config_file)) - except OSError: - # directory is not available - self.log.warning( - "Path is unreachable: `{}`".format( - os.path.dirname(config_file))) - - # add config file as job auxFile - assembly_job["AuxFiles"] = [config_file] - - with open(config_file, "w") as cf: - print("TileCount={}".format(tiles_count), file=cf) - print("ImageFileName={}".format(file), file=cf) - print("ImageWidth={}".format( - instance.data.get("resolutionWidth")), file=cf) - print("ImageHeight={}".format( - instance.data.get("resolutionHeight")), file=cf) - - tiles = _format_tiles( - file, 0, - instance.data.get("tilesX"), - instance.data.get("tilesY"), - instance.data.get("resolutionWidth"), - instance.data.get("resolutionHeight"), - 
payload["PluginInfo"]["OutputFilePrefix"] - )[1] - sorted(tiles) - for k, v in tiles.items(): - print("{}={}".format(k, v), file=cf) - - job_idx = 1 - instance.data["assemblySubmissionJobs"] = [] - for ass_job in assembly_payloads: - self.log.info("submitting assembly job {} of {}".format( - job_idx, len(assembly_payloads) - )) - self.log.debug(json.dumps(ass_job, indent=4, sort_keys=True)) - response = self._requests_post(url, json=ass_job) - if not response.ok: - raise Exception(response.text) - - instance.data["assemblySubmissionJobs"].append( - response.json()["_id"]) - job_idx += 1 - - instance.data["jobBatchName"] = payload["JobInfo"]["BatchName"] - self.log.info("Setting batch name on instance: {}".format( - instance.data["jobBatchName"])) - else: - # Submit job to farm -------------------------------------------- - self.log.info("Submitting ...") - self.log.debug(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) - if not response.ok: - raise Exception(response.text) - instance.data["deadlineSubmissionJob"] = response.json() - - def _get_maya_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - - if not self.asset_dependencies: - job_info_ext = {} - - else: - job_info_ext = { - # Asset dependency to wait for at least the scene file to sync. - "AssetDependency0": data["filepath"], - } - - plugin_info = { - "SceneFile": data["filepath"], - # Output directory and filename - "OutputFilePath": data["dirname"].replace("\\", "/"), - "OutputFilePrefix": data["render_variables"]["filename_prefix"], # noqa: E501 - - # Only render layers are considered renderable in this pipeline - "UsingRenderLayers": True, - - # Render only this layer - "RenderLayer": data["renderlayer"], - - # Determine which renderer to use from the file itself - "Renderer": self._instance.data["renderer"], - - # Resolve relative references - "ProjectPath": data["workspace"], - } - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info) - return payload - - def _get_vray_export_payload(self, data): - payload = copy.deepcopy(self.payload_skeleton) - vray_settings = cmds.ls(type="VRaySettingsNode") - node = vray_settings[0] - template = cmds.getAttr("{}.vrscene_filename".format(node)) - scene, _ = os.path.splitext(data["filename"]) - first_file = self.format_vray_output_filename(scene, template) - first_file = "{}/{}".format(data["workspace"], first_file) - output = os.path.dirname(first_file) - job_info_ext = { - # Job name, as seen in Monitor - "Name": "Export {} [{}-{}]".format( - data["jobname"], - int(self._instance.data["frameStartHandle"]), - int(self._instance.data["frameEndHandle"])), - - "Plugin": self._instance.data.get( - "mayaRenderPlugin", "MayaPype"), - "FramesPerTask": self._instance.data.get("framesPerTask", 1) - } - - plugin_info_ext = { - # Renderer - "Renderer": "vray", - # Input - "SceneFile": data["filepath"], - "SkipExistingFrames": True, - "UsingRenderLayers": True, - "UseLegacyRenderLayers": True, - "RenderLayer": data["renderlayer"], - "ProjectPath": data["workspace"], - "OutputFilePath": output - } - - payload["JobInfo"].update(job_info_ext) - payload["PluginInfo"].update(plugin_info_ext) - return payload - - def _get_arnold_export_payload(self, data): - - try: - from openpype.scripts import export_maya_ass_job - except Exception: - raise AssertionError( - "Expected module 'export_maya_ass_job' to be available") - 
-        module_path = export_maya_ass_job.__file__
-        if module_path.endswith(".pyc"):
-            module_path = module_path[: -len(".pyc")] + ".py"
-
-        script = os.path.normpath(module_path)
-
-        payload = copy.deepcopy(self.payload_skeleton)
-        job_info_ext = {
-            # Job name, as seen in Monitor
-            "Name": "Export {} [{}-{}]".format(
-                data["jobname"],
-                int(self._instance.data["frameStartHandle"]),
-                int(self._instance.data["frameEndHandle"])),
-
-            "Plugin": "Python",
-            "FramesPerTask": self._instance.data.get("framesPerTask", 1),
-            "Frames": 1
-        }
-
-        plugin_info_ext = {
-            "Version": "3.6",
-            "ScriptFile": script,
-            "Arguments": "",
-            "SingleFrameOnly": "True",
-        }
-        payload["JobInfo"].update(job_info_ext)
-        payload["PluginInfo"].update(plugin_info_ext)
-
-        envs = [
-            v
-            for k, v in payload["JobInfo"].items()
-            if k.startswith("EnvironmentKeyValue")
-        ]
-
-        # add app name to environment
-        envs.append(
-            "AVALON_APP_NAME={}".format(os.environ.get("AVALON_APP_NAME")))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_RENDER_LAYER={}".format(data["renderlayer"]))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_SCENE_FILE={}".format(data["filepath"]))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_OUTPUT={}".format(
-                payload['JobInfo']['OutputFilename0']))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_START={}".format(
-                int(self._instance.data["frameStartHandle"])))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_END={}".format(
-                int(self._instance.data["frameEndHandle"])))
-        envs.append(
-            "OPENPYPE_ASS_EXPORT_STEP={}".format(1))
-
-        for i, e in enumerate(envs):
-            payload["JobInfo"]["EnvironmentKeyValue{}".format(i)] = e
-        return payload
-
-    def _get_vray_render_payload(self, data):
-        payload = copy.deepcopy(self.payload_skeleton)
-        vray_settings = cmds.ls(type="VRaySettingsNode")
-        node = vray_settings[0]
-        template = cmds.getAttr("{}.vrscene_filename".format(node))
-        # "vrayscene/<Scene>/<Scene>_<Layer>/<Layer>"
-
-        scene, _ = os.path.splitext(data["filename"])
-        first_file = self.format_vray_output_filename(scene, template)
-        first_file = "{}/{}".format(data["workspace"], first_file)
-        job_info_ext = {
-            "Name": "Render {} [{}-{}]".format(
-                data["jobname"],
-                int(self._instance.data["frameStartHandle"]),
-                int(self._instance.data["frameEndHandle"])),
-
-            "Plugin": "Vray",
-            "OverrideTaskExtraInfoNames": False,
-        }
-
-        plugin_info = {
-            "InputFilename": first_file,
-            "SeparateFilesPerFrame": True,
-            "VRayEngine": "V-Ray",
-
-            "Width": self._instance.data["resolutionWidth"],
-            "Height": self._instance.data["resolutionHeight"],
-            "OutputFilePath": payload["JobInfo"]["OutputDirectory0"],
-            "OutputFileName": payload["JobInfo"]["OutputFilename0"]
-        }
-
-        payload["JobInfo"].update(job_info_ext)
-        payload["PluginInfo"].update(plugin_info)
-        return payload
-
-    def _get_arnold_render_payload(self, data):
-        payload = copy.deepcopy(self.payload_skeleton)
-        ass_file, _ = os.path.splitext(data["output_filename_0"])
-        first_file = ass_file + ".ass"
-        job_info_ext = {
-            "Name": "Render {} [{}-{}]".format(
-                data["jobname"],
-                int(self._instance.data["frameStartHandle"]),
-                int(self._instance.data["frameEndHandle"])),
-
-            "Plugin": "Arnold",
-            "OverrideTaskExtraInfoNames": False,
-        }
-
-        plugin_info = {
-            "ArnoldFile": first_file,
-        }
-
-        payload["JobInfo"].update(job_info_ext)
-        payload["PluginInfo"].update(plugin_info)
-        return payload
-
-    def _submit_export(self, data, format):
-        if format == "vray":
-            payload = self._get_vray_export_payload(data)
-            self.log.info("Submitting vrscene export job.")
-        elif format == "arnold":
-            payload = self._get_arnold_export_payload(data)
self.log.info("Submitting ass export job.") - - url = "{}/api/jobs".format(self.deadline_url) - response = self._requests_post(url, json=payload) - if not response.ok: - self.log.error("Submition failed!") - self.log.error(response.status_code) - self.log.error(response.content) - self.log.debug(payload) - raise RuntimeError(response.text) - - dependency = response.json() - return dependency["_id"] - - def preflight_check(self, instance): - """Ensure the startFrame, endFrame and byFrameStep are integers.""" - for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"): - value = instance.data[key] - - if int(value) == value: - continue - - self.log.warning( - "%f=%d was rounded off to nearest integer" - % (value, int(value)) - ) - - def _requests_post(self, *args, **kwargs): - """Wrap request post method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.post(*args, **kwargs) - - def _requests_get(self, *args, **kwargs): - """Wrap request get method. - - Disabling SSL certificate validation if ``DONT_VERIFY_SSL`` environment - variable is found. This is useful when Deadline or Muster server are - running with self-signed certificates and their certificate is not - added to trusted certificates on client machines. - - Warning: - Disabling SSL certificate validation is defeating one line - of defense SSL is providing and it is not recommended. - - """ - if 'verify' not in kwargs: - kwargs['verify'] = not os.getenv("OPENPYPE_DONT_VERIFY_SSL", True) - # add 10sec timeout before bailing out - kwargs['timeout'] = 10 - return requests.get(*args, **kwargs) - - def format_vray_output_filename(self, filename, template, dir=False): - """Format the expected output file of the Export job. - - Example: - /_/ - "shot010_v006/shot010_v006_CHARS/CHARS" - - Args: - instance: - filename(str): - dir(bool): - - Returns: - str - - """ - def smart_replace(string, key_values): - new_string = string - for key, value in key_values.items(): - new_string = new_string.replace(key, value) - return new_string - - # Ensure filename has no extension - file_name, _ = os.path.splitext(filename) - - layer = self._instance.data['setMembers'] - - # Reformat without tokens - output_path = smart_replace( - template, - {"": file_name, - "": layer}) - - if dir: - return output_path.replace("\\", "/") - - start_frame = int(self._instance.data["frameStartHandle"]) - filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame) - - result = filename_zero.replace("\\", "/") - - return result - - def _patch_workfile(self, file, patches): - # type: (str, dict) -> [str, None] - """Patch Maya scene. - - This will take list of patches (lines to add) and apply them to - *published* Maya scene file (that is used later for rendering). - - Patches are dict with following structure:: - { - "name": "Name of patch", - "regex": "regex of line before patch", - "line": "line to insert" - } - - Args: - file (str): File to patch. - patches (dict): Dictionary defining patches. 
- - Returns: - str: Patched file path or None - - """ - if os.path.splitext(file)[1].lower() != ".ma" or not patches: - return None - - compiled_regex = [re.compile(p["regex"]) for p in patches] - with open(file, "r+") as pf: - scene_data = pf.readlines() - for ln, line in enumerate(scene_data): - for i, r in enumerate(compiled_regex): - if re.match(r, line): - scene_data.insert(ln + 1, patches[i]["line"]) - pf.seek(0) - pf.writelines(scene_data) - pf.truncate() - self.log.info( - "Applied {} patch to scene.".format( - patches[i]["name"])) - return file diff --git a/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py new file mode 100644 index 0000000000..bab6591c7f --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/submit_maya_remote_publish_deadline.py @@ -0,0 +1,140 @@ +import os +import requests +from datetime import datetime + +from maya import cmds + +from openpype.pipeline import legacy_io, PublishXmlValidationError +from openpype.settings import get_project_settings +from openpype.tests.lib import is_in_tests + +import pyblish.api + + +class MayaSubmitRemotePublishDeadline(pyblish.api.InstancePlugin): + """Submit Maya scene to perform a local publish in Deadline. + + Publishing in Deadline can be helpful for scenes that publish very slow. + This way it can process in the background on another machine without the + Artist having to wait for the publish to finish on their local machine. + + Submission is done through the Deadline Web Service. DL then triggers + `openpype/scripts/remote_publish.py`. + + Each publishable instance creates its own full publish job. + + Different from `ProcessSubmittedJobOnFarm` which creates publish job + depending on metadata json containing context and instance data of + rendered files. + """ + + label = "Submit Scene to Deadline" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["publish.farm"] + targets = ["local"] + + def process(self, instance): + project_name = instance.context.data["projectName"] + # TODO settings can be received from 'context.data["project_settings"]' + settings = get_project_settings(project_name) + # use setting for publish job on farm, no reason to have it separately + deadline_publish_job_sett = (settings["deadline"] + ["publish"] + ["ProcessSubmittedJobOnFarm"]) + + # Ensure no errors so far + if not (all(result["success"] + for result in instance.context.data["results"])): + raise PublishXmlValidationError("Publish process has errors") + + if not instance.data["publish"]: + self.log.warning("No active instances found. 
" + "Skipping submission..") + return + + scene = instance.context.data["currentFile"] + scenename = os.path.basename(scene) + + job_name = "{scene} [PUBLISH]".format(scene=scenename) + batch_name = "{code} - {scene}".format(code=project_name, + scene=scenename) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + + # Generate the payload for Deadline submission + payload = { + "JobInfo": { + "Plugin": "MayaBatch", + "BatchName": batch_name, + "Name": job_name, + "UserName": instance.context.data["user"], + "Comment": instance.context.data.get("comment", ""), + # "InitialStatus": state + "Department": deadline_publish_job_sett["deadline_department"], + "ChunkSize": deadline_publish_job_sett["deadline_chunk_size"], + "Priority": deadline_publish_job_sett["deadline_priority"], + "Group": deadline_publish_job_sett["deadline_group"], + "Pool": deadline_publish_job_sett["deadline_pool"], + }, + "PluginInfo": { + + "Build": None, # Don't force build + "StrictErrorChecking": True, + "ScriptJob": True, + + # Inputs + "SceneFile": scene, + "ScriptFilename": "{OPENPYPE_REPOS_ROOT}/openpype/scripts/remote_publish.py", # noqa + + # Mandatory for Deadline + "Version": cmds.about(version=True), + + # Resolve relative references + "ProjectPath": cmds.workspace(query=True, + rootDirectory=True), + + }, + + # Mandatory for Deadline, may be empty + "AuxFiles": [] + } + + # Include critical environment variables with submission + api.Session + keys = [ + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "OPENPYPE_VERSION" + ] + environment = dict({key: os.environ[key] for key in keys + if key in os.environ}, **legacy_io.Session) + + # TODO replace legacy_io with context.data + environment["AVALON_PROJECT"] = project_name + environment["AVALON_ASSET"] = legacy_io.Session["AVALON_ASSET"] + environment["AVALON_TASK"] = legacy_io.Session["AVALON_TASK"] + environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") + environment["OPENPYPE_LOG_NO_COLORS"] = "1" + environment["OPENPYPE_REMOTE_JOB"] = "1" + environment["OPENPYPE_USERNAME"] = instance.context.data["user"] + environment["OPENPYPE_PUBLISH_SUBSET"] = instance.data["subset"] + environment["OPENPYPE_REMOTE_PUBLISH"] = "1" + + payload["JobInfo"].update({ + "EnvironmentKeyValue%d" % index: "{key}={value}".format( + key=key, + value=environment[key] + ) for index, key in enumerate(environment) + }) + + self.log.info("Submitting Deadline job ...") + deadline_url = instance.context.data["defaultDeadline"] + # if custom one is set in instance, use that + if instance.data.get("deadlineUrl"): + deadline_url = instance.data.get("deadlineUrl") + assert deadline_url, "Requires Deadline Webservice URL" + url = "{}/api/jobs".format(deadline_url) + response = requests.post(url, json=payload, timeout=10) + if not response.ok: + raise Exception(response.text) diff --git a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py index d6bd11620d..d1948d8d50 100644 --- a/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py +++ b/openpype/modules/deadline/plugins/publish/submit_nuke_deadline.py @@ -2,12 +2,14 @@ import os import re import json import getpass +from datetime import datetime import requests - -from avalon import api import pyblish.api + import nuke +from openpype.pipeline import legacy_io +from openpype.tests.lib import is_in_tests class NukeSubmitDeadline(pyblish.api.InstancePlugin): @@ -23,12 +25,12 @@ class 
NukeSubmitDeadline(pyblish.api.InstancePlugin): hosts = ["nuke", "nukestudio"] families = ["render.farm", "prerender.farm"] optional = True + targets = ["local"] # presets priority = 50 chunk_size = 1 - primary_pool = "" - secondary_pool = "" + concurrent_tasks = 1 group = "" department = "" limit_groups = {} @@ -40,7 +42,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["toBeRenderedOn"] = "deadline" families = instance.data["families"] - node = instance[0] + node = instance.data["transientData"]["node"] context = instance.context # get default deadline webservice url from deadline module @@ -55,8 +57,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) self._deadline_user = context.data.get( "deadlineUser", getpass.getuser()) - self._frame_start = int(instance.data["frameStartHandle"]) - self._frame_end = int(instance.data["frameEndHandle"]) + submit_frame_start = int(instance.data["frameStartHandle"]) + submit_frame_end = int(instance.data["frameEndHandle"]) # get output path render_path = instance.data['path'] @@ -80,15 +82,14 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Using published scene for render {}".format(script_path) ) - # exception for slate workflow - if "slate" in instance.data["families"]: - self._frame_start -= 1 - - response = self.payload_submit(instance, - script_path, - render_path, - node.name() - ) + response = self.payload_submit( + instance, + script_path, + render_path, + node.name(), + submit_frame_start, + submit_frame_end + ) # Store output dir for unified publisher (filesequence) instance.data["deadlineSubmissionJob"] = response.json() instance.data["outputDir"] = os.path.dirname( @@ -101,15 +102,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): script_path = baking_script["bakeScriptPath"] exe_node_name = baking_script["bakeWriteNodeName"] - # exception for slate workflow - if "slate" in instance.data["families"]: - self._frame_start += 1 - resp = self.payload_submit( instance, script_path, render_path, exe_node_name, + submit_frame_start, + submit_frame_end, response.json() ) @@ -117,6 +116,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["deadlineSubmissionJob"] = resp.json() instance.data["publishJobState"] = "Suspended" + # add to list of job Id + if not instance.data.get("bakingSubmissionJobs"): + instance.data["bakingSubmissionJobs"] = [] + + instance.data["bakingSubmissionJobs"].append( + resp.json()["_id"]) + # redefinition of families if "render.farm" in families: instance.data['family'] = 'write' @@ -126,16 +132,22 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): families.insert(0, "prerender") instance.data["families"] = families - def payload_submit(self, - instance, - script_path, - render_path, - exe_node_name, - responce_data=None - ): + def payload_submit( + self, + instance, + script_path, + render_path, + exe_node_name, + start_frame, + end_frame, + responce_data=None + ): render_dir = os.path.normpath(os.path.dirname(render_path)) - script_name = os.path.basename(script_path) - jobname = "%s - %s" % (script_name, instance.name) + batch_name = os.path.basename(script_path) + jobname = "%s - %s" % (batch_name, instance.name) + if is_in_tests(): + batch_name += datetime.now().strftime("%d%m%Y%H%M%S") + output_filename_0 = self.preview_fname(render_path) @@ -149,11 +161,16 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): pass # define chunk and priority - chunk_size = 
instance.data.get("deadlineChunkSize") + chunk_size = instance.data["deadlineChunkSize"] if chunk_size == 0 and self.chunk_size: chunk_size = self.chunk_size - priority = instance.data.get("deadlinePriority") + # define chunk and priority + concurrent_tasks = instance.data["deadlineConcurrentTasks"] + if concurrent_tasks == 0 and self.concurrent_tasks: + concurrent_tasks = self.concurrent_tasks + + priority = instance.data["deadlinePriority"] if not priority: priority = self.priority @@ -164,7 +181,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): payload = { "JobInfo": { # Top-level group name - "BatchName": script_name, + "BatchName": batch_name, # Asset dependency to wait for at least the scene file to sync. # "AssetDependency0": script_path, @@ -177,16 +194,18 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Priority": priority, "ChunkSize": chunk_size, + "ConcurrentTasks": concurrent_tasks, + "Department": self.department, - "Pool": self.primary_pool, - "SecondaryPool": self.secondary_pool, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "Group": self.group, "Plugin": "Nuke", "Frames": "{start}-{end}".format( - start=self._frame_start, - end=self._frame_end + start=start_frame, + end=end_frame ), "Comment": self._comment, @@ -236,7 +255,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): keys = [ "PYTHONPATH", "PATH", - "AVALON_SCHEMA", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK", @@ -247,7 +265,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "PYBLISHPLUGINPATH", "NUKE_PATH", "TOOL_ENV", - "FOUNDRY_LICENSE" + "FOUNDRY_LICENSE", + "OPENPYPE_VERSION" ] # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): @@ -258,7 +277,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): keys += self.env_allowed_keys environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) + if key in os.environ}, **legacy_io.Session) for _path in os.environ: if _path.lower().startswith('openpype_'): @@ -287,7 +306,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # adding expectied files to instance.data - self.expected_files(instance, render_path) + self.expected_files( + instance, + render_path, + start_frame, + end_frame + ) + self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) response = requests.post(self.deadline_url, json=payload, timeout=10) @@ -333,15 +358,19 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.log.debug("_ path: `{}`".format(path)) return path - def expected_files(self, - instance, - path): + def expected_files( + self, + instance, + path, + start_frame, + end_frame + ): """ Create expected files in instance data """ if not instance.data.get("expectedFiles"): instance.data["expectedFiles"] = [] - dir = os.path.dirname(path) + dirname = os.path.dirname(path) file = os.path.basename(path) if "#" in file: @@ -353,9 +382,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): instance.data["expectedFiles"].append(path) return - for i in range(self._frame_start, (self._frame_end + 1)): + if instance.data.get("slate"): + start_frame -= 1 + + for i in range(start_frame, (end_frame + 1)): instance.data["expectedFiles"].append( - os.path.join(dir, (file % i)).replace("\\", "/")) + os.path.join(dirname, (file % i)).replace("\\", "/")) def get_limit_groups(self): """Search for limit group nodes and return group name. 
diff --git a/openpype/modules/deadline/plugins/publish/submit_publish_job.py b/openpype/modules/deadline/plugins/publish/submit_publish_job.py index fad4d14ea0..7e39a644a2 100644 --- a/openpype/modules/deadline/plugins/publish/submit_publish_job.py +++ b/openpype/modules/deadline/plugins/publish/submit_publish_job.py @@ -7,24 +7,38 @@ import re from copy import copy, deepcopy import requests import clique -import openpype.api - -from avalon import api, io import pyblish.api -from openpype.pipeline import get_representation_path +from openpype.client import ( + get_last_version_by_subset_name, + get_representations, +) +from openpype.pipeline import ( + get_representation_path, + legacy_io, +) +from openpype.tests.lib import is_in_tests +from openpype.pipeline.farm.patterning import match_aov_pattern -def get_resources(version, extension=None): +def get_resources(project_name, version, extension=None): """Get the files from the specific version.""" - query = {"type": "representation", "parent": version["_id"]} + + # TODO this functions seems to be weird + # - it's looking for representation with one extension or first (any) + # representation from a version? + # - not sure how this should work, maybe it does for specific use cases + # but probably can't be used for all resources from 2D workflows + extensions = None if extension: - query["name"] = extension - - representation = io.find_one(query) - assert representation, "This is a bug" + extensions = [extension] + repre_docs = list(get_representations( + project_name, version_ids=[version["_id"]], extensions=extensions + )) + assert repre_docs, "This is a bug" + representation = repre_docs[0] directory = get_representation_path(representation) print("Source: ", directory) resources = sorted( @@ -101,35 +115,32 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.2 icon = "tractor" deadline_plugin = "OpenPype" + targets = ["local"] hosts = ["fusion", "maya", "nuke", "celaction", "aftereffects", "harmony"] families = ["render.farm", "prerender.farm", "renderlayer", "imagesequence", "vrayscene"] - aov_filter = {"maya": [r".*(?:[\._-])*([Bb]eauty)(?:[\.|_])*.*"], + aov_filter = {"maya": [r".*([Bb]eauty).*"], "aftereffects": [r".*"], # for everything from AE "harmony": [r".*"], # for everything from AE "celaction": [r".*"]} - enviro_filter = [ + environ_job_filter = [ + "OPENPYPE_METADATA_FILE" + ] + + environ_keys = [ "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "OPENPYPE_METADATA_FILE", - "AVALON_PROJECT", - "AVALON_ASSET", - "AVALON_TASK", "AVALON_APP_NAME", - "OPENPYPE_PUBLISH_JOB" - - "OPENPYPE_LOG_NO_COLORS", "OPENPYPE_USERNAME", - "OPENPYPE_RENDER_JOB", - "OPENPYPE_PUBLISH_JOB" + "OPENPYPE_VERSION" ] - # custom deadline atributes + # custom deadline attributes deadline_department = "" deadline_pool = "" deadline_pool_secondary = "" @@ -143,7 +154,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # mapping of instance properties to be transfered to new instance for every # specified family instance_transfer = { - "slate": ["slateFrame"], + "slate": ["slateFrames", "slate"], "review": ["lutPath"], "render2d": ["bakingNukeScripts", "version"], "renderlayer": ["convertToScanline"] @@ -196,6 +207,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): more universal code. Muster post job is sent directly by Muster submitter, so this type of code isn't necessary for it. 
+ Returns: + (str): deadline_publish_job_id """ data = instance.data.copy() subset = data["subset"] @@ -207,34 +220,51 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instance_version = instance.data.get("version") # take this if exists if instance_version != 1: override_version = instance_version - output_dir = self._get_publish_folder(instance.context.data['anatomy'], - deepcopy( - instance.data["anatomyData"]), - instance.data.get("asset"), - instances[0]["subset"], - 'render', - override_version) + output_dir = self._get_publish_folder( + instance.context.data['anatomy'], + deepcopy(instance.data["anatomyData"]), + instance.data.get("asset"), + instances[0]["subset"], + 'render', + override_version + ) # Transfer the environment from the original job to this dependent # job so they use the same environment metadata_path, roothless_metadata_path = \ self._create_metadata_path(instance) - environment = job["Props"].get("Env", {}) - environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"] - environment["AVALON_ASSET"] = io.Session["AVALON_ASSET"] - environment["AVALON_TASK"] = io.Session["AVALON_TASK"] - environment["AVALON_APP_NAME"] = os.environ.get("AVALON_APP_NAME") - environment["OPENPYPE_LOG_NO_COLORS"] = "1" - environment["OPENPYPE_USERNAME"] = instance.context.data["user"] - environment["OPENPYPE_PUBLISH_JOB"] = "1" - environment["OPENPYPE_RENDER_JOB"] = "0" + environment = { + "AVALON_PROJECT": legacy_io.Session["AVALON_PROJECT"], + "AVALON_ASSET": legacy_io.Session["AVALON_ASSET"], + "AVALON_TASK": legacy_io.Session["AVALON_TASK"], + "OPENPYPE_USERNAME": instance.context.data["user"], + "OPENPYPE_PUBLISH_JOB": "1", + "OPENPYPE_RENDER_JOB": "0", + "OPENPYPE_REMOTE_JOB": "0", + "OPENPYPE_LOG_NO_COLORS": "1", + "IS_TEST": str(int(is_in_tests())) + } + + # add environments from self.environ_keys + for env_key in self.environ_keys: + if os.getenv(env_key): + environment[env_key] = os.environ[env_key] + + # pass environment keys from self.environ_job_filter + job_environ = job["Props"].get("Env", {}) + for env_j_key in self.environ_job_filter: + if job_environ.get(env_j_key): + environment[env_j_key] = job_environ[env_j_key] + # Add mongo url if it's enabled if instance.context.data.get("deadlinePassMongoUrl"): mongo_url = os.environ.get("OPENPYPE_MONGO") if mongo_url: environment["OPENPYPE_MONGO"] = mongo_url + priority = self.deadline_priority or instance.data.get("priority", 50) + args = [ "--headless", 'publish', @@ -243,6 +273,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "--targets", "farm" ] + if is_in_tests(): + args.append("--automatic-tests") + # Generate the payload for Deadline submission payload = { "JobInfo": { @@ -254,11 +287,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Department": self.deadline_department, "ChunkSize": self.deadline_chunk_size, - "Priority": job["Props"]["Pri"], + "Priority": priority, "Group": self.deadline_group, - "Pool": self.deadline_pool, - "SecondaryPool": self.deadline_pool_secondary, + "Pool": instance.data.get("primaryPool"), + "SecondaryPool": instance.data.get("secondaryPool"), "OutputDirectory0": output_dir }, @@ -278,22 +311,27 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): for assembly_id in instance.data.get("assemblySubmissionJobs"): payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 job_index += 1 + elif instance.data.get("bakingSubmissionJobs"): + self.log.info("Adding baking submission jobs as dependencies...") + 
job_index = 0 + for assembly_id in instance.data["bakingSubmissionJobs"]: + payload["JobInfo"]["JobDependency{}".format(job_index)] = assembly_id # noqa: E501 + job_index += 1 else: payload["JobInfo"]["JobDependency0"] = job["_id"] - index = 0 - for key in environment: - if key.upper() in self.enviro_filter: - payload["JobInfo"].update( - { - "EnvironmentKeyValue%d" - % index: "{key}={value}".format( - key=key, value=environment[key] - ) - } - ) - index += 1 + if instance.data.get("suspend_publish"): + payload["JobInfo"]["InitialStatus"] = "Suspended" + for index, (key_, value_) in enumerate(environment.items()): + payload["JobInfo"].update( + { + "EnvironmentKeyValue%d" + % index: "{key}={value}".format( + key=key_, value=value_ + ) + } + ) # remove secondary pool payload["JobInfo"].pop("SecondaryPool", None) @@ -304,6 +342,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if not response.ok: raise Exception(response.text) + deadline_publish_job_id = response.json()["_id"] + + return deadline_publish_job_id + def _copy_extend_frames(self, instance, representation): """Copy existing frames from latest version. @@ -321,13 +363,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info("Preparing to copy ...") start = instance.data.get("frameStart") end = instance.data.get("frameEnd") + project_name = legacy_io.active_project() # get latest version of subset # this will stop if subset wasn't published yet - version = openpype.api.get_latest_version(instance.data.get("asset"), - instance.data.get("subset")) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + instance.data.get("subset"), + asset_name=instance.data.get("asset") + ) + # get its files based on extension - subset_resources = get_resources(version, representation.get("ext")) + subset_resources = get_resources( + project_name, version, representation.get("ext") + ) r_col, _ = clique.assemble(subset_resources) # if override remove all frames we are expecting to be rendered @@ -422,9 +472,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): cam = [c for c in cameras if c in col.head] if cam: - subset_name = '{}_{}_{}'.format(group_name, cam, aov) + if aov: + subset_name = '{}_{}_{}'.format(group_name, cam, aov) + else: + subset_name = '{}_{}'.format(group_name, cam) else: - subset_name = '{}_{}'.format(group_name, aov) + if aov: + subset_name = '{}_{}'.format(group_name, aov) + else: + subset_name = '{}'.format(group_name) if isinstance(col, (list, tuple)): staging = os.path.dirname(col[0]) @@ -447,16 +503,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): app = os.environ.get("AVALON_APP", "") preview = False - if app in self.aov_filter.keys(): - for aov_pattern in self.aov_filter[app]: - if re.match(aov_pattern, aov): - preview = True - break + + if isinstance(col, list): + render_file_name = os.path.basename(col[0]) + else: + render_file_name = os.path.basename(col) + aov_patterns = self.aov_filter + + preview = match_aov_pattern(app, aov_patterns, render_file_name) + # toggle preview on if multipart is on if instance_data.get("multipartExr"): preview = True - - new_instance = copy(instance_data) + self.log.debug("preview:{}".format(preview)) + new_instance = deepcopy(instance_data) new_instance["subset"] = subset_name new_instance["subsetGroup"] = group_name if preview: @@ -498,7 +558,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if new_instance.get("extendFrames", False): 
self._copy_extend_frames(new_instance, rep) instances.append(new_instance) - + self.log.debug("instances:{}".format(instances)) return instances def _get_representations(self, instance, exp_files): @@ -509,8 +569,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): most cases, but if not - we create representation from each of them. Arguments: - instance (pyblish.plugin.Instance): instance for which we are - setting representations + instance (dict): instance data for which we are + setting representations exp_files (list): list of expected files Returns: @@ -518,27 +578,29 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] + host_name = os.environ.get("AVALON_APP", "") collections, remainders = clique.assemble(exp_files) # create representation for every collected sequence for collection in collections: ext = collection.tail.lstrip(".") preview = False - # if filtered aov name is found in filename, toggle it for - # preview video rendering - for app in self.aov_filter.keys(): - if os.environ.get("AVALON_APP", "") == app: - for aov in self.aov_filter[app]: - if re.match( - aov, - list(collection)[0] - ): - preview = True - break - - # toggle preview on if multipart is on - if instance.get("multipartExr", False): - preview = True + # TODO 'useSequenceForReview' is a temporary solution which does + # not work for 100% of cases. We must be able to tell more + # explicitly what the expected files contain and what the + # review should be made from. + # - "review" tag is never added when it is set to 'False' + if instance["useSequenceForReview"]: + # toggle preview on if multipart is on + if instance.get("multipartExr", False): + preview = True + else: + render_file_name = list(collection)[0] + # if filtered aov name is found in filename, toggle it for + # preview video rendering + preview = match_aov_pattern( + host_name, self.aov_filter, render_file_name + ) staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( @@ -552,11 +614,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm."
).format(staging)) + frame_start = int(instance.get("frameStartHandle")) + if instance.get("slate"): + frame_start -= 1 + rep = { "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(collection)], - "frameStart": int(instance.get("frameStartHandle")), + "frameStart": frame_start, "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, @@ -602,12 +668,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "files": os.path.basename(remainder), "stagingDir": os.path.dirname(remainder), } - if "render" in instance.get("families"): + + preview = match_aov_pattern( + host_name, self.aov_filter, remainder + ) + if preview: rep.update({ "fps": instance.get("fps"), "tags": ["review"] }) - self._solve_families(instance, True) + self._solve_families(instance, preview) already_there = False for repre in instance.get("representations", []): @@ -624,6 +694,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): def _solve_families(self, instance, preview=False): families = instance.get("families") + # if we have one representation with preview tag # flag whole instance for review and for ftrack if preview: @@ -651,10 +722,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.context = context self.anatomy = instance.context.data["anatomy"] - if hasattr(instance, "_log"): - data['_log'] = instance._log - - asset = data.get("asset") or api.Session["AVALON_ASSET"] + asset = data.get("asset") or legacy_io.Session["AVALON_ASSET"] subset = data.get("subset") start = instance.data.get("frameStart") @@ -703,10 +771,17 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues." ).format(source)) - families = ["render"] + family = "render" + if "prerender" in instance.data["families"]: + family = "prerender" + families = [family] + + # pass review to families if marked as review + if data.get("review"): + families.append("review") instance_skeleton_data = { - "family": "render", + "family": family, "subset": subset, "families": families, "asset": asset, @@ -716,6 +791,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "handleEnd": handle_end, "frameStartHandle": start - handle_start, "frameEndHandle": end + handle_end, + "comment": instance.data["comment"], "fps": fps, "source": source, "extendFrames": data.get("extendFrames"), @@ -724,14 +800,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "resolutionWidth": data.get("resolutionWidth", 1920), "resolutionHeight": data.get("resolutionHeight", 1080), "multipartExr": data.get("multipartExr", False), - "jobBatchName": data.get("jobBatchName", "") + "jobBatchName": data.get("jobBatchName", ""), + "useSequenceForReview": data.get("useSequenceForReview", True), + # map inputVersions `ObjectId` -> `str` so json supports it + "inputVersions": list(map(str, data.get("inputVersions", []))) } - if "prerender" in instance.data["families"]: - instance_skeleton_data.update({ - "family": "prerender", - "families": []}) - # skip locking version if we are creating v01 instance_version = instance.data.get("version") # take this if exists if instance_version != 1: @@ -866,8 +940,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_i = copy(i) new_i["version"] = at.get("version") new_i["subset"] = at.get("subset") + new_i["family"] = at.get("family") new_i["append"] = True - new_i["families"].append(at.get("family")) + # don't set subsetGroup if we are attaching + 
new_i.pop("subsetGroup") new_instances.append(new_i) self.log.info(" - {} / v{}".format( at.get("subset"), at.get("version"))) @@ -916,12 +992,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # User is deadline user render_job["Props"]["User"] = context.data.get( "deadlineUser", getpass.getuser()) - # Priority is now not handled at all - - if self.deadline_priority: - render_job["Props"]["Pri"] = self.deadline_priority - else: - render_job["Props"]["Pri"] = instance.data.get("priority") render_job["Props"]["Env"] = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), @@ -937,7 +1007,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.deadline_url = instance.data.get("deadlineUrl") assert self.deadline_url, "Requires Deadline Webservice URL" - self._submit_deadline_post_job(instance, render_job, instances) + deadline_publish_job_id = \ + self._submit_deadline_post_job(instance, render_job, instances) # publish job file publish_job = { @@ -951,10 +1022,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "intent": context.data.get("intent"), "comment": context.data.get("comment"), "job": render_job or None, - "session": api.Session.copy(), + "session": legacy_io.Session.copy(), "instances": instances } + if deadline_publish_job_id: + publish_job["deadline_publish_job_id"] = deadline_publish_job_id + # add audio to metadata file if available audio_file = context.data.get("audioFile") if audio_file and os.path.isfile(audio_file): @@ -995,9 +1069,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_start = None prev_end = None - version = openpype.api.get_latest_version(asset_name=asset, - subset_name=subset - ) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) # Set prev start / end frames for comparison if not prev_start and not prev_end: @@ -1027,7 +1104,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): get publish_path Args: - anatomy (pype.lib.anatomy.Anatomy): + anatomy (openpype.pipeline.anatomy.Anatomy): template_data (dict): pre-calculated collected data for process asset (string): asset name subset (string): subset name (actually group name of subset) @@ -1042,7 +1119,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): based on 'publish' template """ if not version: - version = openpype.api.get_latest_version(asset, subset) + project_name = legacy_io.active_project() + version = get_last_version_by_subset_name( + project_name, + subset, + asset_name=asset + ) if version: version = int(version["name"]) + 1 else: @@ -1059,7 +1141,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] + project_name = legacy_io.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." 
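For context on the JobInfo hunks above: Deadline's JobInfo format has no nested environment mapping, so the plugin flattens the collected `environment` dict into indexed `EnvironmentKeyValue<N>` entries with `enumerate`. A minimal, self-contained sketch of that flattening; `flatten_environment` is an illustrative helper name, not part of the plugin, which does this inline:

```python
def flatten_environment(environment):
    """Flatten an environment dict into Deadline JobInfo entries.

    Deadline stores job environment variables as indexed
    "EnvironmentKeyValue<N>" keys, one "KEY=value" pair per index,
    which is what the plugin builds with enumerate().
    """
    job_info = {}
    for index, (key, value) in enumerate(environment.items()):
        job_info["EnvironmentKeyValue%d" % index] = "{}={}".format(key, value)
    return job_info


# Example with two of the keys the plugin sets:
print(flatten_environment({
    "OPENPYPE_PUBLISH_JOB": "1",
    "OPENPYPE_RENDER_JOB": "0",
}))
# {'EnvironmentKeyValue0': 'OPENPYPE_PUBLISH_JOB=1',
#  'EnvironmentKeyValue1': 'OPENPYPE_RENDER_JOB=0'}
```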
diff --git a/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py new file mode 100644 index 0000000000..78eed17c98 --- /dev/null +++ b/openpype/modules/deadline/plugins/publish/validate_deadline_pools.py @@ -0,0 +1,48 @@ +import pyblish.api + +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) +from openpype.modules.deadline.deadline_module import DeadlineModule + + +class ValidateDeadlinePools(OptionalPyblishPluginMixin, + pyblish.api.InstancePlugin): + """Validate primaryPool and secondaryPool on instance. + + Values are on instance based on value insertion when Creating instance or + by Settings in CollectDeadlinePools. + """ + + label = "Validate Deadline Pools" + order = pyblish.api.ValidatorOrder + families = ["rendering", "render.farm", "renderFarm", "renderlayer"] + optional = True + + def process(self, instance): + # get default deadline webservice url from deadline module + deadline_url = instance.context.data["defaultDeadline"] + self.log.info("deadline_url::{}".format(deadline_url)) + pools = DeadlineModule.get_deadline_pools(deadline_url, log=self.log) + self.log.info("pools::{}".format(pools)) + + formatting_data = { + "pools_str": ",".join(pools) + } + + primary_pool = instance.data.get("primaryPool") + if primary_pool and primary_pool not in pools: + msg = "Configured primary '{}' not present on Deadline".format( + instance.data["primaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) + + secondary_pool = instance.data.get("secondaryPool") + if secondary_pool and secondary_pool not in pools: + msg = "Configured secondary '{}' not present on Deadline".format( + instance.data["secondaryPool"]) + formatting_data["invalid_value_str"] = msg + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py index c2426e0d78..f0a3ddd246 100644 --- a/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py +++ b/openpype/modules/deadline/plugins/publish/validate_expected_and_rendered_files.py @@ -3,7 +3,7 @@ import requests import pyblish.api -from openpype.lib.delivery import collect_frames +from openpype.lib import collect_frames from openpype_modules.deadline.abstract_submit_deadline import requests_get diff --git a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico new file mode 100644 index 0000000000..39d61592fe Binary files /dev/null and b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.ico differ diff --git a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.param b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.param new file mode 100644 index 0000000000..24c59d2005 --- /dev/null +++ b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.param @@ -0,0 +1,38 @@ +[About] +Type=label +Label=About +Category=About Plugin +CategoryOrder=-1 +Index=0 +Default=Celaction Plugin for Deadline +Description=Not configurable + +[ConcurrentTasks] +Type=label +Label=ConcurrentTasks +Category=About Plugin +CategoryOrder=-1 +Index=0 +Default=True +Description=Not configurable + 
+[Executable] +Type=filename +Label=Executable +Category=Config +CategoryOrder=0 +CategoryIndex=0 +Description=The command executable to run +Required=false +DisableIfBlank=true + +[RenderNameSeparator] +Type=string +Label=RenderNameSeparator +Category=Config +CategoryOrder=0 +CategoryIndex=1 +Description=The separator to use for naming +Required=false +DisableIfBlank=true +Default=. diff --git a/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.py b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.py new file mode 100644 index 0000000000..2d0edd3dca --- /dev/null +++ b/openpype/modules/deadline/repository/custom/plugins/CelAction/CelAction.py @@ -0,0 +1,122 @@ +from System.Text.RegularExpressions import * + +from Deadline.Plugins import * +from Deadline.Scripting import * + +import _winreg + +###################################################################### +# This is the function that Deadline calls to get an instance of the +# main DeadlinePlugin class. +###################################################################### + + +def GetDeadlinePlugin(): + return CelActionPlugin() + + +def CleanupDeadlinePlugin(deadlinePlugin): + deadlinePlugin.Cleanup() + +###################################################################### +# This is the main DeadlinePlugin class for the CelAction plugin. +###################################################################### + + +class CelActionPlugin(DeadlinePlugin): + + def __init__(self): + self.InitializeProcessCallback += self.InitializeProcess + self.RenderExecutableCallback += self.RenderExecutable + self.RenderArgumentCallback += self.RenderArgument + self.StartupDirectoryCallback += self.StartupDirectory + + def Cleanup(self): + for stdoutHandler in self.StdoutHandlers: + del stdoutHandler.HandleCallback + + del self.InitializeProcessCallback + del self.RenderExecutableCallback + del self.RenderArgumentCallback + del self.StartupDirectoryCallback + + def GetCelActionRegistryKey(self): + # Modify registry for frame separation + path = r'Software\CelAction\CelAction2D\User Settings' + _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, path) + regKey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0, + _winreg.KEY_ALL_ACCESS) + return regKey + + def GetSeparatorValue(self, regKey): + useSeparator, _ = _winreg.QueryValueEx( + regKey, 'RenderNameUseSeparator') + separator, _ = _winreg.QueryValueEx(regKey, 'RenderNameSeparator') + + return useSeparator, separator + + def SetSeparatorValue(self, regKey, useSeparator, separator): + _winreg.SetValueEx(regKey, 'RenderNameUseSeparator', + 0, _winreg.REG_DWORD, useSeparator) + _winreg.SetValueEx(regKey, 'RenderNameSeparator', + 0, _winreg.REG_SZ, separator) + + def InitializeProcess(self): + # Set the plugin specific settings. + self.SingleFramesOnly = False + + # Set the process specific settings. 
+ self.StdoutHandling = True + self.PopupHandling = True + + # Ignore 'celaction' Pop-up dialog + self.AddPopupIgnorer(".*Rendering.*") + self.AddPopupIgnorer(".*AutoRender.*") + + # Ignore 'celaction' Pop-up dialog + self.AddPopupIgnorer(".*Wait.*") + + # Ignore 'celaction' Pop-up dialog + self.AddPopupIgnorer(".*Timeline Scrub.*") + + celActionRegKey = self.GetCelActionRegistryKey() + + self.SetSeparatorValue(celActionRegKey, 1, self.GetConfigEntryWithDefault( + "RenderNameSeparator", ".").strip()) + + def RenderExecutable(self): + return RepositoryUtils.CheckPathMapping(self.GetConfigEntry("Executable").strip()) + + def RenderArgument(self): + arguments = RepositoryUtils.CheckPathMapping( + self.GetPluginInfoEntry("Arguments").strip()) + arguments = arguments.replace( + "<STARTFRAME>", str(self.GetStartFrame())) + arguments = arguments.replace("<ENDFRAME>", str(self.GetEndFrame())) + arguments = self.ReplacePaddedFrame( + arguments, "<STARTFRAME%([0-9]+)>", self.GetStartFrame()) + arguments = self.ReplacePaddedFrame( + arguments, "<ENDFRAME%([0-9]+)>", self.GetEndFrame()) + arguments = arguments.replace("<QUOTE>", "\"") + return arguments + + def StartupDirectory(self): + return self.GetPluginInfoEntryWithDefault("StartupDirectory", "").strip() + + def ReplacePaddedFrame(self, arguments, pattern, frame): + frameRegex = Regex(pattern) + while True: + frameMatch = frameRegex.Match(arguments) + if frameMatch.Success: + paddingSize = int(frameMatch.Groups[1].Value) + if paddingSize > 0: + padding = StringUtils.ToZeroPaddedString( + frame, paddingSize, False) + else: + padding = str(frame) + arguments = arguments.replace( + frameMatch.Groups[0].Value, padding) + else: + break + + return arguments diff --git a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py index eeb1f7744c..b0560ce1e8 100644 --- a/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py +++ b/openpype/modules/deadline/repository/custom/plugins/GlobalJobPreLoad.py @@ -6,13 +6,281 @@ import subprocess import json import platform import uuid -from Deadline.Scripting import RepositoryUtils, FileUtils +import re +from Deadline.Scripting import ( + RepositoryUtils, + FileUtils, + DirectoryUtils, + ProcessUtils, +) + +VERSION_REGEX = re.compile( + r"(?P<major>0|[1-9]\d*)" + r"\.(?P<minor>0|[1-9]\d*)" + r"\.(?P<patch>0|[1-9]\d*)" + r"(?:-(?P<prerelease>[a-zA-Z\d\-.]*))?" + r"(?:\+(?P<buildmetadata>[a-zA-Z\d\-.]*))?" +) + + +class OpenPypeVersion: + """Fake semver version class for OpenPype version purposes. + + The version is considered invalid when major, minor or patch part + is missing. + """ + def __init__(self, major, minor, patch, prerelease, origin=None): + self.major = major + self.minor = minor + self.patch = patch + self.prerelease = prerelease + + is_valid = True + if major is None or minor is None or patch is None: + is_valid = False + self.is_valid = is_valid + + if origin is None: + base = "{}.{}.{}".format(str(major), str(minor), str(patch)) + if not prerelease: + origin = base + else: + origin = "{}-{}".format(base, str(prerelease)) + + self.origin = origin + + @classmethod + def from_string(cls, version): + """Create an object of version from string. + + Args: + version (str): Version as a string. + + Returns: + Union[OpenPypeVersion, None]: Version object if input is nonempty + string otherwise None.
+ """ + + if not version: + return None + valid_parts = VERSION_REGEX.findall(version) + if len(valid_parts) != 1: + # Return invalid version with filled 'origin' attribute + return cls(None, None, None, None, origin=str(version)) + + # Unpack found version + major, minor, patch, pre, post = valid_parts[0] + prerelease = pre + # Post release is not important anymore and should be considered as + # part of prerelease + # - comparison is implemented to find suitable build and builds should + # never contain prerelease part so "not proper" parsing is + # acceptable for this use case. + if post: + prerelease = "{}+{}".format(pre, post) + + return cls( + int(major), int(minor), int(patch), prerelease, origin=version + ) + + def has_compatible_release(self, other): + """Version has compatible release as other version. + + Both major and minor versions must be exactly the same. In that case + a build can be considered as release compatible with any version. + + Args: + other (OpenPypeVersion): Other version. + + Returns: + bool: Version is release compatible with other version. + """ + + if self.is_valid and other.is_valid: + return self.major == other.major and self.minor == other.minor + return False + + def __bool__(self): + return self.is_valid + + def __repr__(self): + return "<{} {}>".format(self.__class__.__name__, self.origin) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return self.origin == other + return self.origin == other.origin + + def __lt__(self, other): + if not isinstance(other, self.__class__): + return None + + if not self.is_valid: + return True + + if not other.is_valid: + return False + + if self.origin == other.origin: + return None + + same_major = self.major == other.major + if not same_major: + return self.major < other.major + + same_minor = self.minor == other.minor + if not same_minor: + return self.minor < other.minor + + same_patch = self.patch == other.patch + if not same_patch: + return self.patch < other.patch + + if not self.prerelease: + return False + + if not other.prerelease: + return True + + pres = [self.prerelease, other.prerelease] + pres.sort() + return pres[0] == self.prerelease + + +def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + Union[OpenPypeVersion, None]: version of OpenPype if found. + """ + + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "MacOS") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(" ! 
path is not a build: {}".format(path)) + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_str = version.get("__version__") + if version_str: + return OpenPypeVersion.from_string(version_str) + return None def get_openpype_executable(): """Return OpenPype Executable from Event Plug-in Settings""" config = RepositoryUtils.GetPluginConfig("OpenPype") - return config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + exe_list = config.GetConfigEntryWithDefault("OpenPypeExecutable", "") + dir_list = config.GetConfigEntryWithDefault( + "OpenPypeInstallationDirs", "") + + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + exe_list = exe_list.replace("\\ ", " ") + dir_list = dir_list.replace("\\ ", " ") + return exe_list, dir_list + + +def get_openpype_versions(dir_list): + print(">>> Getting OpenPype versions ...") + openpype_versions = [] + + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + print("--- Looking for OpenPype at: {}".format(install_dir)) + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = get_openpype_version_from_path(subdir) + if not version: + continue + print(" - found: {} - {}".format(version, subdir)) + openpype_versions.append((version, subdir)) + return openpype_versions + + +def get_requested_openpype_executable( + exe, dir_list, requested_version +): + requested_version_obj = OpenPypeVersion.from_string(requested_version) + if not requested_version_obj: + print(( + ">>> Requested version '{}' does not match version regex '{}'" + ).format(requested_version, VERSION_REGEX)) + return None + + print(( + ">>> Scanning for compatible requested version {}" + ).format(requested_version)) + openpype_versions = get_openpype_versions(dir_list) + if not openpype_versions: + return None + + # if looking for requested compatible version, + # add the implicitly specified to the list too. + if exe: + exe_dir = os.path.dirname(exe) + print("Looking for OpenPype at: {}".format(exe_dir)) + version = get_openpype_version_from_path(exe_dir) + if version: + print(" - found: {} - {}".format(version, exe_dir)) + openpype_versions.append((version, exe_dir)) + + matching_item = None + compatible_versions = [] + for version_item in openpype_versions: + version, version_dir = version_item + if requested_version_obj.has_compatible_release(version): + compatible_versions.append(version_item) + if version == requested_version_obj: + # Store version item if version matches exactly + # - break if matching version is found + matching_item = version_item + break + + if not compatible_versions: + return None + + compatible_versions.sort(key=lambda item: item[0]) + if matching_item: + version, version_dir = matching_item + print(( + "*** Found exact match build version {} in {}" + ).format(version, version_dir)) + + else: + version, version_dir = compatible_versions[-1] + + print(( + "*** Latest compatible version found is {} in {}" + ).format(version, version_dir)) + + # create list of executables for different platform and let + # Deadline decide.
+ exe_list = [ + os.path.join(version_dir, "openpype_console.exe"), + os.path.join(version_dir, "openpype_console"), + os.path.join(version_dir, "MacOS", "openpype_console") + ] + return FileUtils.SearchFileList(";".join(exe_list)) def inject_openpype_environment(deadlinePlugin): @@ -24,17 +292,31 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Injecting OpenPype environments ...") try: - print(">>> Getting OpenPype executable ...") - exe_list = get_openpype_executable() - openpype_app = FileUtils.SearchFileList(exe_list) - if openpype_app == "": - raise RuntimeError( - "OpenPype executable was not found " + - "in the semicolon separated list \"" + exe_list + "\". " + - "The path to the render executable can be configured " + - "from the Plugin Configuration in the Deadline Monitor.") + exe_list, dir_list = get_openpype_executable() + exe = FileUtils.SearchFileList(exe_list) - print("--- OpenPype executable: {}".format(openpype_app)) + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + exe = get_requested_openpype_executable( + exe, dir_list, requested_version + ) + if exe is None: + raise RuntimeError(( + "Cannot find compatible version available for version {}" + " requested by the job. Please add it through plugin" + " configuration in Deadline or install it to configured" + " directory." + ).format(requested_version)) + + if not exe: + raise RuntimeError(( + "OpenPype executable was not found in the semicolon " + "separated list \"{}\". " + "The path to the render executable can be configured" + " from the Plugin Configuration in the Deadline Monitor." + ).format(exe_list)) + + print("--- OpenPype executable: {}".format(exe)) # tempfile.TemporaryFile cannot be used because of locking temp_file_name = "{}_{}.json".format( @@ -45,47 +327,59 @@ def inject_openpype_environment(deadlinePlugin): print(">>> Temporary path: {}".format(export_url)) args = [ - openpype_app, "--headless", - 'extractenvironments', + "extractenvironments", export_url ] - add_args = {} - add_args['project'] = \ - job.GetJobEnvironmentKeyValue('AVALON_PROJECT') - add_args['asset'] = job.GetJobEnvironmentKeyValue('AVALON_ASSET') - add_args['task'] = job.GetJobEnvironmentKeyValue('AVALON_TASK') - add_args['app'] = job.GetJobEnvironmentKeyValue('AVALON_APP_NAME') - add_args["envgroup"] = "farm" + add_kwargs = { + "project": job.GetJobEnvironmentKeyValue("AVALON_PROJECT"), + "asset": job.GetJobEnvironmentKeyValue("AVALON_ASSET"), + "task": job.GetJobEnvironmentKeyValue("AVALON_TASK"), + "app": job.GetJobEnvironmentKeyValue("AVALON_APP_NAME"), + "envgroup": "farm" + } + + if job.GetJobEnvironmentKeyValue('IS_TEST'): + args.append("--automatic-tests") - if all(add_args.values()): - for key, value in add_args.items(): - args.append("--{}".format(key)) - args.append(value) + if all(add_kwargs.values()): + for key, value in add_kwargs.items(): + args.extend(["--{}".format(key), value]) else: - msg = "Required env vars: AVALON_PROJECT, AVALON_ASSET, " + \ - "AVALON_TASK, AVALON_APP_NAME" - raise RuntimeError(msg) + raise RuntimeError(( + "Missing required env vars: AVALON_PROJECT, AVALON_ASSET," + " AVALON_TASK, AVALON_APP_NAME" + )) if not os.environ.get("OPENPYPE_MONGO"): print(">>> Missing OPENPYPE_MONGO env var, process won't work") - env = os.environ - env["OPENPYPE_HEADLESS_MODE"] = "1" - env["AVALON_TIMEOUT"] = "5000" + os.environ["AVALON_TIMEOUT"] = "5000" - print(">>> Executing: {}".format(args)) - std_output = subprocess.check_output(args, -
cwd=os.path.dirname(openpype_app), - env=env) - print(">>> Process result {}".format(std_output)) + args_str = subprocess.list2cmdline(args) + print(">>> Executing: {} {}".format(exe, args_str)) + process = ProcessUtils.SpawnProcess( + exe, args_str, os.path.dirname(exe) + ) + ProcessUtils.WaitForExit(process, -1) + if process.ExitCode != 0: + raise RuntimeError( + "Failed to run OpenPype process to extract environments." + ) print(">>> Loading file ...") with open(export_url) as fp: contents = json.load(fp) - for key, value in contents.items(): - deadlinePlugin.SetProcessEnvironmentVariable(key, value) + + for key, value in contents.items(): + deadlinePlugin.SetProcessEnvironmentVariable(key, value) + + script_url = job.GetJobPluginInfoKeyValue("ScriptFilename") + if script_url: + script_url = script_url.format(**contents).replace("\\", "/") + print(">>> Setting script path {}".format(script_url)) + job.SetJobPluginInfoKeyValue("ScriptFilename", script_url) print(">>> Removing temporary file") os.remove(export_url) @@ -115,78 +409,6 @@ def inject_render_job_id(deadlinePlugin): print(">>> Injection end.") -def pype_command_line(executable, arguments, workingDirectory): - """Remap paths in comand line argument string. - - Using Deadline rempper it will remap all path found in command-line. - - Args: - executable (str): path to executable - arguments (str): arguments passed to executable - workingDirectory (str): working directory path - - Returns: - Tuple(executable, arguments, workingDirectory) - - """ - print("-" * 40) - print("executable: {}".format(executable)) - print("arguments: {}".format(arguments)) - print("workingDirectory: {}".format(workingDirectory)) - print("-" * 40) - print("Remapping arguments ...") - arguments = RepositoryUtils.CheckPathMapping(arguments) - print("* {}".format(arguments)) - print("-" * 40) - return executable, arguments, workingDirectory - - -def pype(deadlinePlugin): - """Remaps `PYPE_METADATA_FILE` and `PYPE_PYTHON_EXE` environment vars. - - `PYPE_METADATA_FILE` is used on farm to point to rendered data. This path - originates on platform from which this job was published. To be able to - publish on different platform, this path needs to be remapped. - - `PYPE_PYTHON_EXE` can be used to specify custom location of python - interpreter to use for Pype. This is remappeda also if present even - though it probably doesn't make much sense. - - Arguments: - deadlinePlugin: Deadline job plugin passed by Deadline - - """ - print(">>> Getting job ...") - job = deadlinePlugin.GetJob() - # PYPE should be here, not OPENPYPE - backward compatibility!! - pype_metadata = job.GetJobEnvironmentKeyValue("PYPE_METADATA_FILE") - pype_python = job.GetJobEnvironmentKeyValue("PYPE_PYTHON_EXE") - print(">>> Having backward compatible env vars {}/{}".format(pype_metadata, - pype_python)) - # test if it is pype publish job. 
- if pype_metadata: - pype_metadata = RepositoryUtils.CheckPathMapping(pype_metadata) - if platform.system().lower() == "linux": - pype_metadata = pype_metadata.replace("\\", "/") - - print("- remapping PYPE_METADATA_FILE: {}".format(pype_metadata)) - job.SetJobEnvironmentKeyValue("PYPE_METADATA_FILE", pype_metadata) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_METADATA_FILE", pype_metadata) - - if pype_python: - pype_python = RepositoryUtils.CheckPathMapping(pype_python) - if platform.system().lower() == "linux": - pype_python = pype_python.replace("\\", "/") - - print("- remapping PYPE_PYTHON_EXE: {}".format(pype_python)) - job.SetJobEnvironmentKeyValue("PYPE_PYTHON_EXE", pype_python) - deadlinePlugin.SetProcessEnvironmentVariable( - "PYPE_PYTHON_EXE", pype_python) - - deadlinePlugin.ModifyCommandLineCallback += pype_command_line - - def __main__(deadlinePlugin): print("*** GlobalJobPreload start ...") print(">>> Getting job ...") @@ -196,16 +418,17 @@ def __main__(deadlinePlugin): job.GetJobEnvironmentKeyValue('OPENPYPE_RENDER_JOB') or '0' openpype_publish_job = \ job.GetJobEnvironmentKeyValue('OPENPYPE_PUBLISH_JOB') or '0' + openpype_remote_job = \ + job.GetJobEnvironmentKeyValue('OPENPYPE_REMOTE_JOB') or '0' print("--- Job type - render {}".format(openpype_render_job)) print("--- Job type - publish {}".format(openpype_publish_job)) + print("--- Job type - remote {}".format(openpype_remote_job)) if openpype_publish_job == '1' and openpype_render_job == '1': raise RuntimeError("Misconfiguration. Job couldn't be both " + "render and publish.") if openpype_publish_job == '1': inject_render_job_id(deadlinePlugin) - elif openpype_render_job == '1': + elif openpype_render_job == '1' or openpype_remote_job == '1': inject_openpype_environment(deadlinePlugin) - else: - pype(deadlinePlugin) # backward compatibility with Pype2 diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param index 8bd6dce12d..b3ac18e20c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.param @@ -7,11 +7,20 @@ Index=0 Default=OpenPype Plugin for Deadline Description=Not configurable +[OpenPypeInstallationDirs] +Type=multilinemultifolder +Label=Directories where OpenPype versions are installed +Category=OpenPype Installation Directories +CategoryOrder=0 +Index=0 +Default=C:\Program Files (x86)\OpenPype +Description=Path or paths to directories where multiple versions of OpenPype might be installed. Enter every such path on separate lines. + [OpenPypeExecutable] Type=multilinemultifilename Label=OpenPype Executable Category=OpenPype Executables -CategoryOrder=0 +CategoryOrder=1 Index=0 Default= Description=The path to the OpenPype executable. Enter alternative paths on separate lines. 
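Both the GlobalJobPreLoad and OpenPype plugin changes above resolve a job's requested `OPENPYPE_VERSION` the same way: a discovered build is release-compatible when its major and minor versions match the request, an exact match is used immediately, and otherwise the highest compatible version wins. A minimal, self-contained sketch of that selection rule, assuming plain `major.minor.patch` strings; `pick_compatible` is a hypothetical name, not the plugin API:

```python
import re

# Same named groups as the VERSION_REGEX added to GlobalJobPreLoad.py
VERSION_REGEX = re.compile(
    r"(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)"
)


def pick_compatible(requested, found_versions):
    """Return the best matching version string or None.

    Mirrors the selection in get_requested_openpype_executable():
    an exact match wins, otherwise the highest patch of the same
    "major.minor" release is used.
    """
    req_match = VERSION_REGEX.match(requested)
    if not req_match:
        return None
    req_parts = req_match.groupdict()
    compatible = []
    for version in found_versions:
        match = VERSION_REGEX.match(version)
        if not match:
            continue
        parts = match.groupdict()
        if (
            parts["major"] == req_parts["major"]
            and parts["minor"] == req_parts["minor"]
        ):
            if version == requested:
                # Exact match - no need to search further
                return version
            compatible.append((int(parts["patch"]), version))
    if compatible:
        # Highest compatible patch wins
        return max(compatible)[1]
    return None


print(pick_compatible("3.14.2", ["3.13.0", "3.14.1", "3.14.3"]))  # 3.14.3
```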
diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py index 451d71fb63..ab4a3d5e9b 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPype/OpenPype.py @@ -1,10 +1,19 @@ +#!/usr/bin/env python3 + from System.IO import Path from System.Text.RegularExpressions import Regex from Deadline.Plugins import PluginType, DeadlinePlugin -from Deadline.Scripting import StringUtils, FileUtils, RepositoryUtils +from Deadline.Scripting import ( + StringUtils, + FileUtils, + DirectoryUtils, + RepositoryUtils +) import re +import os +import platform ###################################################################### @@ -52,13 +61,123 @@ class OpenPypeDeadlinePlugin(DeadlinePlugin): self.AddStdoutHandlerCallback( ".*Progress: (\d+)%.*").HandleCallback += self.HandleProgress + @staticmethod + def get_openpype_version_from_path(path, build=True): + """Get OpenPype version from provided path. + path (str): Path to scan. + build (bool, optional): Get only builds, not sources + + Returns: + str or None: version of OpenPype if found. + + """ + # fix path for application bundle on macos + if platform.system().lower() == "darwin": + path = os.path.join(path, "MacOS") + + version_file = os.path.join(path, "openpype", "version.py") + if not os.path.isfile(version_file): + return None + + # skip if the version is not build + exe = os.path.join(path, "openpype_console.exe") + if platform.system().lower() in ["linux", "darwin"]: + exe = os.path.join(path, "openpype_console") + + # if only builds are requested + if build and not os.path.isfile(exe): # noqa: E501 + print(f" ! path is not a build: {path}") + return None + + version = {} + with open(version_file, "r") as vf: + exec(vf.read(), version) + + version_match = re.search(r"(\d+\.\d+.\d+).*", version["__version__"]) + return version_match[1] + def RenderExecutable(self): - exeList = self.GetConfigEntry("OpenPypeExecutable") - exe = FileUtils.SearchFileList(exeList) + job = self.GetJob() + openpype_versions = [] + # if the job requires specific OpenPype version, + # lets go over all available and find compatible build. + requested_version = job.GetJobEnvironmentKeyValue("OPENPYPE_VERSION") + if requested_version: + self.LogInfo(( + "Scanning for compatible requested " + f"version {requested_version}")) + dir_list = self.GetConfigEntry("OpenPypeInstallationDirs") + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + dir_list = dir_list.replace("\\ ", " ") + install_dir = DirectoryUtils.SearchDirectoryList(dir_list) + if install_dir: + sub_dirs = [ + f.path for f in os.scandir(install_dir) + if f.is_dir() + ] + for subdir in sub_dirs: + version = self.get_openpype_version_from_path(subdir) + if not version: + continue + openpype_versions.append((version, subdir)) + + exe_list = self.GetConfigEntry("OpenPypeExecutable") + # clean '\ ' for MacOS pasting + if platform.system().lower() == "darwin": + exe_list = exe_list.replace("\\ ", " ") + exe = FileUtils.SearchFileList(exe_list) + if openpype_versions: + # if looking for requested compatible version, + # add the implicitly specified to the list too. 
+ version = self.get_openpype_version_from_path( + os.path.dirname(exe)) + if version: + openpype_versions.append((version, os.path.dirname(exe))) + + if requested_version: + # sort detected versions + if openpype_versions: + openpype_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + requested_major, requested_minor, _ = requested_version.split(".")[:3] # noqa: E501 + compatible_versions = [] + for version in openpype_versions: + v = version[0].split(".")[:3] + if v[0] == requested_major and v[1] == requested_minor: + compatible_versions.append(version) + if not compatible_versions: + self.FailRender(("Cannot find compatible version available " + "for version {} requested by the job. " + "Please add it through plugin configuration " + "in Deadline or install it to configured " + "directory.").format(requested_version)) + # sort compatible versions nad pick the last one + compatible_versions.sort( + key=lambda ver: [ + int(t) if t.isdigit() else t.lower() + for t in re.split(r"(\d+)", ver[0]) + ]) + # create list of executables for different platform and let + # Deadline decide. + exe_list = [ + os.path.join( + compatible_versions[-1][1], "openpype_console.exe"), + os.path.join( + compatible_versions[-1][1], "openpype_console"), + os.path.join( + compatible_versions[-1][1], "MacOS", "openpype_console") + ] + exe = FileUtils.SearchFileList(";".join(exe_list)) + if exe == "": self.FailRender( "OpenPype executable was not found " + - "in the semicolon separated list \"" + exeList + "\". " + + "in the semicolon separated list " + + "\"" + ";".join(exe_list) + "\". " + "The path to the render executable can be configured " + "from the Plugin Configuration in the Deadline Monitor.") return exe diff --git a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py index 9fca1b5391..861f16518c 100644 --- a/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py +++ b/openpype/modules/deadline/repository/custom/plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py @@ -56,7 +56,7 @@ def convert_value_by_type_name(value_type, value): return float(value) # Vectors will probably have more types - if value_type == "vec2f": + if value_type in ("vec2f", "float2"): return [float(item) for item in value.split(",")] # Matrix should be always have square size of element 3x3, 4x4 @@ -71,7 +71,7 @@ def convert_value_by_type_name(value_type, value): elif parts_len == 4: divisor = 2 elif parts_len == 9: - divisor == 3 + divisor = 3 elif parts_len == 16: divisor = 4 else: @@ -127,7 +127,7 @@ def convert_value_by_type_name(value_type, value): return output print(( - "MISSING IMPLEMENTATION:" + "Dev note (missing implementation):" " Unknown attrib type \"{}\". Value: {}" ).format(value_type, value)) return value @@ -183,7 +183,7 @@ def parse_oiio_xml_output(xml_string): else: value = child.text print(( - "MISSING IMPLEMENTATION:" + "Dev note (missing implementation):" " Unknown tag \"{}\". 
Value \"{}\"" ).format(tag_name, value)) @@ -204,10 +204,10 @@ def info_about_input(oiiotool_path, filepath): _stdout, _stderr = popen.communicate() output = "" if _stdout: - output += _stdout.decode("utf-8") + output += _stdout.decode("utf-8", errors="backslashreplace") if _stderr: - output += _stderr.decode("utf-8") + output += _stderr.decode("utf-8", errors="backslashreplace") output = output.replace("\r\n", "\n") xml_started = False @@ -453,7 +453,7 @@ class OpenPypeTileAssembler(DeadlinePlugin): # Swap to have input as foreground args.append("--swap") # Paste foreground to background - args.append("--paste +{}+{}".format(pos_x, pos_y)) + args.append("--paste {x:+d}{y:+d}".format(x=pos_x, y=pos_y)) args.append("-o") args.append(output_path) diff --git a/openpype/modules/example_addons/example_addon/addon.py b/openpype/modules/example_addons/example_addon/addon.py index 50554b1e43..ead647b41d 100644 --- a/openpype/modules/example_addons/example_addon/addon.py +++ b/openpype/modules/example_addons/example_addon/addon.py @@ -13,10 +13,7 @@ import click from openpype.modules import ( JsonFilesSettingsDef, OpenPypeAddOn, - ModulesManager -) -# Import interface defined by this addon to be able find other addons using it -from openpype_interfaces import ( + ModulesManager, IPluginPaths, ITrayAction ) diff --git a/openpype/modules/example_addons/example_addon/widgets.py b/openpype/modules/example_addons/example_addon/widgets.py index c0a0a7e510..cd0da3ae43 100644 --- a/openpype/modules/example_addons/example_addon/widgets.py +++ b/openpype/modules/example_addons/example_addon/widgets.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets +from qtpy import QtWidgets from openpype.style import load_stylesheet diff --git a/openpype/modules/ftrack/__init__.py b/openpype/modules/ftrack/__init__.py index 7261254c6f..e520f08337 100644 --- a/openpype/modules/ftrack/__init__.py +++ b/openpype/modules/ftrack/__init__.py @@ -1,9 +1,13 @@ from .ftrack_module import ( FtrackModule, - FTRACK_MODULE_DIR + FTRACK_MODULE_DIR, + + resolve_ftrack_url, ) __all__ = ( "FtrackModule", - "FTRACK_MODULE_DIR" + "FTRACK_MODULE_DIR", + + "resolve_ftrack_url", ) diff --git a/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py new file mode 100644 index 0000000000..21382007a0 --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_server/action_create_review_session.py @@ -0,0 +1,311 @@ +import threading +import datetime +import copy +import collections + +import ftrack_api + +from openpype.lib import get_datetime_data +from openpype.settings.lib import ( + get_project_settings, + get_default_project_settings +) +from openpype_modules.ftrack.lib import ServerAction + + +class CreateDailyReviewSessionServerAction(ServerAction): + """Create daily review session object per project. + + Action creates review sessions based on settings. Settings define if is + action enabled and what is a template for review session name. Logic works + in a way that if review session with the name already exists then skip + process. If review session for current day does not exist but yesterdays + review exists and is empty then yesterdays is renamed otherwise creates + new review session. + + Also contains cycle creation of dailies which is triggered each morning. + This option must be enabled in project settings. Cycle creation is also + checked on registration of action. + """ + + identifier = "create.daily.review.session" + #: Action label. 
+ label = "OpenPype Admin" + variant = "- Create Daily Review Session (Server)" + #: Action description. + description = "Manually create daily review session" + role_list = {"Pypeclub", "Administrator", "Project Manager"} + + settings_key = "create_daily_review_session" + default_template = "{yy}{mm}{dd}" + + def __init__(self, *args, **kwargs): + super(CreateDailyReviewSessionServerAction, self).__init__( + *args, **kwargs + ) + + self._cycle_timer = None + self._last_cyle_time = None + self._day_delta = datetime.timedelta(days=1) + + def discover(self, session, entities, event): + """Show action only on AssetVersions.""" + + valid_selection = False + for ent in event["data"]["selection"]: + # Ignore entities that are not tasks or projects + if ent["entityType"].lower() in ( + "show", "task", "reviewsession", "assetversion" + ): + valid_selection = True + break + + if not valid_selection: + return False + return self.valid_roles(session, entities, event) + + def launch(self, session, entities, event): + project_entity = self.get_project_from_entity(entities[0], session) + project_name = project_entity["full_name"] + project_settings = self.get_project_settings_from_event( + event, project_name + ) + action_settings = self._extract_action_settings(project_settings) + project_name_by_id = { + project_entity["id"]: project_name + } + settings_by_project_id = { + project_entity["id"]: action_settings + } + self._process_review_session( + session, settings_by_project_id, project_name_by_id + ) + return True + + def _calculate_next_cycle_delta(self): + studio_default_settings = get_default_project_settings() + action_settings = ( + studio_default_settings + ["ftrack"] + [self.settings_frack_subkey] + [self.settings_key] + ) + cycle_hour_start = action_settings.get("cycle_hour_start") + if not cycle_hour_start: + h = m = s = 0 + else: + h, m, s = cycle_hour_start + + # Create threading timer which will trigger creation of report + # at the 00:00:01 of next day + # - callback will trigger another timer which will have 1 day offset + now = datetime.datetime.now() + # Create object of today morning + expected_next_trigger = datetime.datetime( + now.year, now.month, now.day, h, m, s + ) + if expected_next_trigger > now: + seconds = (expected_next_trigger - now).total_seconds() + else: + expected_next_trigger += self._day_delta + seconds = (expected_next_trigger - now).total_seconds() + return seconds, expected_next_trigger + + def register(self, *args, **kwargs): + """Override register to be able trigger """ + # Register server action as would be normally + super(CreateDailyReviewSessionServerAction, self).register( + *args, **kwargs + ) + + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + + # Store cycle time which will be used to create next timer + self._last_cyle_time = cycle_time + # Create timer thread + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) + self._cycle_timer.start() + + self._check_review_session() + + def _timer_callback(self): + if ( + self._cycle_timer is not None + and self._last_cyle_time is not None + ): + seconds_delta, cycle_time = self._calculate_next_cycle_delta() + self._last_cyle_time = cycle_time + + self._cycle_timer = threading.Timer( + seconds_delta, self._timer_callback + ) + self._cycle_timer.start() + self._check_review_session() + + def _check_review_session(self): + session = ftrack_api.Session( + server_url=self.session.server_url, + api_key=self.session.api_key, + api_user=self.session.api_user, + 
auto_connect_event_hub=False + ) + project_entities = session.query( + "select id, full_name from Project" + ).all() + project_names_by_id = { + project_entity["id"]: project_entity["full_name"] + for project_entity in project_entities + } + + action_settings_by_project_id = self._get_action_settings( + project_names_by_id + ) + enabled_action_settings_by_project_id = {} + for item in action_settings_by_project_id.items(): + project_id, action_settings = item + if action_settings.get("cycle_enabled"): + enabled_action_settings_by_project_id[project_id] = ( + action_settings + ) + + if not enabled_action_settings_by_project_id: + self.log.info(( + "There are no projects that have enabled" + " cycle review session creation" + )) + + else: + self._process_review_session( + session, + enabled_action_settings_by_project_id, + project_names_by_id + ) + + session.close() + + def _process_review_session( + self, session, settings_by_project_id, project_names_by_id + ): + review_sessions = session.query(( + "select id, name, project_id" + " from ReviewSession where project_id in ({})" + ).format(self.join_query_keys(settings_by_project_id))).all() + + review_sessions_by_project_id = collections.defaultdict(list) + for review_session in review_sessions: + project_id = review_session["project_id"] + review_sessions_by_project_id[project_id].append(review_session) + + # Prepare fill data for today's review session and yesterday's + now = datetime.datetime.now() + today_obj = datetime.datetime( + now.year, now.month, now.day, 0, 0, 0 + ) + yesterday_obj = today_obj - self._day_delta + + today_fill_data = get_datetime_data(today_obj) + yesterday_fill_data = get_datetime_data(yesterday_obj) + + # Loop through projects and try to create daily reviews + for project_id, action_settings in settings_by_project_id.items(): + review_session_template = ( + action_settings["review_session_template"] + ).strip() or self.default_template + + today_project_fill_data = copy.deepcopy(today_fill_data) + yesterday_project_fill_data = copy.deepcopy(yesterday_fill_data) + project_name = project_names_by_id[project_id] + today_project_fill_data["project_name"] = project_name + yesterday_project_fill_data["project_name"] = project_name + + today_session_name = self._fill_review_template( + review_session_template, today_project_fill_data + ) + yesterday_session_name = self._fill_review_template( + review_session_template, yesterday_project_fill_data + ) + # Skip if today's session name could not be filled + if not today_session_name: + continue + + # Find matching review session + project_review_sessions = review_sessions_by_project_id[project_id] + todays_session = None + yesterdays_session = None + for review_session in project_review_sessions: + session_name = review_session["name"] + if session_name == today_session_name: + todays_session = review_session + break + elif session_name == yesterday_session_name: + yesterdays_session = review_session + + # Skip if today's session already exists + if todays_session is not None: + self.log.debug(( + "Today's ReviewSession \"{}\"" + " in project \"{}\" already exists" + ).format(today_session_name, project_name)) + continue + + # Check if there is yesterday's session and it is empty + # - in that case just rename it + if ( + yesterdays_session is not None + and len(yesterdays_session["review_session_objects"]) == 0 + ): + self.log.debug(( + "Renaming yesterday's empty review session \"{}\" to \"{}\"" + " in project \"{}\"" + ).format( + yesterday_session_name, today_session_name,
project_name + )) + yesterdays_session["name"] = today_session_name + session.commit() + continue + + # Create new review session with new name + self.log.debug(( + "Creating new review session \"{}\" in project \"{}\"" + ).format(today_session_name, project_name)) + session.create("ReviewSession", { + "project_id": project_id, + "name": today_session_name + }) + session.commit() + + def _get_action_settings(self, project_names_by_id): + settings_by_project_id = {} + for project_id, project_name in project_names_by_id.items(): + project_settings = get_project_settings(project_name) + action_settings = self._extract_action_settings(project_settings) + settings_by_project_id[project_id] = action_settings + return settings_by_project_id + + def _extract_action_settings(self, project_settings): + return ( + project_settings + .get("ftrack", {}) + .get(self.settings_frack_subkey, {}) + .get(self.settings_key) + ) or {} + + def _fill_review_template(self, template, data): + output = None + try: + output = template.format(**data) + except Exception: + self.log.warning( + ( + "Failed to fill review session template {} with data {}" + ).format(template, data), + exc_info=True + ) + return output + + +def register(session): + '''Register plugin. Called when used as an plugin.''' + CreateDailyReviewSessionServerAction(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py index 2e55be2743..332648cd02 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_server/action_prepare_project.py @@ -1,9 +1,8 @@ import json +import copy -from avalon.api import AvalonMongoDB -from openpype.api import ProjectSettings -from openpype.lib import create_project -from openpype.settings import SaveWarningExc +from openpype.client import get_project, create_project +from openpype.settings import ProjectSettings, SaveWarningExc from openpype_modules.ftrack.lib import ( ServerAction, @@ -363,12 +362,8 @@ class PrepareProjectServer(ServerAction): project_name = project_entity["full_name"] # Try to find project document - dbcon = AvalonMongoDB() - dbcon.install() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) + # Create project if is not available # - creation is required to be able set project anatomy and attributes if not project_doc: @@ -376,9 +371,11 @@ class PrepareProjectServer(ServerAction): self.log.info("Creating project \"{} [{}]\"".format( project_name, project_code )) - create_project(project_name, project_code, dbcon=dbcon) - - dbcon.uninstall() + create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -406,6 +403,10 @@ class PrepareProjectServer(ServerAction): self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value)) session.commit() + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) + return True diff --git a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py index 868bbb8463..1209375f82 100644 --- 
a/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/action_push_frame_values_to_task.py @@ -356,7 +356,7 @@ class PushHierValuesToNonHier(ServerAction): values_per_entity_id[entity_id][key] = None values = query_custom_attributes( - session, all_ids_with_parents, hier_attr_ids, True + session, hier_attr_ids, all_ids_with_parents, True ) for item in values: entity_id = item["entity_id"] diff --git a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py index 58f79e8a2b..df9147bdf7 100644 --- a/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import ServerAction from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -180,6 +181,13 @@ class SyncToAvalonServer(ServerAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py new file mode 100644 index 0000000000..d160b7200d --- /dev/null +++ b/openpype/modules/ftrack/event_handlers_server/action_tranfer_hierarchical_values.py @@ -0,0 +1,346 @@ +import copy +import json +import collections + +import ftrack_api + +from openpype_modules.ftrack.lib import ( + ServerAction, + statics_icon, +) +from openpype_modules.ftrack.lib.avalon_sync import create_chunks + + +class TransferHierarchicalValues(ServerAction): + """Transfer values across hierarchical attributes. + + Also gives the ability to convert types along the way. That is limited to + conversions between numbers and strings: + - int <-> float + - int, float -> string + """ + + identifier = "transfer.hierarchical.values" + label = "OpenPype Admin" + variant = "- Transfer values between 2 custom attributes" + description = ( + "Move values from a hierarchical attribute to" + " a second hierarchical attribute."
+ ) + icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") + + all_project_entities_query = ( + "select id, name, parent_id, link" + " from TypedContext where project_id is \"{}\"" + ) + cust_attr_query = ( + "select value, entity_id from CustomAttributeValue" + " where entity_id in ({}) and configuration_id is \"{}\"" + ) + settings_key = "transfer_values_of_hierarchical_attributes" + + def discover(self, session, entities, event): + """Show anywhere.""" + + return self.valid_roles(session, entities, event) + + def _selection_interface(self, session, event_values=None): + title = "Transfer hierarchical values" + + attr_confs = session.query( + ( + "select id, key from CustomAttributeConfiguration" + " where is_hierarchical is true" + ) + ).all() + attr_items = [] + for attr_conf in attr_confs: + attr_items.append({ + "value": attr_conf["id"], + "label": attr_conf["key"] + }) + + if len(attr_items) < 2: + return { + "title": title, + "items": [{ + "type": "label", + "value": ( + "Didn't find custom attributes" + " that can be transferred." + ) + }] + } + + attr_items = sorted(attr_items, key=lambda item: item["label"]) + items = [] + item_splitter = {"type": "label", "value": "---"} + items.append({ + "type": "label", + "value": ( + "
Please select source and destination" + " Custom attribute
" + ) + }) + items.append({ + "type": "label", + "value": ( + "WARNING: This will take effect for all projects!" + ) + }) + if event_values: + items.append({ + "type": "label", + "value": ( + "Note: Please select 2 different custom attributes." + ) + }) + + items.append(item_splitter) + + src_item = { + "type": "enumerator", + "label": "Source", + "name": "src_attr_id", + "data": copy.deepcopy(attr_items) + } + dst_item = { + "type": "enumerator", + "label": "Destination", + "name": "dst_attr_id", + "data": copy.deepcopy(attr_items) + } + delete_item = { + "type": "boolean", + "name": "delete_dst_attr_first", + "label": "Delete first", + "value": False + } + if event_values: + src_item["value"] = event_values["src_attr_id"] + dst_item["value"] = event_values["dst_attr_id"] + delete_item["value"] = event_values["delete_dst_attr_first"] + + items.append(src_item) + items.append(dst_item) + items.append(item_splitter) + items.append({ + "type": "label", + "value": ( + "WARNING: All values from destination" + " Custom Attribute will be removed if this is enabled." + ) + }) + items.append(delete_item) + + return { + "title": title, + "items": items + } + + def interface(self, session, entities, event): + if event["data"].get("values", {}): + return None + + return self._selection_interface(session) + + def launch(self, session, entities, event): + values = event["data"].get("values", {}) + if not values: + return None + src_attr_id = values["src_attr_id"] + dst_attr_id = values["dst_attr_id"] + delete_dst_values = values["delete_dst_attr_first"] + + if not src_attr_id or not dst_attr_id: + self.log.info("Attributes were not filled. Nothing to do.") + return { + "success": True, + "message": "Nothing to do" + } + + if src_attr_id == dst_attr_id: + self.log.info(( + "Same attributes were selected {}, {}." + " Showing interface again."
+ ).format(src_attr_id, dst_attr_id)) + return self._selection_interface(session, values) + + # Query custom attributes + src_conf = session.query(( + "select id from CustomAttributeConfiguration where id is {}" + ).format(src_attr_id)).one() + dst_conf = session.query(( + "select id from CustomAttributeConfiguration where id is {}" + ).format(dst_attr_id)).one() + src_type_name = src_conf["type"]["name"] + dst_type_name = dst_conf["type"]["name"] + # Limit conversion to + # - same type -> same type (there is no need to do conversion) + # - number -> number (int to float and back) + # - number -> str (any number can be converted to str) + src_type = None + dst_type = None + if src_type_name == "number" or src_type_name != dst_type_name: + src_type = self._get_attr_type(src_conf) + dst_type = self._get_attr_type(dst_conf) + valid = False + # Can convert numbers + if src_type in (int, float) and dst_type in (int, float): + valid = True + # Can convert numbers to string + elif dst_type is str: + valid = True + + if not valid: + self.log.info(( + "Don't know how to properly convert" + " custom attribute types {} > {}" + ).format(src_type_name, dst_type_name)) + return { + "message": ( + "Don't know how to properly convert" + " custom attribute types {} > {}" + ).format(src_type_name, dst_type_name), + "success": False + } + + # Query source values + src_attr_values = session.query( + ( + "select value, entity_id" + " from CustomAttributeValue" + " where configuration_id is {}" + ).format(src_attr_id) + ).all() + + self.log.debug("Queried source values.") + failed_entity_ids = [] + if dst_type is not None: + self.log.debug("Converting source values to destination type") + value_by_id = {} + for attr_value in src_attr_values: + entity_id = attr_value["entity_id"] + value = attr_value["value"] + if value is not None: + try: + if dst_type is not None: + value = dst_type(value) + value_by_id[entity_id] = value + except Exception: + failed_entity_ids.append(entity_id) + + if failed_entity_ids: + self.log.info( + "Couldn't convert some values to destination attribute" + ) + return { + "success": False, + "message": ( + "Couldn't convert some values to destination attribute" + ) + } + + # Delete destination custom attributes first + if delete_dst_values: + self.log.info("Deleting destination custom attribute values first") + self._delete_custom_attribute_values(session, dst_attr_id) + + self.log.info("Applying source values on destination custom attribute") + self._apply_values(session, value_by_id, dst_attr_id) + return True + + def _delete_custom_attribute_values(self, session, dst_attr_id): + dst_attr_values = session.query( + ( + "select configuration_id, entity_id" + " from CustomAttributeValue" + " where configuration_id is {}" + ).format(dst_attr_id) + ).all() + delete_operations = [] + for attr_value in dst_attr_values: + entity_id = attr_value["entity_id"] + configuration_id = attr_value["configuration_id"] + entity_key = collections.OrderedDict(( + ("configuration_id", configuration_id), + ("entity_id", entity_id) + )) + delete_operations.append( + ftrack_api.operation.DeleteEntityOperation( + "CustomAttributeValue", + entity_key + ) + ) + + if not delete_operations: + return + + for chunk in create_chunks(delete_operations, 500): + for operation in chunk: + session.recorded_operations.push(operation) + session.commit() + + def _apply_values(self, session, value_by_id, dst_attr_id): + dst_attr_values = session.query( + ( + "select configuration_id, entity_id" + " from CustomAttributeValue" + " where configuration_id is {}" + ).format(dst_attr_id) + ).all() + + dst_entity_ids_with_value = { + item["entity_id"] + for item in dst_attr_values + } + operations = [] + for entity_id, value in value_by_id.items(): + entity_key = collections.OrderedDict(( + ("configuration_id", dst_attr_id), + ("entity_id", entity_id) + )) + if entity_id in dst_entity_ids_with_value: + operations.append( + ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", + entity_key, + "value", + ftrack_api.symbol.NOT_SET, + value + ) + ) + else: + operations.append( + ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": value} + ) + ) + + if not operations: + return + + for chunk in create_chunks(operations, 500): + for operation in chunk: + session.recorded_operations.push(operation) + session.commit() + + def _get_attr_type(self, conf_def): + type_name = conf_def["type"]["name"] + if type_name == "text": + return str + + if type_name == "number": + config = json.loads(conf_def["config"]) + if config["isdecimal"]: + return float + return int + return None + + +def register(session): + '''Register plugin. Called when used as a plugin.''' + + TransferHierarchicalValues(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py b/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py index ecc6c95d90..8ef333effd 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py +++ b/openpype/modules/ftrack/event_handlers_server/event_first_version_status.py @@ -135,9 +135,9 @@ class FirstVersionStatus(BaseEvent): new_status = asset_version_statuses.get(found_item["status"]) if not new_status: - self.log.warning( + self.log.warning(( "AssetVersion doesn't have status `{}`."
- ).format(found_item["status"]) + ).format(found_item["status"])) continue try: diff --git a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py index 0914933de4..dc76920a57 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py +++ b/openpype/modules/ftrack/event_handlers_server/event_push_frame_values_to_task.py @@ -1,10 +1,11 @@ import collections import datetime +import copy import ftrack_api from openpype_modules.ftrack.lib import ( BaseEvent, - query_custom_attributes + query_custom_attributes, ) @@ -124,10 +125,15 @@ class PushFrameValuesToTaskEvent(BaseEvent): # Separate value changes and task parent changes _entities_info = [] + added_entities = [] + added_entity_ids = set() task_parent_changes = [] for entity_info in entities_info: if entity_info["entity_type"].lower() == "task": task_parent_changes.append(entity_info) + elif entity_info.get("action") == "add": + added_entities.append(entity_info) + added_entity_ids.add(entity_info["entityId"]) else: _entities_info.append(entity_info) entities_info = _entities_info @@ -136,6 +142,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data, changed_keys_by_object_id = self.filter_changes( session, event, entities_info, interest_attributes ) + self.interesting_data_for_added( + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ) if not interesting_data and not task_parent_changes: return @@ -151,9 +164,13 @@ class PushFrameValuesToTaskEvent(BaseEvent): # - it is a complex way how to find out if interesting_data: self.process_attribute_changes( - session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ) if task_parent_changes: @@ -163,8 +180,12 @@ class PushFrameValuesToTaskEvent(BaseEvent): ) def process_task_parent_change( - self, session, object_types_by_name, task_parent_changes, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + task_parent_changes, + interest_entity_types, + interest_attributes ): """Push custom attribute values if task parent has changed. @@ -176,6 +197,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): real hierarchical value and non hierarchical custom attribute value should be set to hierarchical value. """ + # Store task ids which were created or moved under parent with entity # type defined in settings (interest_entity_types). 
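# Illustrative sketch (outside the diff): the handlers in this file treat
# hierarchical custom attributes as values inherited from parent entities.
# Assuming a plain dict of explicitly set values and a child -> parent map
# (both hypothetical inputs), the resolution the code relies on looks like:
def resolve_hierarchical_value(
    entity_id, set_values_by_entity_id, parent_by_entity_id, default=None
):
    current_id = entity_id
    while current_id is not None:
        value = set_values_by_entity_id.get(current_id)
        if value is not None:
            # The first explicitly set value up the chain wins.
            return value
        current_id = parent_by_entity_id.get(current_id)
    # No ancestor has the value set; fall back to the attribute default.
    return default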
task_ids = set() @@ -380,33 +402,49 @@ class PushFrameValuesToTaskEvent(BaseEvent): uncommited_changes = False for idx, item in enumerate(changes): new_value = item["new_value"] + old_value = item["old_value"] attr_id = item["attr_id"] entity_id = item["entity_id"] attr_key = item["attr_key"] - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = attr_id - entity_key["entity_id"] = entity_id + entity_key = collections.OrderedDict(( + ("configuration_id", attr_id), + ("entity_id", entity_id) + )) self._cached_changes.append({ "attr_key": attr_key, "entity_id": entity_id, "value": new_value, "time": datetime.datetime.now() }) + old_value_is_set = ( + old_value is not ftrack_api.symbol.NOT_SET + and old_value is not None + ) if new_value is None: + if not old_value_is_set: + continue op = ftrack_api.operation.DeleteEntityOperation( "CustomAttributeValue", entity_key ) - else: + + elif old_value_is_set: op = ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", + "CustomAttributeValue", entity_key, "value", - ftrack_api.symbol.NOT_SET, + old_value, new_value ) + else: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": new_value} + ) + session.recorded_operations.push(op) self.log.info(( "Changing Custom Attribute \"{}\" to value" @@ -432,9 +470,14 @@ class PushFrameValuesToTaskEvent(BaseEvent): self.log.warning("Changing of values failed.", exc_info=True) def process_attribute_changes( - self, session, object_types_by_name, - interesting_data, changed_keys_by_object_id, - interest_entity_types, interest_attributes + self, + session, + object_types_by_name, + interesting_data, + changed_keys_by_object_id, + interest_entity_types, + interest_attributes, + added_entity_ids ): # Prepare task object id task_object_id = object_types_by_name["task"]["id"] @@ -522,15 +565,26 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id_by_task_id[task_id] = task_entity["parent_id"] self.finalize_attribute_changes( - session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ) def finalize_attribute_changes( - self, session, interesting_data, - changed_keys, attrs_by_obj_id, hier_attrs, - task_entity_ids, parent_id_by_task_id + self, + session, + interesting_data, + changed_keys, + attrs_by_obj_id, + hier_attrs, + task_entity_ids, + parent_id_by_task_id, + added_entity_ids ): attr_id_to_key = {} for attr_confs in attrs_by_obj_id.values(): @@ -550,7 +604,11 @@ class PushFrameValuesToTaskEvent(BaseEvent): attr_ids = set(attr_id_to_key.keys()) current_values_by_id = self.get_current_values( - session, attr_ids, entity_ids, task_entity_ids, hier_attrs + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ) changes = [] @@ -560,14 +618,25 @@ class PushFrameValuesToTaskEvent(BaseEvent): parent_id = entity_id values = interesting_data[parent_id] + added_entity = entity_id in added_entity_ids for attr_id, old_value in current_values.items(): + if added_entity and attr_id in hier_attrs: + continue + attr_key = attr_id_to_key.get(attr_id) if not attr_key: continue # Convert new value from string new_value = values.get(attr_key) - if new_value is not None and old_value is not None: + new_value_is_valid = ( + old_value is not ftrack_api.symbol.NOT_SET + and new_value is not None + ) + if added_entity and not 
new_value_is_valid: + continue + + if new_value is not None and new_value_is_valid: try: new_value = type(old_value)(new_value) except Exception: @@ -581,6 +650,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): changes.append({ "new_value": new_value, "attr_id": attr_id, + "old_value": old_value, "entity_id": entity_id, "attr_key": attr_key }) @@ -599,6 +669,7 @@ class PushFrameValuesToTaskEvent(BaseEvent): interesting_data = {} changed_keys_by_object_id = {} + for entity_info in entities_info: # Care only about changes if specific keys entity_changes = {} @@ -644,16 +715,123 @@ class PushFrameValuesToTaskEvent(BaseEvent): return interesting_data, changed_keys_by_object_id + def interesting_data_for_added( + self, + session, + added_entities, + interest_attributes, + interesting_data, + changed_keys_by_object_id + ): + if not added_entities or not interest_attributes: + return + + object_type_ids = set() + entity_ids = set() + all_entity_ids = set() + object_id_by_entity_id = {} + project_id = None + entity_ids_by_parent_id = collections.defaultdict(set) + for entity_info in added_entities: + object_id = entity_info["objectTypeId"] + entity_id = entity_info["entityId"] + object_type_ids.add(object_id) + entity_ids.add(entity_id) + object_id_by_entity_id[entity_id] = object_id + + for item in entity_info["parents"]: + entity_id = item["entityId"] + all_entity_ids.add(entity_id) + parent_id = item["parentId"] + if not parent_id: + project_id = entity_id + else: + entity_ids_by_parent_id[parent_id].add(entity_id) + + hier_attrs = self.get_hierarchical_configurations( + session, interest_attributes + ) + if not hier_attrs: + return + + hier_attrs_key_by_id = { + attr_conf["id"]: attr_conf["key"] + for attr_conf in hier_attrs + } + default_values_by_key = { + attr_conf["key"]: attr_conf["default"] + for attr_conf in hier_attrs + } + + values = query_custom_attributes( + session, list(hier_attrs_key_by_id.keys()), all_entity_ids, True + ) + values_per_entity_id = {} + for entity_id in all_entity_ids: + values_per_entity_id[entity_id] = {} + for attr_name in interest_attributes: + values_per_entity_id[entity_id][attr_name] = None + + for item in values: + entity_id = item["entity_id"] + key = hier_attrs_key_by_id[item["configuration_id"]] + values_per_entity_id[entity_id][key] = item["value"] + + fill_queue = collections.deque() + fill_queue.append((project_id, default_values_by_key)) + while fill_queue: + item = fill_queue.popleft() + entity_id, values_by_key = item + entity_values = values_per_entity_id[entity_id] + new_values_by_key = copy.deepcopy(values_by_key) + for key, value in values_by_key.items(): + current_value = entity_values[key] + if current_value is None: + entity_values[key] = value + else: + new_values_by_key[key] = current_value + + for child_id in entity_ids_by_parent_id[entity_id]: + fill_queue.append((child_id, new_values_by_key)) + + for entity_id in entity_ids: + entity_changes = {} + for key, value in values_per_entity_id[entity_id].items(): + if value is not None: + entity_changes[key] = value + + if not entity_changes: + continue + + interesting_data[entity_id] = entity_changes + object_id = object_id_by_entity_id[entity_id] + if object_id not in changed_keys_by_object_id: + changed_keys_by_object_id[object_id] = set() + changed_keys_by_object_id[object_id] |= set(entity_changes.keys()) + def get_current_values( - self, session, attr_ids, entity_ids, task_entity_ids, hier_attrs + self, + session, + attr_ids, + entity_ids, + task_entity_ids, + hier_attrs ): 
current_values_by_id = {} if not attr_ids or not entity_ids: return current_values_by_id + for entity_id in entity_ids: + current_values_by_id[entity_id] = {} + for attr_id in attr_ids: + current_values_by_id[entity_id][attr_id] = ( + ftrack_api.symbol.NOT_SET + ) + values = query_custom_attributes( session, attr_ids, entity_ids, True ) + for item in values: entity_id = item["entity_id"] attr_id = item["configuration_id"] @@ -699,6 +877,18 @@ class PushFrameValuesToTaskEvent(BaseEvent): output[obj_id][attr["key"]] = attr["id"] return output, hiearchical + def get_hierarchical_configurations(self, session, interest_attributes): + hier_attr_query = ( + "select id, key, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key in ({}) and is_hierarchical is true" + ) + if not interest_attributes: + return [] + return list(session.query(hier_attr_query.format( + self.join_query_keys(interest_attributes), + )).all()) + def register(session): PushFrameValuesToTaskEvent(session).register() diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py index 9610e7f5de..ae70c6756f 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_links.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_links.py @@ -1,7 +1,7 @@ from pymongo import UpdateOne from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import ( CUST_ATTR_ID_KEY, diff --git a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py index 46c333c4c4..0058a428e3 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_server/event_sync_to_avalon.py @@ -12,8 +12,14 @@ from pymongo import UpdateOne import arrow import ftrack_api -from avalon import schema -from avalon.api import AvalonMongoDB +from openpype.client import ( + get_project, + get_assets, + get_archived_assets, + get_asset_ids_with_subsets +) +from openpype.client.operations import CURRENT_ASSET_DOC_SCHEMA +from openpype.pipeline import AvalonMongoDB, schema from openpype_modules.ftrack.lib import ( get_openpype_attr, @@ -30,7 +36,6 @@ from openpype_modules.ftrack.lib.avalon_sync import ( convert_to_fps, InvalidFpsValue ) -from openpype.lib import CURRENT_DOC_SCHEMAS class SyncToAvalonEvent(BaseEvent): @@ -150,12 +155,11 @@ class SyncToAvalonEvent(BaseEvent): @property def avalon_entities(self): if self._avalon_ents is None: + project_name = self.cur_project["full_name"] self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] - ) - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = list(self.dbcon.find({"type": "asset"})) + self.dbcon.Session["AVALON_PROJECT"] = project_name + avalon_project = get_project(project_name) + avalon_entities = list(get_assets(project_name)) self._avalon_ents = (avalon_project, avalon_entities) return self._avalon_ents @@ -285,28 +289,21 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_ftrack_id[ftrack_id] = doc @property - def avalon_subsets_by_parents(self): - if self._avalon_subsets_by_parents is None: - self._avalon_subsets_by_parents = collections.defaultdict(list) - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] + def 
avalon_asset_ids_with_subsets(self): + if self._avalon_asset_ids_with_subsets is None: + project_name = self.cur_project["full_name"] + self._avalon_asset_ids_with_subsets = get_asset_ids_with_subsets( + project_name ) - for subset in self.dbcon.find({"type": "subset"}): - self._avalon_subsets_by_parents[subset["parent"]].append( - subset - ) - return self._avalon_subsets_by_parents + + return self._avalon_asset_ids_with_subsets @property def avalon_archived_by_id(self): if self._avalon_archived_by_id is None: self._avalon_archived_by_id = {} - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ( - self.cur_project["full_name"] - ) - for asset in self.dbcon.find({"type": "archived_asset"}): + project_name = self.cur_project["full_name"] + for asset in get_archived_assets(project_name): self._avalon_archived_by_id[asset["_id"]] = asset return self._avalon_archived_by_id @@ -328,7 +325,7 @@ class SyncToAvalonEvent(BaseEvent): avalon_project, avalon_entities = self.avalon_entities self._changeability_by_mongo_id[avalon_project["_id"]] = False self._bubble_changeability( - list(self.avalon_subsets_by_parents.keys()) + list(self.avalon_asset_ids_with_subsets) ) return self._changeability_by_mongo_id @@ -450,14 +447,9 @@ class SyncToAvalonEvent(BaseEvent): if not entity: # if entity is not found then it is subset without parent if entity_id in unchangeable_ids: - _subset_ids = [ - str(sub["_id"]) for sub in - self.avalon_subsets_by_parents[entity_id] - ] - joined_subset_ids = "| ".join(_subset_ids) self.log.warning(( - "Parent <{}> for subsets <{}> does not exist" - ).format(str(entity_id), joined_subset_ids)) + "Parent <{}> with subsets does not exist" + ).format(str(entity_id))) else: self.log.warning(( "In avalon are entities without valid parents that" @@ -484,7 +476,7 @@ class SyncToAvalonEvent(BaseEvent): self._avalon_ents_by_parent_id = None self._avalon_ents_by_ftrack_id = None self._avalon_ents_by_name = None - self._avalon_subsets_by_parents = None + self._avalon_asset_ids_with_subsets = None self._changeability_by_mongo_id = None self._avalon_archived_by_id = None self._avalon_archived_by_name = None @@ -705,13 +697,22 @@ class SyncToAvalonEvent(BaseEvent): continue auto_sync = changes[CUST_ATTR_AUTO_SYNC]["new"] - if auto_sync == "1": + turned_on = auto_sync == "1" + ft_project = self.cur_project + username = self._get_username(session, event) + message = ( + "Auto sync was turned {} for project \"{}\" by \"{}\"." + ).format( + "on" if turned_on else "off", + ft_project["full_name"], + username + ) + if turned_on: + message += " Triggering syncToAvalon action." + self.log.debug(message) + + if turned_on: # Trigger sync to avalon action if auto sync was turned on - ft_project = self.cur_project - self.log.debug(( - "Auto sync was turned on for project <{}>." - " Triggering syncToAvalon action." 
- ).format(ft_project["full_name"])) selection = [{ "entityId": ft_project["id"], "entityType": "show" @@ -859,6 +860,26 @@ class SyncToAvalonEvent(BaseEvent): self.report() return True + def _get_username(self, session, event): + username = "Unknown" + event_source = event.get("source") + if not event_source: + return username + user_info = event_source.get("user") + if not user_info: + return username + user_id = user_info.get("id") + if not user_id: + return username + + user_entity = session.query( + "User where id is {}".format(user_id) + ).first() + if user_entity: + username = user_entity["username"] or username + return username + + def process_removed(self): """ Handles removed entities (not removed tasks - handle separately). @@ -952,7 +973,7 @@ class SyncToAvalonEvent(BaseEvent): except Exception: # TODO logging # TODO report - self.process_session.rolback() + self.process_session.rollback() ent_path_items = [self.cur_project["full_name"]] ent_path_items.extend([ par for par in avalon_entity["data"]["parents"] @@ -995,7 +1016,7 @@ class SyncToAvalonEvent(BaseEvent): except Exception: # TODO logging # TODO report - self.process_session.rolback() + self.process_session.rollback() error_msg = ( "Couldn't update custom attributes after recreation" " of entity in Ftrack" @@ -1215,7 +1236,7 @@ class SyncToAvalonEvent(BaseEvent): "_id": mongo_id, "name": name, "type": "asset", - "schema": CURRENT_DOC_SCHEMAS["asset"], + "schema": CURRENT_ASSET_DOC_SCHEMA, "parent": proj["_id"], "data": { "ftrackId": ftrack_ent["id"], @@ -1317,7 +1338,7 @@ class SyncToAvalonEvent(BaseEvent): try: self.process_session.commit() except Exception: - self.process_session.rolback() + self.process_session.rollback() # TODO logging # TODO report error_msg = ( diff --git a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py index 96243c8c36..c4e48b92f0 100644 --- a/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py +++ b/openpype/modules/ftrack/event_handlers_server/event_user_assigment.py @@ -1,14 +1,11 @@ -import os import re import subprocess +from openpype.client import get_asset_by_id, get_asset_by_name +from openpype.settings import get_project_settings +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseEvent from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from avalon.api import AvalonMongoDB - -from bson.objectid import ObjectId - -from openpype.api import Anatomy, get_project_settings class UserAssigmentEvent(BaseEvent): @@ -37,8 +34,6 @@ class UserAssigmentEvent(BaseEvent): 3) path to publish files of task user was (de)assigned to """ - db_con = AvalonMongoDB() - def error(self, *err): for e in err: self.log.error(e) @@ -102,26 +97,16 @@ class UserAssigmentEvent(BaseEvent): :rtype: dict """ parent = task['parent'] - self.db_con.install() - self.db_con.Session['AVALON_PROJECT'] = task['project']['full_name'] - + project_name = task["project"]["full_name"] avalon_entity = None parent_id = parent['custom_attributes'].get(CUST_ATTR_ID_KEY) if parent_id: - parent_id = ObjectId(parent_id) - avalon_entity = self.db_con.find_one({ - '_id': parent_id, - 'type': 'asset' - }) + avalon_entity = get_asset_by_id(project_name, parent_id) if not avalon_entity: - avalon_entity = self.db_con.find_one({ - 'type': 'asset', - 'name': parent['name'] - }) + avalon_entity = get_asset_by_name(project_name, parent["name"]) if not avalon_entity: - 
self.db_con.uninstall() msg = 'Entity "{}" not found in avalon database'.format( parent['name'] ) @@ -130,7 +115,6 @@ class UserAssigmentEvent(BaseEvent): 'success': False, 'message': msg } - self.db_con.uninstall() return avalon_entity def _get_hierarchy(self, asset): @@ -148,7 +132,7 @@ class UserAssigmentEvent(BaseEvent): """ Get data to fill template from task - .. seealso:: :mod:`openpype.api.Anatomy` + .. seealso:: :mod:`openpype.pipeline.Anatomy` :param task: Task entity :type task: dict diff --git a/openpype/modules/ftrack/event_handlers_user/action_applications.py b/openpype/modules/ftrack/event_handlers_user/action_applications.py index 48a0dea006..102f04c956 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_applications.py +++ b/openpype/modules/ftrack/event_handlers_user/action_applications.py @@ -1,6 +1,6 @@ import os -from uuid import uuid4 +from openpype.client import get_project from openpype_modules.ftrack.lib import BaseAction from openpype.lib.applications import ( ApplicationManager, @@ -8,7 +8,6 @@ from openpype.lib.applications import ( ApplictionExecutableNotFound, CUSTOM_LAUNCH_APP_GROUPS ) -from avalon.api import AvalonMongoDB class AppplicationsAction(BaseAction): @@ -26,7 +25,6 @@ class AppplicationsAction(BaseAction): super(AppplicationsAction, self).__init__(*args, **kwargs) self.application_manager = ApplicationManager() - self.dbcon = AvalonMongoDB() @property def discover_identifier(self): @@ -111,12 +109,7 @@ class AppplicationsAction(BaseAction): if avalon_project_doc is None: ft_project = self.get_project_from_entity(entity) project_name = ft_project["full_name"] - if not self.dbcon.is_installed(): - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = project_name - avalon_project_doc = self.dbcon.find_one({ - "type": "project" - }) or False + avalon_project_doc = get_project(project_name) or False event["data"]["avalon_project_doc"] = avalon_project_doc if not avalon_project_doc: diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py index 88dc8213bd..c19cfd1502 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_cust_attrs.py @@ -18,7 +18,7 @@ from openpype_modules.ftrack.lib import ( tool_definitions_from_app_manager ) -from openpype.api import get_system_settings +from openpype.settings import get_system_settings from openpype.lib import ApplicationManager """ @@ -140,9 +140,9 @@ class CustomAttributes(BaseAction): identifier = 'create.update.attributes' #: Action label. label = "OpenPype Admin" - variant = '- Create/Update Avalon Attributes' + variant = '- Create/Update Custom Attributes' #: Action description. 
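# Illustrative sketch (outside the diff): the applications action hunk above
# caches the queried project document on the event payload, so repeated
# discover calls for the same event do not re-query the database. A minimal
# version of that pattern ('fetch_project' stands in for
# openpype.client.get_project):
def get_cached_project_doc(event, project_name, fetch_project):
    project_doc = event["data"].get("avalon_project_doc")
    if project_doc is None:
        # "or False" caches a negative result too, so a missing project
        # is looked up only once per event.
        project_doc = fetch_project(project_name) or False
        event["data"]["avalon_project_doc"] = project_doc
    return project_doc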
- description = 'Creates Avalon/Mongo ID for double check' + description = 'Creates required custom attributes in ftrack' icon = statics_icon("ftrack", "action_icons", "OpenPypeAdmin.svg") settings_key = "create_update_attributes" diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py index d15a865124..9806f83773 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_folders.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_folders.py @@ -1,11 +1,8 @@ import os +import collections +import copy +from openpype.pipeline import Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon -from avalon import lib as avalonlib -from openpype.api import ( - Anatomy, - get_project_settings -) -from openpype.lib import ApplicationManager class CreateFolders(BaseAction): @@ -14,55 +11,59 @@ class CreateFolders(BaseAction): icon = statics_icon("ftrack", "action_icons", "CreateFolders.svg") def discover(self, session, entities, event): - if len(entities) != 1: - return False - - not_allowed = ["assetversion", "project"] - if entities[0].entity_type.lower() in not_allowed: - return False - - return True + for entity_item in event["data"]["selection"]: + if entity_item.get("entityType").lower() in ("task", "show"): + return True + return False def interface(self, session, entities, event): if event["data"].get("values", {}): return - entity = entities[0] - without_interface = True - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - without_interface = False + + with_interface = False + for entity in entities: + if entity.entity_type.lower() != "task": + with_interface = True break - self.without_interface = without_interface - if without_interface: + + if "values" not in event["data"]: + event["data"]["values"] = {} + + event["data"]["values"]["with_interface"] = with_interface + if not with_interface: return + title = "Create folders" entity_name = entity["name"] msg = ( "
Do you want to create folders also" - " for all children of \"{}\"?" + " for all children of your selection?" ) if entity.entity_type.lower() == "project": entity_name = entity["full_name"] msg = msg.replace(" also", "") msg += "(Project root won't be created if not checked)
" - items = [] - item_msg = { - "type": "label", - "value": msg.format(entity_name) - } - item_label = { - "type": "label", - "value": "With all chilren entities" - } - item = { - "name": "children_included", - "type": "boolean", - "value": False - } - items.append(item_msg) - items.append(item_label) - items.append(item) + items = [ + { + "type": "label", + "value": msg.format(entity_name) + }, + { + "type": "label", + "value": "With all children entities" + }, + { + "name": "children_included", + "type": "boolean", + "value": False + }, + { + "type": "hidden", + "name": "with_interface", + "value": with_interface + } + ] return { "items": items, @@ -71,30 +72,47 @@ def launch(self, session, entities, event): '''Callback method for custom action.''' + + if "values" not in event["data"]: + return + + with_interface = event["data"]["values"]["with_interface"] with_childrens = True - if self.without_interface is False: - if "values" not in event["data"]: - return + if with_interface: with_childrens = event["data"]["values"]["children_included"] - entity = entities[0] - if entity.entity_type.lower() == "project": - proj = entity - else: - proj = entity["project"] - project_name = proj["full_name"] - project_code = proj["name"] + filtered_entities = [] + for entity in entities: + low_context_type = entity["context_type"].lower() + if low_context_type in ("task", "show"): + if not with_childrens and low_context_type == "show": + continue + filtered_entities.append(entity) - if entity.entity_type.lower() == 'project' and with_childrens is False: + if not filtered_entities: return { - 'success': True, - 'message': 'Nothing was created' + "success": True, + "message": "Nothing was created" } - all_entities = [] - all_entities.append(entity) - if with_childrens: - all_entities = self.get_notask_children(entity) + project_entity = self.get_project_from_entity(filtered_entities[0]) + + project_name = project_entity["full_name"] + project_code = project_entity["name"] + + task_entities = [] + other_entities = [] + self.get_all_entities( + session, entities, task_entities, other_entities + ) + hierarchy = self.get_entities_hierarchy( + session, task_entities, other_entities + ) + task_types = session.query("select id, name from Type").all() + task_type_names_by_id = { + task_type["id"]: task_type["name"] + for task_type in task_types + } anatomy = Anatomy(project_name) @@ -102,77 +120,67 @@ work_keys = ["work", "folder"] work_template = anatomy.templates for key in work_keys: work_template = work_template[key] - work_has_apps = "{app" in work_template publish_keys = ["publish", "folder"] publish_template = anatomy.templates for key in publish_keys: publish_template = publish_template[key] - publish_has_apps = "{app" in publish_template + + project_data = { + "project": { + "name": project_name, + "code": project_code + } + } collected_paths = [] - for entity in all_entities: - if entity.entity_type.lower() == "project": - continue - ent_data = { - "project": { - "name": project_name, - "code": project_code - } - } + for item in hierarchy: + parent_entity, task_entities = item - ent_data["asset"] = entity["name"] + parent_data = copy.deepcopy(project_data) - parents = entity["link"][1:-1] + parents = parent_entity["link"][1:-1] hierarchy_names = [p["name"] for p in parents] - hierarchy = "" + hierarchy = "/".join(hierarchy_names) + if hierarchy_names: - hierarchy = os.path.sep.join(hierarchy_names) - ent_data["hierarchy"] = hierarchy + parent_name = hierarchy_names[-1]
+ else: + parent_name = project_name - tasks_created = False - for child in entity["children"]: - if child["object_type"]["name"].lower() != "task": - continue - tasks_created = True - task_data = ent_data.copy() - task_data["task"] = child["name"] + parent_data.update({ + "asset": parent_entity["name"], + "hierarchy": hierarchy, + "parent": parent_name + }) - apps = [] - - # Template wok - if work_has_apps: - app_data = task_data.copy() - for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, work_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, work_keys - )) - - # Template publish - if publish_has_apps: - app_data = task_data.copy() - for app in apps: - app_data["app"] = app - collected_paths.append(self.compute_template( - anatomy, app_data, publish_keys - )) - else: - collected_paths.append(self.compute_template( - anatomy, task_data, publish_keys - )) - - if not tasks_created: + if not task_entities: # create path for entity collected_paths.append(self.compute_template( - anatomy, ent_data, work_keys + anatomy, parent_data, work_keys )) collected_paths.append(self.compute_template( - anatomy, ent_data, publish_keys + anatomy, parent_data, publish_keys + )) + continue + + for task_entity in task_entities: + task_type_id = task_entity["type_id"] + task_type_name = task_type_names_by_id[task_type_id] + task_data = copy.deepcopy(parent_data) + task_data["task"] = { + "name": task_entity["name"], + "type": task_type_name + } + + # Template work + collected_paths.append(self.compute_template( + anatomy, task_data, work_keys + )) + + # Template publish + collected_paths.append(self.compute_template( + anatomy, task_data, publish_keys )) if len(collected_paths) == 0: @@ -193,14 +201,65 @@ "message": "Successfully created project folders."
} - def get_notask_children(self, entity): + def get_all_entities( + self, session, entities, task_entities, other_entities + ): + if not entities: + return + + no_task_entities = [] + for entity in entities: + if entity.entity_type.lower() == "task": + task_entities.append(entity) + else: + no_task_entities.append(entity) + + if not no_task_entities: + return task_entities + + other_entities.extend(no_task_entities) + + no_task_entity_ids = [entity["id"] for entity in no_task_entities] + next_entities = session.query(( + "select id, parent_id" + " from TypedContext where parent_id in ({})" + ).format(self.join_query_keys(no_task_entity_ids))).all() + + self.get_all_entities( + session, next_entities, task_entities, other_entities + ) + + def get_entities_hierarchy(self, session, task_entities, other_entities): + task_entity_ids = [entity["id"] for entity in task_entities] + full_task_entities = session.query(( + "select id, name, type_id, parent_id" + " from TypedContext where id in ({})" + ).format(self.join_query_keys(task_entity_ids))) + task_entities_by_parent_id = collections.defaultdict(list) + for entity in full_task_entities: + parent_id = entity["parent_id"] + task_entities_by_parent_id[parent_id].append(entity) + output = [] - if entity.entity_type.lower() == "task": + if not task_entities_by_parent_id: return output - output.append(entity) - for child in entity["children"]: - output.extend(self.get_notask_children(child)) + other_ids = set() + for entity in other_entities: + other_ids.add(entity["id"]) + other_ids |= set(task_entities_by_parent_id.keys()) + + parent_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(other_ids))).all() + + for parent_entity in parent_entities: + parent_id = parent_entity["id"] + output.append(( + parent_entity, + task_entities_by_parent_id[parent_id] + )) + return output def compute_template(self, anatomy, data, anatomy_keys): diff --git a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py index 94f359c317..7c896570b1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py +++ b/openpype/modules/ftrack/event_handlers_user/action_create_project_structure.py @@ -1,9 +1,10 @@ -import os import re -import json +from openpype.pipeline.project_folders import ( + get_project_basic_paths, + create_project_folders, +) from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.api import get_project_basic_paths, create_project_folders class CreateProjectFolders(BaseAction): @@ -83,9 +84,14 @@ class CreateProjectFolders(BaseAction): } # Invoking OpenPype API to create the project folders - create_project_folders(basic_paths, project_name) + create_project_folders(project_name, basic_paths) self.create_ftrack_entities(basic_paths, project_entity) + self.trigger_event( + "openpype.project.structure.created", + {"project_name": project_name} + ) + except Exception as exc: self.log.warning("Creating of structure crashed.", exc_info=True) session.rollback() diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py index 94385a36c5..03d029b0c1 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_asset.py @@ -3,7 +3,9 @@ import uuid from datetime import 
datetime from bson.objectid import ObjectId -from avalon.api import AvalonMongoDB + +from openpype.client import get_assets, get_subsets +from openpype.pipeline import AvalonMongoDB from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -90,10 +92,8 @@ class DeleteAssetSubset(BaseAction): continue ftrack_id = entity.get("entityId") - if not ftrack_id: - continue - - ftrack_ids.append(ftrack_id) + if ftrack_id: + ftrack_ids.append(ftrack_id) if project_in_selection: msg = "It is not possible to use this action on project entity." @@ -119,48 +119,51 @@ class DeleteAssetSubset(BaseAction): "message": "Invalid selection for this action (Bug)" } - if entities[0].entity_type.lower() == "project": - project = entities[0] - else: - project = entities[0]["project"] - + project = self.get_project_from_entity(entities[0], session) project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name - selected_av_entities = list(self.dbcon.find({ - "type": "asset", - "data.ftrackId": {"$in": ftrack_ids} - })) + asset_docs = list(get_assets( + project_name, + fields=["_id", "name", "data.ftrackId", "data.parents"] + )) + selected_av_entities = [] + found_ftrack_ids = set() + asset_docs_by_name = collections.defaultdict(list) + for asset_doc in asset_docs: + ftrack_id = asset_doc["data"].get("ftrackId") + if ftrack_id: + found_ftrack_ids.add(ftrack_id) + if ftrack_id in entity_mapping: + selected_av_entities.append(asset_doc) + + asset_name = asset_doc["name"] + asset_docs_by_name[asset_name].append(asset_doc) + found_without_ftrack_id = {} - if len(selected_av_entities) != len(ftrack_ids): - found_ftrack_ids = [ - ent["data"]["ftrackId"] for ent in selected_av_entities - ] - for ftrack_id, entity in entity_mapping.items(): - if ftrack_id in found_ftrack_ids: + for ftrack_id, entity in entity_mapping.items(): + if ftrack_id in found_ftrack_ids: + continue + + av_ents_by_name = asset_docs_by_name[entity["name"]] + if not av_ents_by_name: + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + end_index = len(ent_path_items) - 1 + parents = ent_path_items[1:end_index:] + # TODO we should say to user that + # few of them are missing in avalon + for av_ent in av_ents_by_name: + if av_ent["data"]["parents"] != parents: continue - av_ents_by_name = list(self.dbcon.find({ - "type": "asset", - "name": entity["name"] - })) - if not av_ents_by_name: - continue - - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - # TODO we should say to user that - # few of them are missing in avalon - for av_ent in av_ents_by_name: - if av_ent["data"]["parents"] != parents: - continue - - # TODO we should say to user that found entity - # with same name does not match same ftrack id? - if "ftrackId" not in av_ent["data"]: - selected_av_entities.append(av_ent) - found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id - break + # TODO we should say to user that found entity + # with same name does not match same ftrack id? 
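# Illustrative sketch (outside the diff): when an avalon asset has no
# "ftrackId", the hunk above falls back to matching by name plus parent
# path. The parents are derived from the ftrack entity "link" by dropping
# the project (first item) and the entity itself (last item):
def parents_from_link(link):
    # "link" items carry entity names from the project root to the entity.
    names = [item["name"] for item in link]
    return names[1:len(names) - 1]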
+ if "ftrackId" not in av_ent["data"]: + selected_av_entities.append(av_ent) + found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id + break if not selected_av_entities: return { @@ -205,10 +208,7 @@ class DeleteAssetSubset(BaseAction): items.append(id_item) asset_ids = [ent["_id"] for ent in selected_av_entities] - subsets_for_selection = self.dbcon.find({ - "type": "subset", - "parent": {"$in": asset_ids} - }) + subsets_for_selection = get_subsets(project_name, asset_ids=asset_ids) asset_ending = "" if len(selected_av_entities) > 1: @@ -458,13 +458,9 @@ class DeleteAssetSubset(BaseAction): if len(assets_to_delete) > 0: map_av_ftrack_id = spec_data["without_ftrack_id"] # Prepare data when deleting whole avalon asset - avalon_assets = self.dbcon.find( - {"type": "asset"}, - { - "_id": 1, - "data.visualParent": 1, - "data.ftrackId": 1 - } + avalon_assets = get_assets( + project_name, + fields=["_id", "data.visualParent", "data.ftrackId"] ) avalon_assets_by_parent = collections.defaultdict(list) for asset in avalon_assets: diff --git a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py index 5871646b20..c543dc8834 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delete_old_versions.py @@ -5,10 +5,18 @@ import uuid import clique from pymongo import UpdateOne -from avalon.api import AvalonMongoDB - -from openpype.api import Anatomy -from openpype.lib import StringTemplate, TemplateUnsolved +from openpype.client import ( + get_assets, + get_subsets, + get_versions, + get_representations +) +from openpype.lib import ( + StringTemplate, + TemplateUnsolved, + format_file_size, +) +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -130,13 +138,6 @@ class DeleteOldVersions(BaseAction): "title": self.inteface_title } - def sizeof_fmt(self, num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - def launch(self, session, entities, event): values = event["data"].get("values") if not values: @@ -198,10 +199,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Project is set to {}".format(project_name)) # Get Assets from avalon database - assets = list(self.dbcon.find({ - "type": "asset", - "name": {"$in": avalon_asset_names} - })) + assets = list( + get_assets(project_name, asset_names=avalon_asset_names) + ) asset_id_to_name_map = { asset["_id"]: asset["name"] for asset in assets } @@ -210,10 +210,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Collected assets ({})".format(len(asset_ids))) # Get Subsets - subsets = list(self.dbcon.find({ - "type": "subset", - "parent": {"$in": asset_ids} - })) + subsets = list( + get_subsets(project_name, asset_ids=asset_ids) + ) subsets_by_id = {} subset_ids = [] for subset in subsets: @@ -230,10 +229,9 @@ class DeleteOldVersions(BaseAction): self.log.debug("Collected subsets ({})".format(len(subset_ids))) # Get Versions - versions = list(self.dbcon.find({ - "type": "version", - "parent": {"$in": subset_ids} - })) + versions = list( + get_versions(project_name, subset_ids=subset_ids) + ) versions_by_parent = collections.defaultdict(list) for ent in versions: @@ -295,10 +293,9 @@ class DeleteOldVersions(BaseAction): "message": msg } - repres = 
list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list( + get_representations(project_name, version_ids=version_ids) + ) self.log.debug( "Collected representations to remove ({})".format(len(repres)) @@ -359,7 +356,7 @@ class DeleteOldVersions(BaseAction): dir_paths, file_paths_by_dir, delete=False ) - msg = "Total size of files: " + self.sizeof_fmt(size) + msg = "Total size of files: {}".format(format_file_size(size)) self.log.warning(msg) @@ -430,7 +427,7 @@ class DeleteOldVersions(BaseAction): "message": msg } - msg = "Total size of files deleted: " + self.sizeof_fmt(size) + msg = "Total size of files deleted: {}".format(format_file_size(size)) self.log.warning(msg) @@ -569,7 +566,7 @@ class DeleteOldVersions(BaseAction): context["frame"] = self.sequence_splitter sequence_path = os.path.normpath( StringTemplate.format_strict_template( - context, template + template, context ) ) diff --git a/openpype/modules/ftrack/event_handlers_user/action_delivery.py b/openpype/modules/ftrack/event_handlers_user/action_delivery.py index 1f28b18900..a400c8f5f0 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_delivery.py +++ b/openpype/modules/ftrack/event_handlers_user/action_delivery.py @@ -3,23 +3,30 @@ import copy import json import collections -from bson.objectid import ObjectId - -from openpype.api import Anatomy, config +from openpype.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_representations +) from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY -from openpype.lib.delivery import ( - path_from_representation, +from openpype_modules.ftrack.lib.custom_attributes import ( + query_custom_attributes +) +from openpype.lib.dateutils import get_datetime_data +from openpype.pipeline import Anatomy +from openpype.pipeline.load import get_representation_path_with_anatomy +from openpype.pipeline.delivery import ( get_format_dict, check_destination_path, - process_single_file, - process_sequence + deliver_single_file, + deliver_sequence, ) -from avalon.api import AvalonMongoDB class Delivery(BaseAction): - identifier = "delivery.action" label = "Delivery" description = "Deliver data to client" @@ -27,15 +34,10 @@ class Delivery(BaseAction): icon = statics_icon("ftrack", "action_icons", "Delivery.svg") settings_key = "delivery_action" - def __init__(self, *args, **kwargs): - self.db_con = AvalonMongoDB() - - super(Delivery, self).__init__(*args, **kwargs) - def discover(self, session, entities, event): is_valid = False for entity in entities: - if entity.entity_type.lower() == "assetversion": + if entity.entity_type.lower() in ("assetversion", "reviewsession"): is_valid = True break @@ -54,9 +56,7 @@ class Delivery(BaseAction): project_entity = self.get_project_from_entity(entities[0]) project_name = project_entity["full_name"] - self.db_con.install() - self.db_con.Session["AVALON_PROJECT"] = project_name - project_doc = self.db_con.find_one({"type": "project"}) + project_doc = get_project(project_name, fields=["name"]) if not project_doc: return { "success": False, @@ -65,8 +65,7 @@ class Delivery(BaseAction): ).format(project_name) } - repre_names = self._get_repre_names(entities) - self.db_con.uninstall() + repre_names = self._get_repre_names(project_name, session, entities) items.append({ "type": "hidden", @@ -195,51 +194,122 @@ class Delivery(BaseAction): "title": title } - def _get_repre_names(self, entities): - 
version_ids = self._get_interest_version_ids(entities) - repre_docs = self.db_con.find({ - "type": "representation", - "parent": {"$in": version_ids} - }) - return list(sorted(repre_docs.distinct("name"))) + def _get_repre_names(self, project_name, session, entities): + version_ids = self._get_interest_version_ids( + project_name, session, entities + ) + if not version_ids: + return [] + repre_docs = get_representations( + project_name, + version_ids=version_ids, + fields=["name"] + ) + repre_names = {repre_doc["name"] for repre_doc in repre_docs} + return list(sorted(repre_names)) - def _get_interest_version_ids(self, entities): - parent_ent_by_id = {} + def _get_interest_version_ids(self, project_name, session, entities): + # Extract AssetVersion entities + asset_versions = self._extract_asset_versions(session, entities) + # Prepare Asset ids + asset_ids = { + asset_version["asset_id"] + for asset_version in asset_versions + } + # Query Asset entities + assets = session.query(( + "select id, name, context_id from Asset where id in ({})" + ).format(self.join_query_keys(asset_ids))).all() + assets_by_id = { + asset["id"]: asset + for asset in assets + } + parent_ids = set() subset_names = set() version_nums = set() - for entity in entities: - asset = entity["asset"] - parent = asset["parent"] - parent_ent_by_id[parent["id"]] = parent + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] - subset_name = asset["name"] - subset_names.add(subset_name) + parent_ids.add(asset["context_id"]) + subset_names.add(asset["name"]) + version_nums.add(asset_version["version"]) - version = entity["version"] - version_nums.add(version) - - asset_docs_by_ftrack_id = self._get_asset_docs(parent_ent_by_id) + asset_docs_by_ftrack_id = self._get_asset_docs( + project_name, session, parent_ids + ) subset_docs = self._get_subset_docs( - asset_docs_by_ftrack_id, subset_names, entities + project_name, + asset_docs_by_ftrack_id, + subset_names, + asset_versions, + assets_by_id ) version_docs = self._get_version_docs( - asset_docs_by_ftrack_id, subset_docs, version_nums, entities + project_name, + asset_docs_by_ftrack_id, + subset_docs, + version_nums, + asset_versions, + assets_by_id ) return [version_doc["_id"] for version_doc in version_docs] + def _extract_asset_versions(self, session, entities): + asset_version_ids = set() + review_session_ids = set() + for entity in entities: + entity_type_low = entity.entity_type.lower() + if entity_type_low == "assetversion": + asset_version_ids.add(entity["id"]) + elif entity_type_low == "reviewsession": + review_session_ids.add(entity["id"]) + + for version_id in self._get_asset_version_ids_from_review_sessions( + session, review_session_ids + ): + asset_version_ids.add(version_id) + + asset_versions = session.query(( + "select id, version, asset_id from AssetVersion where id in ({})" + ).format(self.join_query_keys(asset_version_ids))).all() + + return asset_versions + + def _get_asset_version_ids_from_review_sessions( + self, session, review_session_ids + ): + if not review_session_ids: + return set() + review_session_objects = session.query(( + "select version_id from ReviewSessionObject" + " where review_session_id in ({})" + ).format(self.join_query_keys(review_session_ids))).all() + + return { + review_session_object["version_id"] + for review_session_object in review_session_objects + } + def _get_version_docs( - self, asset_docs_by_ftrack_id, subset_docs, version_nums, entities + self, + project_name, + 
asset_docs_by_ftrack_id, + subset_docs, + version_nums, + asset_versions, + assets_by_id ): subset_docs_by_id = { subset_doc["_id"]: subset_doc for subset_doc in subset_docs } - version_docs = list(self.db_con.find({ - "type": "version", - "parent": {"$in": list(subset_docs_by_id.keys())}, - "name": {"$in": list(version_nums)} - })) + version_docs = list(get_versions( + project_name, + subset_ids=subset_docs_by_id.keys(), + versions=version_nums + )) version_docs_by_parent_id = collections.defaultdict(dict) for version_doc in version_docs: subset_doc = subset_docs_by_id[version_doc["parent"]] @@ -255,11 +325,13 @@ class Delivery(BaseAction): ) filtered_versions = [] - for entity in entities: - asset = entity["asset"] - - parent = asset["parent"] - asset_doc = asset_docs_by_ftrack_id[parent["id"]] + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] + parent_id = asset["context_id"] + asset_doc = asset_docs_by_ftrack_id.get(parent_id) + if not asset_doc: + continue subsets_by_name = version_docs_by_parent_id.get(asset_doc["_id"]) if not subsets_by_name: @@ -270,24 +342,29 @@ class Delivery(BaseAction): if not version_docs_by_version: continue - version = entity["version"] + version = asset_version["version"] version_doc = version_docs_by_version.get(version) if version_doc: filtered_versions.append(version_doc) return filtered_versions def _get_subset_docs( - self, asset_docs_by_ftrack_id, subset_names, entities + self, + project_name, + asset_docs_by_ftrack_id, + subset_names, + asset_versions, + assets_by_id ): - asset_doc_ids = list() - for asset_doc in asset_docs_by_ftrack_id.values(): - asset_doc_ids.append(asset_doc["_id"]) - - subset_docs = list(self.db_con.find({ - "type": "subset", - "parent": {"$in": asset_doc_ids}, - "name": {"$in": list(subset_names)} - })) + asset_doc_ids = [ + asset_doc["_id"] + for asset_doc in asset_docs_by_ftrack_id.values() + ] + subset_docs = list(get_subsets( + project_name, + asset_ids=asset_doc_ids, + subset_names=subset_names + )) subset_docs_by_parent_id = collections.defaultdict(dict) for subset_doc in subset_docs: asset_id = subset_doc["parent"] @@ -295,11 +372,14 @@ class Delivery(BaseAction): subset_docs_by_parent_id[asset_id][subset_name] = subset_doc filtered_subsets = [] - for entity in entities: - asset = entity["asset"] + for asset_version in asset_versions: + asset_id = asset_version["asset_id"] + asset = assets_by_id[asset_id] - parent = asset["parent"] - asset_doc = asset_docs_by_ftrack_id[parent["id"]] + parent_id = asset["context_id"] + asset_doc = asset_docs_by_ftrack_id.get(parent_id) + if not asset_doc: + continue subsets_by_name = subset_docs_by_parent_id.get(asset_doc["_id"]) if not subsets_by_name: @@ -311,58 +391,58 @@ class Delivery(BaseAction): filtered_subsets.append(subset_doc) return filtered_subsets - def _get_asset_docs(self, parent_ent_by_id): - asset_docs = list(self.db_con.find({ - "type": "asset", - "data.ftrackId": {"$in": list(parent_ent_by_id.keys())} - })) - asset_docs_by_ftrack_id = { - asset_doc["data"]["ftrackId"]: asset_doc - for asset_doc in asset_docs - } + def _get_asset_docs(self, project_name, session, parent_ids): + asset_docs = list(get_assets( + project_name, fields=["_id", "name", "data.ftrackId"] + )) - entities_by_mongo_id = {} - entities_by_names = {} - for ftrack_id, entity in parent_ent_by_id.items(): - if ftrack_id not in asset_docs_by_ftrack_id: - parent_mongo_id = entity["custom_attributes"].get( - CUST_ATTR_ID_KEY - ) - if 
parent_mongo_id: - entities_by_mongo_id[ObjectId(parent_mongo_id)] = entity - else: - entities_by_names[entity["name"]] = entity + asset_docs_by_id = {} + asset_docs_by_name = {} + asset_docs_by_ftrack_id = {} + for asset_doc in asset_docs: + asset_id = str(asset_doc["_id"]) + asset_name = asset_doc["name"] + ftrack_id = asset_doc["data"].get("ftrackId") - expressions = [] - if entities_by_mongo_id: - expression = { - "type": "asset", - "_id": {"$in": list(entities_by_mongo_id.keys())} + asset_docs_by_id[asset_id] = asset_doc + asset_docs_by_name[asset_name] = asset_doc + if ftrack_id: + asset_docs_by_ftrack_id[ftrack_id] = asset_doc + + attr_def = session.query(( + "select id from CustomAttributeConfiguration where key is \"{}\"" + ).format(CUST_ATTR_ID_KEY)).first() + if attr_def is None: + return asset_docs_by_ftrack_id + + avalon_mongo_id_values = query_custom_attributes( + session, [attr_def["id"]], parent_ids, True + ) + missing_ids = set(parent_ids) + for item in avalon_mongo_id_values: + if not item["value"]: + continue + asset_id = item["value"] + entity_id = item["entity_id"] + asset_doc = asset_docs_by_id.get(asset_id) + if asset_doc: + asset_docs_by_ftrack_id[entity_id] = asset_doc + missing_ids.remove(entity_id) + + entity_ids_by_name = {} + if missing_ids: + not_found_entities = session.query(( + "select id, name from TypedContext where id in ({})" + ).format(self.join_query_keys(missing_ids))).all() + entity_ids_by_name = { + entity["name"]: entity["id"] + for entity in not_found_entities } - expressions.append(expression) - if entities_by_names: - expression = { - "type": "asset", - "name": {"$in": list(entities_by_names.keys())} - } - expressions.append(expression) - - if expressions: - if len(expressions) == 1: - filter = expressions[0] - else: - filter = {"$or": expressions} - - asset_docs = self.db_con.find(filter) - for asset_doc in asset_docs: - if asset_doc["_id"] in entities_by_mongo_id: - entity = entities_by_mongo_id[asset_doc["_id"]] - asset_docs_by_ftrack_id[entity["id"]] = asset_doc - - elif asset_doc["name"] in entities_by_names: - entity = entities_by_names[asset_doc["name"]] - asset_docs_by_ftrack_id[entity["id"]] = asset_doc + for asset_name, entity_id in entity_ids_by_name.items(): + asset_doc = asset_docs_by_name.get(asset_name) + if asset_doc: + asset_docs_by_ftrack_id[entity_id] = asset_doc return asset_docs_by_ftrack_id @@ -396,7 +476,6 @@ class Delivery(BaseAction): session.commit() try: - self.db_con.install() report = self.real_launch(session, entities, event) except Exception as exc: @@ -422,7 +501,6 @@ class Delivery(BaseAction): else: job["status"] = "failed" session.commit() - self.db_con.uninstall() if not report["success"]: self.show_interface( @@ -464,21 +542,20 @@ class Delivery(BaseAction): if not os.path.exists(location_path): os.makedirs(location_path) - self.db_con.Session["AVALON_PROJECT"] = project_name - self.log.debug("Collecting representations to process.") - version_ids = self._get_interest_version_ids(entities) - repres_to_deliver = list(self.db_con.find({ - "type": "representation", - "parent": {"$in": version_ids}, - "name": {"$in": repre_names} - })) - + version_ids = self._get_interest_version_ids( + project_name, session, entities + ) + repres_to_deliver = list(get_representations( + project_name, + representation_names=repre_names, + version_ids=version_ids + )) anatomy = Anatomy(project_name) format_dict = get_format_dict(anatomy, location_path) - datetime_data = config.get_datetime_data() + datetime_data = 
get_datetime_data() for repre in repres_to_deliver: source_path = repre.get("data", {}).get("path") debug_msg = "Processing representation {}".format(repre["_id"]) @@ -503,7 +580,7 @@ class Delivery(BaseAction): if frame: repre["context"]["frame"] = len(str(frame)) * "#" - repre_path = path_from_representation(repre, anatomy) + repre_path = get_representation_path_with_anatomy(repre, anatomy) # TODO add backup solution where root of path from component # is replaced with root args = ( @@ -517,9 +594,9 @@ class Delivery(BaseAction): self.log ) if not frame: - process_single_file(*args) + deliver_single_file(*args) else: - process_sequence(*args) + deliver_sequence(*args) return self.report(report_items) diff --git a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py index 3888379e04..fb1cdf340e 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py +++ b/openpype/modules/ftrack/event_handlers_user/action_fill_workfile_attr.py @@ -7,14 +7,15 @@ import datetime import ftrack_api -from avalon.api import AvalonMongoDB -from openpype.api import get_project_settings -from openpype.lib import ( - get_workfile_template_key, - get_workdir_data, - Anatomy, - StringTemplate, +from openpype.client import ( + get_project, + get_assets, ) +from openpype.settings import get_project_settings, get_system_settings +from openpype.lib import StringTemplate +from openpype.pipeline import Anatomy +from openpype.pipeline.template_data import get_template_data +from openpype.pipeline.workfile import get_workfile_template_key from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import create_chunks @@ -248,10 +249,8 @@ class FillWorkfileAttributeAction(BaseAction): # Find matchin asset documents and map them by ftrack task entities # - result stored to 'asset_docs_with_task_entities' is list with # tuple `(asset document, [task entitis, ...])` - dbcon = AvalonMongoDB() - dbcon.Session["AVALON_PROJECT"] = project_name # Quety all asset documents - asset_docs = list(dbcon.find({"type": "asset"})) + asset_docs = list(get_assets(project_name)) job_entity["data"] = json.dumps({ "description": "(1/3) Asset documents queried." 
}) @@ -276,16 +275,21 @@ class FillWorkfileAttributeAction(BaseAction): # Keep placeholders in the template unfilled host_name = "{app}" extension = "{ext}" - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) project_settings = get_project_settings(project_name) + system_settings = get_system_settings() anatomy = Anatomy(project_name) templates_by_key = {} operations = [] for asset_doc, task_entities in asset_docs_with_task_entities: for task_entity in task_entities: - workfile_data = get_workdir_data( - project_doc, asset_doc, task_entity["name"], host_name + workfile_data = get_template_data( + project_doc, + asset_doc, + task_entity["name"], + host_name, + system_settings ) # Use version 1 for each workfile workfile_data["version"] = 1 @@ -293,7 +297,10 @@ class FillWorkfileAttributeAction(BaseAction): task_type = workfile_data["task"]["type"] template_key = get_workfile_template_key( - task_type, host_name, project_settings=project_settings + task_type, + host_name, + project_name, + project_settings=project_settings ) if template_key in templates_by_key: template = templates_by_key[template_key] diff --git a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py index 3759bc81ac..e825198180 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py +++ b/openpype/modules/ftrack/event_handlers_user/action_prepare_project.py @@ -1,9 +1,8 @@ import json +import copy -from avalon.api import AvalonMongoDB -from openpype.api import ProjectSettings -from openpype.lib import create_project -from openpype.settings import SaveWarningExc +from openpype.client import get_project, create_project +from openpype.settings import ProjectSettings, SaveWarningExc from openpype_modules.ftrack.lib import ( BaseAction, @@ -389,12 +388,8 @@ class PrepareProjectLocal(BaseAction): project_name = project_entity["full_name"] # Try to find project document - dbcon = AvalonMongoDB() - dbcon.install() - dbcon.Session["AVALON_PROJECT"] = project_name - project_doc = dbcon.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) + # Create project if is not available # - creation is required to be able set project anatomy and attributes if not project_doc: @@ -402,9 +397,11 @@ class PrepareProjectLocal(BaseAction): self.log.info("Creating project \"{} [{}]\"".format( project_name, project_code )) - create_project(project_name, project_code, dbcon=dbcon) - - dbcon.uninstall() + create_project(project_name, project_code) + self.trigger_event( + "openpype.project.created", + {"project_name": project_name} + ) project_settings = ProjectSettings(project_name) project_anatomy_settings = project_settings["project_anatomy"] @@ -439,6 +436,10 @@ class PrepareProjectLocal(BaseAction): self.process_identifier() ) self.trigger_action(trigger_identifier, event) + + event_data = copy.deepcopy(in_data) + event_data["project_name"] = project_name + self.trigger_event("openpype.project.prepared", event_data) return True diff --git a/openpype/modules/ftrack/event_handlers_user/action_rv.py b/openpype/modules/ftrack/event_handlers_user/action_rv.py index bdb0eaf250..d05f0c47f6 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_rv.py +++ b/openpype/modules/ftrack/event_handlers_user/action_rv.py @@ -4,8 +4,18 @@ import traceback import json import ftrack_api -from avalon import io, api -from openpype.pipeline import get_representation_path + +from 
openpype.client import ( + get_asset_by_name, + get_subset_by_name, + get_version_by_name, + get_representation_by_name +) +from openpype.pipeline import ( + get_representation_path, + AvalonMongoDB, + Anatomy, +) from openpype_modules.ftrack.lib import BaseAction, statics_icon @@ -252,9 +262,10 @@ class RVAction(BaseAction): "Component", list(event["data"]["values"].values())[0] )["version"]["asset"]["parent"]["link"][0] project = session.get(link["type"], link["id"]) - os.environ["AVALON_PROJECT"] = project["name"] - api.Session["AVALON_PROJECT"] = project["name"] - io.install() + project_name = project["full_name"] + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + anatomy = Anatomy(project_name) location = ftrack_api.Session().pick_location() @@ -278,37 +289,38 @@ class RVAction(BaseAction): if online_source: continue - asset = io.find_one({"type": "asset", "name": parent_name}) - subset = io.find_one( - { - "type": "subset", - "name": component["version"]["asset"]["name"], - "parent": asset["_id"] - } + subset_name = component["version"]["asset"]["name"] + version_name = component["version"]["version"] + representation_name = component["file_type"][1:] + + asset_doc = get_asset_by_name( + project_name, parent_name, fields=["_id"] ) - version = io.find_one( - { - "type": "version", - "name": component["version"]["version"], - "parent": subset["_id"] - } + subset_doc = get_subset_by_name( + project_name, + subset_name=subset_name, + asset_id=asset_doc["_id"] ) - representation = io.find_one( - { - "type": "representation", - "parent": version["_id"], - "name": component["file_type"][1:] - } + version_doc = get_version_by_name( + project_name, + version=version_name, + subset_id=subset_doc["_id"] ) - if representation is None: - representation = io.find_one( - { - "type": "representation", - "parent": version["_id"], - "name": "preview" - } + repre_doc = get_representation_by_name( + project_name, + version_id=version_doc["_id"], + representation_name=representation_name + ) + if not repre_doc: + repre_doc = get_representation_by_name( + project_name, + version_id=version_doc["_id"], + representation_name="preview" ) - paths.append(get_representation_path(representation)) + + paths.append(get_representation_path( + repre_doc, root=anatomy.roots, dbcon=dbcon + )) return paths diff --git a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py index 4820925844..8748f426bd 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_store_thumbnails_to_avalon.py @@ -4,9 +4,17 @@ import json import requests from bson.objectid import ObjectId + +from openpype.client import ( + get_project, + get_asset_by_id, + get_assets, + get_subset_by_name, + get_version_by_name, + get_representations +) from openpype_modules.ftrack.lib import BaseAction, statics_icon -from openpype.api import Anatomy -from avalon.api import AvalonMongoDB +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype_modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY @@ -384,7 +392,7 @@ class StoreThumbnailsToAvalon(BaseAction): db_con.Session["AVALON_PROJECT"] = project_name - avalon_project = db_con.find_one({"type": "project"}) + avalon_project = get_project(project_name) output["project"] = avalon_project if not avalon_project: @@ -398,19 +406,17 @@ class 
StoreThumbnailsToAvalon(BaseAction): asset_mongo_id = parent["custom_attributes"].get(CUST_ATTR_ID_KEY) if asset_mongo_id: try: - asset_mongo_id = ObjectId(asset_mongo_id) - asset_ent = db_con.find_one({ - "type": "asset", - "_id": asset_mongo_id - }) + asset_ent = get_asset_by_id(project_name, asset_mongo_id) except Exception: pass if not asset_ent: - asset_ent = db_con.find_one({ - "type": "asset", - "data.ftrackId": parent["id"] - }) + asset_docs = get_assets(project_name, asset_names=[parent["name"]]) + for asset_doc in asset_docs: + ftrack_id = asset_doc.get("data", {}).get("ftrackId") + if ftrack_id == parent["id"]: + asset_ent = asset_doc + break output["asset"] = asset_ent @@ -421,13 +427,11 @@ class StoreThumbnailsToAvalon(BaseAction): ) return output - asset_mongo_id = asset_ent["_id"] - - subset_ent = db_con.find_one({ - "type": "subset", - "parent": asset_mongo_id, - "name": subset_name - }) + subset_ent = get_subset_by_name( + project_name, + subset_name=subset_name, + asset_id=asset_ent["_id"] + ) output["subset"] = subset_ent @@ -438,11 +442,11 @@ class StoreThumbnailsToAvalon(BaseAction): ).format(subset_name, ent_path) return output - version_ent = db_con.find_one({ - "type": "version", - "name": version, - "parent": subset_ent["_id"] - }) + version_ent = get_version_by_name( + project_name, + version, + subset_ent["_id"] + ) output["version"] = version_ent @@ -453,10 +457,10 @@ class StoreThumbnailsToAvalon(BaseAction): ).format(version, subset_name, ent_path) return output - repre_ents = list(db_con.find({ - "type": "representation", - "parent": version_ent["_id"] - })) + repre_ents = list(get_representations( + project_name, + version_ids=[version_ent["_id"]] + )) output["representations"] = repre_ents return output diff --git a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py index cd2f371f38..e52a061471 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py +++ b/openpype/modules/ftrack/event_handlers_user/action_sync_to_avalon.py @@ -1,7 +1,8 @@ import time import sys import json -import traceback + +import ftrack_api from openpype_modules.ftrack.lib import BaseAction, statics_icon from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory @@ -184,6 +185,13 @@ class SyncToAvalonLocal(BaseAction): "* Total time: {}".format(time_7 - time_start) ) + if self.entities_factory.project_created: + event = ftrack_api.event.base.Event( + topic="openpype.project.created", + data={"project_name": project_name} + ) + self.session.event_hub.publish(event) + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( diff --git a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py b/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py index 0d69913996..65d1b42d82 100644 --- a/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py +++ b/openpype/modules/ftrack/event_handlers_user/action_where_run_ask.py @@ -3,6 +3,7 @@ import socket import getpass from openpype_modules.ftrack.lib import BaseAction +from openpype_modules.ftrack.ftrack_server.lib import get_host_ip class ActionWhereIRun(BaseAction): @@ -53,8 +54,7 @@ class ActionWhereIRun(BaseAction): try: host_name = socket.gethostname() msgs["Hostname"] = host_name - host_ip = socket.gethostbyname(host_name) - msgs["IP"] = host_ip + msgs["IP"] = get_host_ip() or "N/A" except Exception: pass 
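The user-action diffs above all migrate the same pattern: chained `find_one` calls against the Avalon Mongo connection become the typed lookups from `openpype.client`. A minimal sketch of that lookup chain, assuming the helper signatures match their call sites in the hunks above (the helpers' own definitions are not part of this diff):

    from openpype.client import (
        get_asset_by_name,
        get_subset_by_name,
        get_version_by_name,
        get_representation_by_name,
    )


    def find_representation(
        project_name, asset_name, subset_name, version, repre_name
    ):
        # Each lookup narrows the next query by the parent document id,
        # mirroring the asset -> subset -> version -> representation chain.
        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
        if not asset_doc:
            return None
        subset_doc = get_subset_by_name(
            project_name, subset_name=subset_name, asset_id=asset_doc["_id"]
        )
        if not subset_doc:
            return None
        version_doc = get_version_by_name(
            project_name, version=version, subset_id=subset_doc["_id"]
        )
        if not version_doc:
            return None
        return get_representation_by_name(
            project_name,
            version_id=version_doc["_id"],
            representation_name=repre_name,
        )

Because every step short-circuits on a missing document, a broken link in the chain returns None instead of raising on a `None["_id"]` access, which is what the pre-refactor `io.find_one` code could do.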
diff --git a/openpype/modules/ftrack/ftrack_module.py b/openpype/modules/ftrack/ftrack_module.py
index 5c38df2e03..d61b5f0b26 100644
--- a/openpype/modules/ftrack/ftrack_module.py
+++ b/openpype/modules/ftrack/ftrack_module.py
@@ -5,23 +5,23 @@ import platform
 
 import click
 
-from openpype.modules import OpenPypeModule
-from openpype_interfaces import (
+from openpype.modules import (
+    OpenPypeModule,
     ITrayModule,
     IPluginPaths,
-    ILaunchHookPaths,
     ISettingsChangeListener
 )
 from openpype.settings import SaveWarningExc
+from openpype.lib import Logger
 
 FTRACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
+_URL_NOT_SET = object()
 
 
 class FtrackModule(
     OpenPypeModule,
     ITrayModule,
     IPluginPaths,
-    ILaunchHookPaths,
     ISettingsChangeListener
 ):
     name = "ftrack"
@@ -30,17 +30,8 @@ class FtrackModule(
         ftrack_settings = settings[self.name]
 
         self.enabled = ftrack_settings["enabled"]
-        # Add http schema
-        ftrack_url = ftrack_settings["ftrack_server"].strip("/ ")
-        if ftrack_url:
-            if "http" not in ftrack_url:
-                ftrack_url = "https://" + ftrack_url
-
-            # Check if "ftrack.app" is part os url
-            if "ftrackapp.com" not in ftrack_url:
-                ftrack_url = ftrack_url + ".ftrackapp.com"
-
-        self.ftrack_url = ftrack_url
+        self._settings_ftrack_url = ftrack_settings["ftrack_server"]
+        self._ftrack_url = _URL_NOT_SET
 
         current_dir = os.path.dirname(os.path.abspath(__file__))
         low_platform = platform.system().lower()
@@ -72,8 +63,39 @@ class FtrackModule(
         self.timers_manager_connector = None
         self._timers_manager_module = None
 
+    def get_ftrack_url(self):
+        """Resolved ftrack url.
+
+        Resolving tries to fill in missing information in the url and then
+        tries to connect to the server.
+
+        Returns:
+            Union[str, None]: Final variant of the url or None if the server
+                could not be reached.
+        """
+
+        if self._ftrack_url is _URL_NOT_SET:
+            self._ftrack_url = resolve_ftrack_url(
+                self._settings_ftrack_url,
+                logger=self.log
+            )
+        return self._ftrack_url
+
+    ftrack_url = property(get_ftrack_url)
+
+    @property
+    def settings_ftrack_url(self):
+        """Ftrack url from settings, exactly as entered.
+
+        Returns:
+            str: Ftrack url from settings.
+        """
+
+        return self._settings_ftrack_url
+
     def get_global_environments(self):
         """Ftrack's global environments."""
+
         return {
             "FTRACK_SERVER": self.ftrack_url
         }
@@ -85,9 +107,44 @@ class FtrackModule(
         }
 
     def get_launch_hook_paths(self):
-        """Implementation of `ILaunchHookPaths`."""
+        """Implementation for applications launch hooks."""
+
         return os.path.join(FTRACK_MODULE_DIR, "launch_hooks")
 
+    def modify_application_launch_arguments(self, application, env):
+        if not application.use_python_2:
+            return
+
+        self.log.info("Adding Ftrack Python 2 packages to PYTHONPATH.")
+
+        # Prepare vendor dir path
+        python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor")
+
+        # Add Python 2 modules
+        python_paths = [
+            # `python-ftrack-api`
+            os.path.join(python_2_vendor, "ftrack-python-api", "source"),
+            # `arrow`
+            os.path.join(python_2_vendor, "arrow"),
+            # `builtins` from `python-future`
+            # - `python-future` is strictly a Python 2 module that causes
+            #   crashes of Python 3 scripts executed through OpenPype
+            #   (burnin script etc.)
+ os.path.join(python_2_vendor, "builtins"), + # `backports.functools_lru_cache` + os.path.join( + python_2_vendor, "backports.functools_lru_cache" + ) + ] + + # Load PYTHONPATH from current launch context + python_path = env.get("PYTHONPATH") + if python_path: + python_paths.append(python_path) + + # Set new PYTHONPATH to launch context environments + env["PYTHONPATH"] = os.pathsep.join(python_paths) + def connect_with_modules(self, enabled_modules): for module in enabled_modules: if not hasattr(module, "get_ftrack_event_handler_paths"): @@ -159,7 +216,7 @@ class FtrackModule( app_definitions_from_app_manager, tool_definitions_from_app_manager ) - from openpype.api import ApplicationManager + from openpype.lib import ApplicationManager query_keys = [ "id", "key", @@ -446,6 +503,54 @@ class FtrackModule( click_group.add_command(cli_main) +def _check_ftrack_url(url): + import requests + + try: + result = requests.get(url, allow_redirects=False) + except requests.exceptions.RequestException: + return False + + if (result.status_code != 200 or "FTRACK_VERSION" not in result.headers): + return False + return True + + +def resolve_ftrack_url(url, logger=None): + """Checks if Ftrack server is responding.""" + + if logger is None: + logger = Logger.get_logger(__name__) + + url = url.strip("/ ") + if not url: + logger.error("Ftrack URL is not set!") + return None + + if not url.startswith("http"): + url = "https://" + url + + ftrack_url = None + if url and _check_ftrack_url(url): + ftrack_url = url + + if not ftrack_url and not url.endswith("ftrackapp.com"): + ftrackapp_url = url + ".ftrackapp.com" + if _check_ftrack_url(ftrackapp_url): + ftrack_url = ftrackapp_url + + if not ftrack_url and _check_ftrack_url(url): + ftrack_url = url + + if ftrack_url: + logger.debug("Ftrack server \"{}\" is accessible.".format(ftrack_url)) + + else: + logger.error("Ftrack server \"{}\" is not accessible!".format(url)) + + return ftrack_url + + @click.group(FtrackModule.name, help="Ftrack module related commands.") def cli_main(): pass diff --git a/openpype/modules/ftrack/ftrack_server/__init__.py b/openpype/modules/ftrack/ftrack_server/__init__.py index 9e3920b500..8e5f7c4c51 100644 --- a/openpype/modules/ftrack/ftrack_server/__init__.py +++ b/openpype/modules/ftrack/ftrack_server/__init__.py @@ -1,8 +1,6 @@ from .ftrack_server import FtrackServer -from .lib import check_ftrack_url __all__ = ( "FtrackServer", - "check_ftrack_url" ) diff --git a/openpype/modules/ftrack/ftrack_server/event_server_cli.py b/openpype/modules/ftrack/ftrack_server/event_server_cli.py index 90ce757242..ad7ffd8e25 100644 --- a/openpype/modules/ftrack/ftrack_server/event_server_cli.py +++ b/openpype/modules/ftrack/ftrack_server/event_server_cli.py @@ -1,11 +1,9 @@ import os -import sys import signal import datetime import subprocess import socket import json -import platform import getpass import atexit import time @@ -13,17 +11,22 @@ import uuid import ftrack_api import pymongo +from openpype.client.mongo import ( + OpenPypeMongoConnection, + validate_mongo_connection, +) from openpype.lib import ( get_openpype_execute_args, - OpenPypeMongoConnection, get_openpype_version, get_build_version, - validate_mongo_connection ) -from openpype_modules.ftrack import FTRACK_MODULE_DIR +from openpype_modules.ftrack import ( + FTRACK_MODULE_DIR, + resolve_ftrack_url, +) from openpype_modules.ftrack.lib import credentials -from openpype_modules.ftrack.ftrack_server.lib import check_ftrack_url from openpype_modules.ftrack.ftrack_server import 
socket_thread +from openpype_modules.ftrack.ftrack_server.lib import get_host_ip class MongoPermissionsError(Exception): @@ -114,7 +117,7 @@ def legacy_server(ftrack_url): while True: if not ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) # Run threads only if Ftrack is accessible if not ftrack_accessible and not printed_ftrack_error: @@ -243,11 +246,13 @@ def main_loop(ftrack_url): ) host_name = socket.gethostname() + host_ip = get_host_ip() + main_info = [ ["created_at", datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S")], ["Username", getpass.getuser()], ["Host Name", host_name], - ["Host IP", socket.gethostbyname(host_name)], + ["Host IP", host_ip or "N/A"], ["OpenPype executable", get_openpype_execute_args()[-1]], ["OpenPype version", get_openpype_version() or "N/A"], ["OpenPype build version", get_build_version() or "N/A"] @@ -257,7 +262,7 @@ def main_loop(ftrack_url): while True: # Check if accessible Ftrack and Mongo url if not ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) if not mongo_accessible: mongo_accessible = check_mongo_url(mongo_uri) @@ -311,7 +316,7 @@ def main_loop(ftrack_url): statuser_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not statuser_thread.isAlive(): + elif not statuser_thread.is_alive(): statuser_thread.join() statuser_thread = None ftrack_accessible = False @@ -354,7 +359,7 @@ def main_loop(ftrack_url): storer_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not storer_thread.isAlive(): + elif not storer_thread.is_alive(): if storer_thread.mongo_error: raise MongoPermissionsError() storer_thread.join() @@ -391,7 +396,7 @@ def main_loop(ftrack_url): processor_failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not processor_thread.isAlive(): + elif not processor_thread.is_alive(): if processor_thread.mongo_error: raise Exception( "Exiting because have issue with acces to MongoDB" @@ -441,7 +446,7 @@ def run_event_server( os.environ["CLOCKIFY_API_KEY"] = clockify_api_key # Check url regex and accessibility - ftrack_url = check_ftrack_url(ftrack_url) + ftrack_url = resolve_ftrack_url(ftrack_url) if not ftrack_url: print('Exiting! < Please enter Ftrack server url >') return 1 diff --git a/openpype/modules/ftrack/ftrack_server/ftrack_server.py b/openpype/modules/ftrack/ftrack_server/ftrack_server.py index 8944591b71..c75b8f7172 100644 --- a/openpype/modules/ftrack/ftrack_server/ftrack_server.py +++ b/openpype/modules/ftrack/ftrack_server/ftrack_server.py @@ -7,12 +7,10 @@ import traceback import ftrack_api from openpype.lib import ( - PypeLogger, + Logger, modules_from_path ) -log = PypeLogger.get_logger(__name__) - """ # Required - Needed for connection to Ftrack FTRACK_SERVER # Ftrack server e.g. "https://myFtrack.ftrackapp.com" @@ -43,10 +41,13 @@ class FtrackServer: server.run_server() .. 
""" + # set Ftrack logging to Warning only - OPTIONAL ftrack_log = logging.getLogger("ftrack_api") ftrack_log.setLevel(logging.WARNING) + self.log = Logger.get_logger(__name__) + self.stopped = True self.is_running = False @@ -72,7 +73,7 @@ class FtrackServer: # Get all modules with functions modules, crashed = modules_from_path(path) for filepath, exc_info in crashed: - log.warning("Filepath load crashed {}.\n{}".format( + self.log.warning("Filepath load crashed {}.\n{}".format( filepath, traceback.format_exception(*exc_info) )) @@ -87,7 +88,7 @@ class FtrackServer: break if not register_function: - log.warning( + self.log.warning( "\"{}\" - Missing register method".format(filepath) ) continue @@ -97,7 +98,7 @@ class FtrackServer: ) if not register_functions: - log.warning(( + self.log.warning(( "There are no events with `register` function" " in registered paths: \"{}\"" ).format("| ".join(paths))) @@ -106,7 +107,7 @@ class FtrackServer: try: register_func(self.session) except Exception: - log.warning( + self.log.warning( "\"{}\" - register was not successful".format(filepath), exc_info=True ) @@ -141,7 +142,7 @@ class FtrackServer: self.session = session if load_files: if not self.handler_paths: - log.warning(( + self.log.warning(( "Paths to event handlers are not set." " Ftrack server won't launch." )) @@ -151,8 +152,8 @@ class FtrackServer: self.set_files(self.handler_paths) msg = "Registration of event handlers has finished!" - log.info(len(msg) * "*") - log.info(msg) + self.log.info(len(msg) * "*") + self.log.info(msg) # keep event_hub on session running self.session.event_hub.wait() diff --git a/openpype/modules/ftrack/ftrack_server/lib.py b/openpype/modules/ftrack/ftrack_server/lib.py index f8319b67d4..eb64063fab 100644 --- a/openpype/modules/ftrack/ftrack_server/lib.py +++ b/openpype/modules/ftrack/ftrack_server/lib.py @@ -7,9 +7,11 @@ import threading import datetime import time import queue +import collections import appdirs -import pymongo +import socket +import pymongo import requests import ftrack_api import ftrack_api.session @@ -24,41 +26,21 @@ except ImportError: from ftrack_api._weakref import WeakMethod from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info -from openpype.lib import OpenPypeMongoConnection -from openpype.api import Logger +from openpype.client import OpenPypeMongoConnection +from openpype.lib import Logger TOPIC_STATUS_SERVER = "openpype.event.server.status" TOPIC_STATUS_SERVER_RESULT = "openpype.event.server.status.result" -def check_ftrack_url(url, log_errors=True): - """Checks if Ftrack server is responding""" - if not url: - print('ERROR: Ftrack URL is not set!') - return None - - url = url.strip('/ ') - - if 'http' not in url: - if url.endswith('ftrackapp.com'): - url = 'https://' + url - else: - url = 'https://{0}.ftrackapp.com'.format(url) +def get_host_ip(): + host_name = socket.gethostname() try: - result = requests.get(url, allow_redirects=False) - except requests.exceptions.RequestException: - if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') - return False + return socket.gethostbyname(host_name) + except Exception: + pass - if (result.status_code != 200 or 'FTRACK_VERSION' not in result.headers): - if log_errors: - print('ERROR: Entered Ftrack URL is not accesible!') - return False - - print('DEBUG: Ftrack server {} is accessible.'.format(url)) - - return url + return None class SocketBaseEventHub(ftrack_api.event.hub.EventHub): @@ -133,7 +115,7 @@ class ProcessEventHub(SocketBaseEventHub): hearbeat_msg = 
b"processor" is_collection_created = False - pypelog = Logger().get_logger("Session Processor") + pypelog = Logger.get_logger("Session Processor") def __init__(self, *args, **kwargs): self.mongo_url = None @@ -192,7 +174,7 @@ class ProcessEventHub(SocketBaseEventHub): except pymongo.errors.AutoReconnect: self.pypelog.error(( "Mongo server \"{}\" is not responding, exiting." - ).format(os.environ["AVALON_MONGO"])) + ).format(os.environ["OPENPYPE_MONGO"])) sys.exit(0) # Additional special processing of events. if event['topic'] == 'ftrack.meta.disconnected': @@ -306,7 +288,20 @@ class CustomEventHubSession(ftrack_api.session.Session): # Currently pending operations. self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True + + # OpenPype change - In new API are operations properties + new_api = hasattr(self.__class__, "record_operations") + + if new_api: + self._record_operations = collections.defaultdict( + lambda: True + ) + self._auto_populate = collections.defaultdict( + lambda: auto_populate + ) + else: + self.record_operations = True + self.auto_populate = auto_populate self.cache_key_maker = cache_key_maker if self.cache_key_maker is None: @@ -325,6 +320,9 @@ class CustomEventHubSession(ftrack_api.session.Session): if cache is not None: self.cache.caches.append(cache) + if new_api: + self.merge_lock = threading.RLock() + self._managed_request = None self._request = requests.Session() self._request.auth = ftrack_api.session.SessionAuthentication( @@ -332,8 +330,6 @@ class CustomEventHubSession(ftrack_api.session.Session): ) self.request_timeout = timeout - self.auto_populate = auto_populate - # Fetch server information and in doing so also check credentials. self._server_information = self._fetch_server_information() diff --git a/openpype/modules/ftrack/ftrack_server/socket_thread.py b/openpype/modules/ftrack/ftrack_server/socket_thread.py index f49ca5557e..3ef55f8daa 100644 --- a/openpype/modules/ftrack/ftrack_server/socket_thread.py +++ b/openpype/modules/ftrack/ftrack_server/socket_thread.py @@ -5,8 +5,8 @@ import socket import threading import traceback import subprocess -from openpype.api import Logger -from openpype.lib import get_openpype_execute_args + +from openpype.lib import get_openpype_execute_args, Logger class SocketThread(threading.Thread): @@ -16,7 +16,7 @@ class SocketThread(threading.Thread): def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() - self.log = Logger().get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.setName(name) self.name = name self.port = port diff --git a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py index d5a95fad91..86ecffd5b8 100644 --- a/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py +++ b/openpype/modules/ftrack/launch_hooks/post_ftrack_changes.py @@ -1,7 +1,7 @@ import os import ftrack_api -from openpype.api import get_project_settings +from openpype.settings import get_project_settings from openpype.lib import PostLaunchHook diff --git a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py b/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py deleted file mode 100644 index 0dd894bebf..0000000000 --- a/openpype/modules/ftrack/launch_hooks/pre_python2_vendor.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -from openpype.lib import PreLaunchHook -from openpype_modules.ftrack import FTRACK_MODULE_DIR - - -class 
PrePython2Support(PreLaunchHook): - """Add python ftrack api module for Python 2 to PYTHONPATH. - - Path to vendor modules is added to the beggining of PYTHONPATH. - """ - - def execute(self): - if not self.application.use_python_2: - return - - self.log.info("Adding Ftrack Python 2 packages to PYTHONPATH.") - - # Prepare vendor dir path - python_2_vendor = os.path.join(FTRACK_MODULE_DIR, "python2_vendor") - - # Add Python 2 modules - python_paths = [ - # `python-ftrack-api` - os.path.join(python_2_vendor, "ftrack-python-api", "source"), - # `arrow` - os.path.join(python_2_vendor, "arrow"), - # `builtins` from `python-future` - # - `python-future` is strict Python 2 module that cause crashes - # of Python 3 scripts executed through OpenPype (burnin script etc.) - os.path.join(python_2_vendor, "builtins"), - # `backports.functools_lru_cache` - os.path.join( - python_2_vendor, "backports.functools_lru_cache" - ) - ] - - # Load PYTHONPATH from current launch context - python_path = self.launch_context.env.get("PYTHONPATH") - if python_path: - python_paths.append(python_path) - - # Set new PYTHONPATH to launch context environments - self.launch_context.env["PYTHONPATH"] = os.pathsep.join(python_paths) diff --git a/openpype/modules/ftrack/lib/avalon_sync.py b/openpype/modules/ftrack/lib/avalon_sync.py index 5301ec568e..0341c25717 100644 --- a/openpype/modules/ftrack/lib/avalon_sync.py +++ b/openpype/modules/ftrack/lib/avalon_sync.py @@ -6,16 +6,22 @@ import numbers import six -from avalon.api import AvalonMongoDB - -import avalon - -from openpype.api import ( - Logger, - Anatomy, - get_anatomy_settings +from openpype.client import ( + get_project, + get_assets, + get_archived_assets, + get_subsets, + get_versions, + get_representations ) -from openpype.lib import ApplicationManager +from openpype.client.operations import ( + CURRENT_ASSET_DOC_SCHEMA, + CURRENT_PROJECT_SCHEMA, + CURRENT_PROJECT_CONFIG_SCHEMA, +) +from openpype.settings import get_anatomy_settings +from openpype.lib import ApplicationManager, Logger +from openpype.pipeline import AvalonMongoDB, schema from .constants import CUST_ATTR_ID_KEY, FPS_KEYS from .custom_attributes import get_openpype_attr, query_custom_attributes @@ -28,14 +34,6 @@ import ftrack_api log = Logger.get_logger(__name__) -# Current schemas for avalon types -CURRENT_DOC_SCHEMAS = { - "project": "openpype:project-3.0", - "asset": "openpype:asset-3.0", - "config": "openpype:config-2.0" -} - - class InvalidFpsValue(Exception): pass @@ -147,14 +145,17 @@ def create_chunks(iterable, chunk_size=None): list: Chunked items. 
""" chunks = [] - if not iterable: - return chunks tupled_iterable = tuple(iterable) + if not tupled_iterable: + return chunks iterable_size = len(tupled_iterable) if chunk_size is None: chunk_size = 200 + if chunk_size < 1: + chunk_size = 1 + for idx in range(0, iterable_size, chunk_size): chunks.append(tupled_iterable[idx:idx + chunk_size]) return chunks @@ -175,7 +176,7 @@ def check_regex(name, entity_type, in_schema=None, schema_patterns=None): if not name_pattern: default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = avalon.schema._cache.get(schema_name + ".json") + schema_obj = schema._cache.get(schema_name + ".json") if not schema_obj: name_pattern = default_pattern else: @@ -286,21 +287,6 @@ def from_dict_to_set(data, is_project): return result -def get_avalon_project_template(project_name): - """Get avalon template - Args: - project_name: (string) - Returns: - dictionary with templates - """ - templates = Anatomy(project_name).templates - return { - "workfile": templates["avalon"]["workfile"], - "work": templates["avalon"]["work"], - "publish": templates["avalon"]["publish"] - } - - def get_project_apps(in_app_list): """ Application definitions for app name. @@ -451,6 +437,7 @@ class SyncEntitiesFactory: } self.create_list = [] + self.project_created = False self.unarchive_list = [] self.updates = collections.defaultdict(dict) @@ -592,6 +579,10 @@ class SyncEntitiesFactory: self.ft_project_id = ft_project_id self.entities_dict = entities_dict + @property + def project_name(self): + return self.entities_dict[self.ft_project_id]["name"] + @property def avalon_ents_by_id(self): """ @@ -676,9 +667,9 @@ class SyncEntitiesFactory: (list) of assets """ if self._avalon_archived_ents is None: - self._avalon_archived_ents = [ - ent for ent in self.dbcon.find({"type": "archived_asset"}) - ] + self._avalon_archived_ents = list( + get_archived_assets(self.project_name) + ) return self._avalon_archived_ents @property @@ -746,7 +737,7 @@ class SyncEntitiesFactory: """ if self._subsets_by_parent_id is None: self._subsets_by_parent_id = collections.defaultdict(list) - for subset in self.dbcon.find({"type": "subset"}): + for subset in get_subsets(self.project_name): self._subsets_by_parent_id[str(subset["parent"])].append( subset ) @@ -1437,8 +1428,8 @@ class SyncEntitiesFactory: # Avalon entities self.dbcon.install() self.dbcon.Session["AVALON_PROJECT"] = ft_project_name - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = self.dbcon.find({"type": "asset"}) + avalon_project = get_project(ft_project_name) + avalon_entities = get_assets(ft_project_name) self.avalon_project = avalon_project self.avalon_entities = avalon_entities @@ -1565,7 +1556,7 @@ class SyncEntitiesFactory: deleted_entities.append(mongo_id) av_ent = self.avalon_ents_by_id[mongo_id] - av_ent_path_items = [p for p in av_ent["data"]["parents"]] + av_ent_path_items = list(av_ent["data"]["parents"]) av_ent_path_items.append(av_ent["name"]) self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items))) @@ -1864,7 +1855,7 @@ class SyncEntitiesFactory: _vis_par = _avalon_ent["data"]["visualParent"] _name = _avalon_ent["name"] if _name in self.all_ftrack_names: - av_ent_path_items = _avalon_ent["data"]["parents"] + av_ent_path_items = list(_avalon_ent["data"]["parents"]) av_ent_path_items.append(_name) av_ent_path = "/".join(av_ent_path_items) # TODO report @@ -2006,7 +1997,7 @@ class SyncEntitiesFactory: {"_id": mongo_id}, item )) - av_ent_path_items = item["data"]["parents"] + av_ent_path_items = 
list(item["data"]["parents"]) av_ent_path_items.append(item["name"]) av_ent_path = "/".join(av_ent_path_items) self.log.debug( @@ -2066,7 +2057,7 @@ class SyncEntitiesFactory: item["_id"] = new_id item["parent"] = self.avalon_project_id - item["schema"] = CURRENT_DOC_SCHEMAS["asset"] + item["schema"] = CURRENT_ASSET_DOC_SCHEMA item["data"]["visualParent"] = avalon_parent new_id_str = str(new_id) @@ -2119,6 +2110,7 @@ class SyncEntitiesFactory: entity_dict = self.entities_dict[ftrack_id] + final_parents = entity_dict["final_entity"]["data"]["parents"] if archived_by_id: # if is changeable then unarchive (nothing to check here) if self.changeability_by_mongo_id[mongo_id]: @@ -2132,10 +2124,8 @@ class SyncEntitiesFactory: archived_name = archived_by_id["name"] if ( - archived_name != entity_dict["name"] or - archived_parents != entity_dict["final_entity"]["data"][ - "parents" - ] + archived_name != entity_dict["name"] + or archived_parents != final_parents ): return None @@ -2145,11 +2135,7 @@ class SyncEntitiesFactory: for archived in archived_by_name: mongo_id = str(archived["_id"]) archived_parents = archived.get("data", {}).get("parents") - if ( - archived_parents == entity_dict["final_entity"]["data"][ - "parents" - ] - ): + if archived_parents == final_parents: return mongo_id # Secondly try to find more close to current ftrack entity @@ -2201,8 +2187,8 @@ class SyncEntitiesFactory: project_item["_id"] = new_id project_item["parent"] = None - project_item["schema"] = CURRENT_DOC_SCHEMAS["project"] - project_item["config"]["schema"] = CURRENT_DOC_SCHEMAS["config"] + project_item["schema"] = CURRENT_PROJECT_SCHEMA + project_item["config"]["schema"] = CURRENT_PROJECT_CONFIG_SCHEMA self.ftrack_avalon_mapper[self.ft_project_id] = new_id self.avalon_ftrack_mapper[new_id] = self.ft_project_id @@ -2218,6 +2204,7 @@ class SyncEntitiesFactory: self._avalon_ents_by_name[project_item["name"]] = str(new_id) self.create_list.append(project_item) + self.project_created = True # store mongo id to ftrack entity entity = self.entities_dict[self.ft_project_id]["entity"] @@ -2274,46 +2261,37 @@ class SyncEntitiesFactory: self._delete_subsets_without_asset(subsets_to_remove) def _delete_subsets_without_asset(self, not_existing_parents): - subset_ids = [] - version_ids = [] repre_ids = [] to_delete = [] + subset_ids = [] for parent_id in not_existing_parents: subsets = self.subsets_by_parent_id.get(parent_id) if not subsets: continue for subset in subsets: - if subset.get("type") != "subset": - continue - subset_ids.append(subset["_id"]) + if subset.get("type") == "subset": + subset_ids.append(subset["_id"]) - db_subsets = self.dbcon.find({ - "_id": {"$in": subset_ids}, - "type": "subset" - }) - if not db_subsets: - return - - db_versions = self.dbcon.find({ - "parent": {"$in": subset_ids}, - "type": "version" - }) - if db_versions: - version_ids = [ver["_id"] for ver in db_versions] - - db_repres = self.dbcon.find({ - "parent": {"$in": version_ids}, - "type": "representation" - }) - if db_repres: - repre_ids = [repre["_id"] for repre in db_repres] + db_versions = get_versions( + self.project_name, + subset_ids=subset_ids, + fields=["_id"] + ) + version_ids = [ver["_id"] for ver in db_versions] + db_repres = get_representations( + self.project_name, + version_ids=version_ids, + fields=["_id"] + ) + repre_ids = [repre["_id"] for repre in db_repres] to_delete.extend(subset_ids) to_delete.extend(version_ids) to_delete.extend(repre_ids) - self.dbcon.delete_many({"_id": {"$in": to_delete}}) + if to_delete: + 
self.dbcon.delete_many({"_id": {"$in": to_delete}})
 
     # Probably deprecated
     def _check_changeability(self, parent_id=None):
@@ -2367,8 +2345,7 @@ class SyncEntitiesFactory:
                 continue
 
             changed = True
-            parents = [par for par in _parents]
-            hierarchy = "/".join(parents)
+            parents = list(_parents)
             self.entities_dict[ftrack_id][
                 "final_entity"]["data"]["parents"] = parents
 
@@ -2795,8 +2772,7 @@ class SyncEntitiesFactory:
     def report(self):
         items = []
-        project_name = self.entities_dict[self.ft_project_id]["name"]
-        title = "Synchronization report ({}):".format(project_name)
+        title = "Synchronization report ({}):".format(self.project_name)
 
         keys = ["error", "warning", "info"]
         for key in keys:
diff --git a/openpype/modules/ftrack/lib/credentials.py b/openpype/modules/ftrack/lib/credentials.py
index 4e29e66382..2eb64254d1 100644
--- a/openpype/modules/ftrack/lib/credentials.py
+++ b/openpype/modules/ftrack/lib/credentials.py
@@ -92,14 +92,18 @@ def check_credentials(username, api_key, ftrack_server=None):
     if not ftrack_server or not username or not api_key:
         return False
 
+    user_exists = False
     try:
         session = ftrack_api.Session(
             server_url=ftrack_server, api_key=api_key, api_user=username
         )
+        # Validate that the username actually exists
+        user = session.query("User where username is \"{}\"".format(username)).first()
+        user_exists = user is not None
         session.close()
     except Exception:
-        return False
-    return True
+        pass
+    return user_exists
diff --git a/openpype/modules/ftrack/lib/custom_attributes.py b/openpype/modules/ftrack/lib/custom_attributes.py
index 29c6b5e7f8..2f53815368 100644
--- a/openpype/modules/ftrack/lib/custom_attributes.py
+++ b/openpype/modules/ftrack/lib/custom_attributes.py
@@ -135,7 +135,7 @@ def query_custom_attributes(
         output.extend(
             session.query(
                 (
-                    "select value, entity_id from {}"
+                    "select value, entity_id, configuration_id from {}"
                     " where entity_id in ({}) and configuration_id in ({})"
                 ).format(
                     table_name,
diff --git a/openpype/modules/ftrack/lib/ftrack_base_handler.py b/openpype/modules/ftrack/lib/ftrack_base_handler.py
index 2130abc20c..c0b03f8a41 100644
--- a/openpype/modules/ftrack/lib/ftrack_base_handler.py
+++ b/openpype/modules/ftrack/lib/ftrack_base_handler.py
@@ -6,7 +6,7 @@ import uuid
 import datetime
 import traceback
 import time
-from openpype.api import Logger
+from openpype.lib import Logger
 from openpype.settings import get_project_settings
 
 import ftrack_api
@@ -52,7 +52,7 @@ class BaseHandler(object):
 
     def __init__(self, session):
         '''Expects a ftrack_api.Session instance'''
-        self.log = Logger().get_logger(self.__class__.__name__)
+        self.log = Logger.get_logger(self.__class__.__name__)
         if not(
             isinstance(session, ftrack_api.session.Session) or
             isinstance(session, ftrack_server.lib.SocketSession)
@@ -535,7 +535,7 @@ class BaseHandler(object):
         )
 
     def trigger_event(
-        self, topic, event_data={}, session=None, source=None,
+        self, topic, event_data=None, session=None, source=None,
        event=None, on_error="ignore"
    ):
        if session is None:
@@ -543,6 +543,9 @@ class BaseHandler(object):
 
         if not source and event:
             source = event.get("source")
+
+        if event_data is None:
+            event_data = {}
         # Create and trigger event
         event = ftrack_api.event.base.Event(
             topic=topic,
diff --git a/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py
new file mode 100644
index 0000000000..43fa3bc3f8
--- /dev/null
+++ b/openpype/modules/ftrack/plugins/publish/collect_custom_attributes_data.py
@@ -0,0 +1,148 @@
+""" +Requires: + context > ftrackSession + context > ftrackEntity + instance > ftrackEntity + +Provides: + instance > customData > ftrack +""" +import copy + +import pyblish.api + + +class CollectFtrackCustomAttributeData(pyblish.api.ContextPlugin): + """Collect custom attribute values and store them to customData. + + Data are stored into each instance in context under + instance.data["customData"]["ftrack"]. + + Hierarchical attributes are not looked up properly for that functionality + custom attribute values lookup must be extended. + """ + + order = pyblish.api.CollectorOrder + 0.4992 + label = "Collect Ftrack Custom Attribute Data" + + # Name of custom attributes for which will be look for + custom_attribute_keys = [] + + def process(self, context): + if not self.custom_attribute_keys: + self.log.info("Custom attribute keys are not set. Skipping") + return + + ftrack_entities_by_id = {} + default_entity_id = None + + context_entity = context.data.get("ftrackEntity") + if context_entity: + entity_id = context_entity["id"] + default_entity_id = entity_id + ftrack_entities_by_id[entity_id] = context_entity + + instances_by_entity_id = { + default_entity_id: [] + } + for instance in context: + entity = instance.data.get("ftrackEntity") + if not entity: + instances_by_entity_id[default_entity_id].append(instance) + continue + + entity_id = entity["id"] + ftrack_entities_by_id[entity_id] = entity + if entity_id not in instances_by_entity_id: + instances_by_entity_id[entity_id] = [] + instances_by_entity_id[entity_id].append(instance) + + if not ftrack_entities_by_id: + self.log.info("Ftrack entities are not set. Skipping") + return + + session = context.data["ftrackSession"] + custom_attr_key_by_id = self.query_attr_confs(session) + if not custom_attr_key_by_id: + self.log.info(( + "Didn't find any of defined custom attributes {}" + ).format(", ".join(self.custom_attribute_keys))) + return + + entity_ids = list(instances_by_entity_id.keys()) + values_by_entity_id = self.query_attr_values( + session, entity_ids, custom_attr_key_by_id + ) + + for entity_id, instances in instances_by_entity_id.items(): + if entity_id not in values_by_entity_id: + # Use defaut empty values + entity_id = None + + for instance in instances: + value = copy.deepcopy(values_by_entity_id[entity_id]) + if "customData" not in instance.data: + instance.data["customData"] = {} + instance.data["customData"]["ftrack"] = value + instance_label = ( + instance.data.get("label") or instance.data["name"] + ) + self.log.debug(( + "Added ftrack custom data to instance \"{}\": {}" + ).format(instance_label, value)) + + def query_attr_values(self, session, entity_ids, custom_attr_key_by_id): + # Prepare values for query + entity_ids_joined = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids + ]) + conf_ids_joined = ",".join([ + '"{}"'.format(conf_id) + for conf_id in custom_attr_key_by_id.keys() + ]) + # Query custom attribute values + value_items = session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + entity_ids_joined, + conf_ids_joined + ) + ).all() + + # Prepare default value output per entity id + values_by_key = { + key: None for key in self.custom_attribute_keys + } + # Prepare all entity ids that were queried + values_by_entity_id = { + entity_id: copy.deepcopy(values_by_key) + for entity_id in entity_ids + } + # Add none entity id which is used as default value + values_by_entity_id[None] = 
copy.deepcopy(values_by_key) + # Go through queried data and store them + for item in value_items: + conf_id = item["configuration_id"] + conf_key = custom_attr_key_by_id[conf_id] + entity_id = item["entity_id"] + values_by_entity_id[entity_id][conf_key] = item["value"] + return values_by_entity_id + + def query_attr_confs(self, session): + custom_attributes = set(self.custom_attribute_keys) + cust_attrs_query = ( + "select id, key from CustomAttributeConfiguration" + " where key in ({})" + ).format(", ".join( + ["\"{}\"".format(attr_name) for attr_name in custom_attributes] + )) + + custom_attr_confs = session.query(cust_attrs_query).all() + return { + conf["id"]: conf["key"] + for conf in custom_attr_confs + } diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py index 07af217fb6..e13b7e65cd 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_api.py @@ -1,12 +1,13 @@ import logging import pyblish.api -import avalon.api + +from openpype.pipeline import legacy_io class CollectFtrackApi(pyblish.api.ContextPlugin): """ Collects an ftrack session and the current task id. """ - order = pyblish.api.CollectorOrder + 0.4999 + order = pyblish.api.CollectorOrder + 0.4991 label = "Collect Ftrack Api" def process(self, context): @@ -23,9 +24,9 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) # Collect task - project_name = avalon.api.Session["AVALON_PROJECT"] - asset_name = avalon.api.Session["AVALON_ASSET"] - task_name = avalon.api.Session["AVALON_TASK"] + project_name = legacy_io.Session["AVALON_PROJECT"] + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] # Find project entity project_query = 'Project where full_name is "{0}"'.format(project_name) @@ -104,11 +105,17 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): context.data["ftrackEntity"] = asset_entity context.data["ftrackTask"] = task_entity - self.per_instance_process(context, asset_name, task_name) + self.per_instance_process(context, asset_entity, task_entity) def per_instance_process( - self, context, context_asset_name, context_task_name + self, context, context_asset_entity, context_task_entity ): + context_task_name = None + context_asset_name = None + if context_asset_entity: + context_asset_name = context_asset_entity["name"] + if context_task_entity: + context_task_name = context_task_entity["name"] instance_by_asset_and_task = {} for instance in context: self.log.debug( @@ -119,6 +126,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): if not instance_asset_name and not instance_task_name: self.log.debug("Instance does not have set context keys.") + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue elif instance_asset_name and instance_task_name: @@ -130,6 +139,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context is same as in publish context." " Asset: {} | Task: {}" ).format(context_asset_name, context_task_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue asset_name = instance_asset_name task_name = instance_task_name @@ -140,6 +151,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context task is same as in publish" " context. 
Task: {}" ).format(context_task_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue asset_name = context_asset_name @@ -151,6 +164,8 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): "Instance's context asset is same as in publish" " context. Asset: {}" ).format(context_asset_name)) + instance.data["ftrackEntity"] = context_asset_entity + instance.data["ftrackTask"] = context_task_entity continue # Do not use context's task name diff --git a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py index 70030acad9..576a7d36c4 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py +++ b/openpype/modules/ftrack/plugins/publish/collect_ftrack_family.py @@ -6,9 +6,9 @@ Provides: instance -> families ([]) """ import pyblish.api -import avalon.api -from openpype.lib.plugin_tools import filter_profiles +from openpype.pipeline import legacy_io +from openpype.lib import filter_profiles class CollectFtrackFamily(pyblish.api.InstancePlugin): @@ -25,7 +25,7 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): based on 'families' (editorial drives it by presence of 'review') """ label = "Collect Ftrack Family" - order = pyblish.api.CollectorOrder + 0.4998 + order = pyblish.api.CollectorOrder + 0.4990 profiles = None @@ -34,9 +34,10 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): self.log.warning("No profiles present for adding Ftrack family") return + add_ftrack_family = False task_name = instance.data.get("task", - avalon.api.Session["AVALON_TASK"]) - host_name = avalon.api.Session["AVALON_APP"] + legacy_io.Session["AVALON_TASK"]) + host_name = legacy_io.Session["AVALON_APP"] family = instance.data["family"] filtering_criteria = { @@ -53,6 +54,8 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): additional_filters = profile.get("advanced_filtering") if additional_filters: + self.log.info("'{}' families used for additional filtering". + format(families)) add_ftrack_family = self._get_add_ftrack_f_from_addit_filters( additional_filters, families, @@ -69,6 +72,13 @@ class CollectFtrackFamily(pyblish.api.InstancePlugin): else: instance.data["families"] = ["ftrack"] + result_str = "Adding" + if not add_ftrack_family: + result_str = "Not adding" + self.log.info("{} 'ftrack' family for instance with '{}'".format( + result_str, family + )) + def _get_add_ftrack_f_from_addit_filters(self, additional_filters, families, diff --git a/openpype/modules/ftrack/plugins/publish/collect_username.py b/openpype/modules/ftrack/plugins/publish/collect_username.py index a9b746ea51..798f3960a8 100644 --- a/openpype/modules/ftrack/plugins/publish/collect_username.py +++ b/openpype/modules/ftrack/plugins/publish/collect_username.py @@ -1,5 +1,8 @@ """Loads publishing context from json and continues in publish process. +Should run before 'CollectAnatomyContextData' so the user on context is +changed before it's stored to context anatomy data or instance anatomy data. + Requires: anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) @@ -13,7 +16,7 @@ import os import pyblish.api -class CollectUsername(pyblish.api.ContextPlugin): +class CollectUsernameForWebpublish(pyblish.api.ContextPlugin): """ Translates user email to Ftrack username. 
@@ -32,10 +35,8 @@ class CollectUsername(pyblish.api.ContextPlugin):
     hosts = ["webpublisher", "photoshop"]
     targets = ["remotepublish", "filespublish", "tvpaint_worker"]
 
-    _context = None
-
     def process(self, context):
-        self.log.info("CollectUsername")
+        self.log.info("{}".format(self.__class__.__name__))
         os.environ["FTRACK_API_USER"] = os.environ["FTRACK_BOT_API_USER"]
         os.environ["FTRACK_API_KEY"] = os.environ["FTRACK_BOT_API_KEY"]
@@ -54,12 +55,14 @@ class CollectUsername(pyblish.api.ContextPlugin):
             return
 
         session = ftrack_api.Session(auto_connect_event_hub=False)
-        user = session.query("User where email like '{}'".format(user_email))
+        user = session.query(
+            "User where email like '{}'".format(user_email)
+        ).first()
 
         if not user:
             raise ValueError(
                 "Couldn't find user with {} email".format(user_email))
 
-        user = user[0]
+
         username = user.get("username")
         self.log.debug("Resolved ftrack username:: {}".format(username))
 
         os.environ["FTRACK_API_USER"] = username
@@ -67,5 +70,4 @@ class CollectUsername(pyblish.api.ContextPlugin):
         burnin_name = username
         if '@' in burnin_name:
             burnin_name = burnin_name[:burnin_name.index('@')]
-        os.environ["WEBPUBLISH_OPENPYPE_USERNAME"] = burnin_name
         context.data["user"] = burnin_name
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
index 6c25b9191e..0e8209866f 100644
--- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py
@@ -1,5 +1,19 @@
+"""Integrate components into ftrack
+
+Requires:
+    context -> ftrackSession - connected ftrack.Session
+    instance -> ftrackComponentsList - list of components to integrate
+
+Provides:
+    instance -> ftrackIntegratedAssetVersionsData
+    # legacy
+    instance -> ftrackIntegratedAssetVersions
+"""
+
 import os
 import sys
+import collections
+
 import six
 import pyblish.api
 import clique
@@ -8,54 +22,165 @@ import clique
 
 class IntegrateFtrackApi(pyblish.api.InstancePlugin):
     """ Commit components to server. """
 
-    order = pyblish.api.IntegratorOrder+0.499
+    order = pyblish.api.IntegratorOrder + 0.499
     label = "Integrate Ftrack Api"
     families = ["ftrack"]
 
-    def query(self, entitytype, data):
-        """ Generate a query expression from data supplied.
+    def process(self, instance):
+        component_list = instance.data.get("ftrackComponentsList")
+        if not component_list:
+            self.log.info(
+                "Instance doesn't have components to integrate to Ftrack."
+                " Skipping."
+            )
+            return
 
-        If a value is not a string, we'll add the id of the entity to the
-        query.
+        context = instance.context
+        task_entity, parent_entity = self.get_instance_entities(
+            instance, context)
+        if parent_entity is None:
+            self.log.info((
+                "Skipping ftrack integration. Instance \"{}\" does not"
+                " have specified ftrack entities."
+            ).format(str(instance)))
+            return
 
-        Args:
-            entitytype (str): The type of entity to query.
-            data (dict): The data to identify the entity.
-            exclusions (list): All keys to exclude from the query.
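# --- Editor's sketch (not part of the patch): the commit/rollback
# discipline the new 'process' uses just below. Recorded operations are
# cleared before integration starts, and the session is reset when
# integration raises, so a failed publish cannot leave half-recorded
# operations in the shared session. 'integrate_fn' is a placeholder name.
def integrate_safely(session, integrate_fn, *args):
    session.recorded_operations.clear()
    session._configure_locations()
    try:
        integrate_fn(session, *args)
    except Exception:
        # Discard any pending operations before re-raising
        session.reset()
        raise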
+ session = context.data["ftrackSession"] + # Reset session operations and reconfigure locations + session.recorded_operations.clear() + session._configure_locations() - Returns: - str: String query to use with "session.query" - """ - queries = [] - if sys.version_info[0] < 3: - for key, value in data.iteritems(): - if not isinstance(value, (basestring, int)): - self.log.info("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) - else: - for key, value in data.items(): - if not isinstance(value, (str, int)): - self.log.info("value: {}".format(value)) - if "id" in value.keys(): - queries.append( - "{0}.id is \"{1}\"".format(key, value["id"]) - ) - else: - queries.append("{0} is \"{1}\"".format(key, value)) + try: + self.integrate_to_ftrack( + session, + instance, + task_entity, + parent_entity, + component_list + ) - query = ( - "select id from " + entitytype + " where " + " and ".join(queries) - ) - self.log.debug(query) - return query + except Exception: + session.reset() + raise - def _set_task_status(self, instance, task_entity, session): + def get_instance_entities(self, instance, context): + parent_entity = None + # If instance has set "ftrackEntity" or "ftrackTask" then use them from + # instance. Even if they are set to None. If they are set to None it + # has a reason. (like has different context) + if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: + task_entity = instance.data.get("ftrackTask") + parent_entity = instance.data.get("ftrackEntity") + + elif "ftrackEntity" in context.data or "ftrackTask" in context.data: + task_entity = context.data.get("ftrackTask") + parent_entity = context.data.get("ftrackEntity") + + if task_entity: + parent_entity = task_entity["parent"] + + return task_entity, parent_entity + + def integrate_to_ftrack( + self, + session, + instance, + task_entity, + parent_entity, + component_list + ): + default_asset_name = None + if task_entity: + default_asset_name = task_entity["name"] + + if not default_asset_name: + default_asset_name = parent_entity["name"] + + # Change status on task + asset_version_status_ids_by_name = {} project_entity = instance.context.data.get("ftrackProject") + if project_entity: + project_schema = project_entity["project_schema"] + asset_version_statuses = ( + project_schema.get_statuses("AssetVersion") + ) + asset_version_status_ids_by_name = { + status["name"].lower(): status["id"] + for status in asset_version_statuses + } + + self._set_task_status(instance, project_entity, task_entity, session) + + # Prepare AssetTypes + asset_types_by_short = self._ensure_asset_types_exists( + session, component_list + ) + self._fill_component_locations(session, component_list) + + asset_versions_data_by_id = {} + used_asset_versions = [] + + # Iterate over components and publish + for data in component_list: + self.log.debug("data: {}".format(data)) + + # AssetType + asset_type_short = data["assettype_data"]["short"] + asset_type_entity = asset_types_by_short[asset_type_short] + + # Asset + asset_data = data.get("asset_data") or {} + if "name" not in asset_data: + asset_data["name"] = default_asset_name + asset_entity = self._ensure_asset_exists( + session, + asset_data, + asset_type_entity["id"], + parent_entity["id"] + ) + + # Asset Version + asset_version_data = data.get("assetversion_data") or {} + asset_version_entity = self._ensure_asset_version_exists( + session, + asset_version_data, + 
asset_entity["id"], + task_entity, + asset_version_status_ids_by_name + ) + + # Store asset version and components items that were + version_id = asset_version_entity["id"] + if version_id not in asset_versions_data_by_id: + asset_versions_data_by_id[version_id] = { + "asset_version": asset_version_entity, + "component_items": [] + } + + asset_versions_data_by_id[version_id]["component_items"].append( + data + ) + + # Backwards compatibility + if asset_version_entity not in used_asset_versions: + used_asset_versions.append(asset_version_entity) + + self._create_components(session, asset_versions_data_by_id) + + instance.data["ftrackIntegratedAssetVersionsData"] = ( + asset_versions_data_by_id + ) + + # Backwards compatibility + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) + + def _set_task_status(self, instance, project_entity, task_entity, session): if not project_entity: self.log.info("Task status won't be set, project is not known.") return @@ -92,375 +217,460 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): self.log.info("Setting task status to \"{}\"".format(status_name)) task_entity["status"] = status - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + session.commit() - def process(self, instance): - session = instance.context.data["ftrackSession"] - context = instance.context + def _fill_component_locations(self, session, component_list): + components_by_location_name = collections.defaultdict(list) + components_by_location_id = collections.defaultdict(list) + for component_item in component_list: + # Location entity can be prefilled + # - this is not recommended as connection to ftrack server may + # be lost and in that case the entity is not valid when gets + # to this plugin + location = component_item.get("component_location") + if location is not None: + continue - name = None - # If instance has set "ftrackEntity" or "ftrackTask" then use them from - # instance. Even if they are set to None. If they are set to None it - # has a reason. (like has different context) - if "ftrackEntity" in instance.data or "ftrackTask" in instance.data: - task = instance.data.get("ftrackTask") - parent = instance.data.get("ftrackEntity") + # Collect location id + location_id = component_item.get("component_location_id") + if location_id: + components_by_location_id[location_id].append( + component_item + ) + continue - elif "ftrackEntity" in context.data or "ftrackTask" in context.data: - task = context.data.get("ftrackTask") - parent = context.data.get("ftrackEntity") + location_name = component_item.get("component_location_name") + if location_name: + components_by_location_name[location_name].append( + component_item + ) + continue - if task: - parent = task["parent"] - name = task - elif parent: - name = parent["name"] - - if not name: - self.log.info(( - "Skipping ftrack integration. Instance \"{}\" does not" - " have specified ftrack entities." - ).format(str(instance))) + # Skip if there is nothing to do + if not components_by_location_name and not components_by_location_id: return - info_msg = ( - "Created new {entity_type} with data: {data}" - ", metadata: {metadata}." 
+ # Query locations + query_filters = [] + if components_by_location_id: + joined_location_ids = ",".join([ + '"{}"'.format(location_id) + for location_id in components_by_location_id + ]) + query_filters.append("id in ({})".format(joined_location_ids)) + + if components_by_location_name: + joined_location_names = ",".join([ + '"{}"'.format(location_name) + for location_name in components_by_location_name + ]) + query_filters.append("name in ({})".format(joined_location_names)) + + locations = session.query( + "select id, name from Location where {}".format( + " or ".join(query_filters) + ) + ).all() + # Fill locations in components + for location in locations: + location_id = location["id"] + location_name = location["name"] + if location_id in components_by_location_id: + for component in components_by_location_id[location_id]: + component["component_location"] = location + + if location_name in components_by_location_name: + for component in components_by_location_name[location_name]: + component["component_location"] = location + + def _ensure_asset_types_exists(self, session, component_list): + """Make sure that all AssetType entities exists for integration. + + Returns: + dict: All asset types by short name. + """ + # Query existing asset types + asset_types = session.query("select id, short from AssetType").all() + # Stpore all existing short names + asset_type_shorts = {asset_type["short"] for asset_type in asset_types} + # Check which asset types are missing and store them + asset_type_names_by_missing_shorts = {} + default_short_name = "upload" + for data in component_list: + asset_type_data = data.get("assettype_data") or {} + asset_type_short = asset_type_data.get("short") + if not asset_type_short: + # Use default asset type name if not set and change the + # input data + asset_type_short = default_short_name + asset_type_data["short"] = asset_type_short + data["assettype_data"] = asset_type_data + + if ( + # Skip if short name exists + asset_type_short in asset_type_shorts + # Skip if short name was already added to missing types + # and asset type name is filled + # - if asset type name is missing then try use name from other + # data + or asset_type_names_by_missing_shorts.get(asset_type_short) + ): + continue + + asset_type_names_by_missing_shorts[asset_type_short] = ( + asset_type_data.get("name") + ) + + # Create missing asset types if there are any + if asset_type_names_by_missing_shorts: + self.log.info("Creating asset types with short names: {}".format( + ", ".join(asset_type_names_by_missing_shorts.keys()) + )) + for missing_short, type_name in ( + asset_type_names_by_missing_shorts.items() + ): + # Use short for name if name is not defined + if not type_name: + type_name = missing_short + # Use short name also for name + # - there is not other source for 'name' + session.create( + "AssetType", + { + "short": missing_short, + "name": type_name + } + ) + + # Commit creation + session.commit() + # Requery asset types + asset_types = session.query( + "select id, short from AssetType" + ).all() + + return {asset_type["short"]: asset_type for asset_type in asset_types} + + def _ensure_asset_exists( + self, session, asset_data, asset_type_id, parent_id + ): + asset_name = asset_data["name"] + asset_entity = self._query_asset( + session, asset_name, asset_type_id, parent_id + ) + if asset_entity is not None: + return asset_entity + + asset_data = { + "name": asset_name, + "type_id": asset_type_id, + "context_id": parent_id + } + self.log.info("Created new Asset with data: 
{}.".format(asset_data)) + session.create("Asset", asset_data) + session.commit() + return self._query_asset(session, asset_name, asset_type_id, parent_id) + + def _query_asset(self, session, asset_name, asset_type_id, parent_id): + return session.query( + ( + "select id from Asset" + " where name is \"{}\"" + " and type_id is \"{}\"" + " and context_id is \"{}\"" + ).format(asset_name, asset_type_id, parent_id) + ).first() + + def _ensure_asset_version_exists( + self, + session, + asset_version_data, + asset_id, + task_entity, + status_ids_by_name + ): + task_id = None + if task_entity: + task_id = task_entity["id"] + + status_name = asset_version_data.pop("status_name", None) + + # Try query asset version by criteria (asset id and version) + version = asset_version_data.get("version") or 0 + asset_version_entity = self._query_asset_version( + session, version, asset_id ) - used_asset_versions = [] + # Prepare comment value + comment = asset_version_data.get("comment") or "" + if asset_version_entity is not None: + changed = False + if comment != asset_version_entity["comment"]: + asset_version_entity["comment"] = comment + changed = True - self._set_task_status(instance, task, session) + if task_id != asset_version_entity["task_id"]: + asset_version_entity["task_id"] = task_id + changed = True - # Iterate over components and publish - for data in instance.data.get("ftrackComponentsList", []): - # AssetType - # Get existing entity. - assettype_data = {"short": "upload"} - assettype_data.update(data.get("assettype_data", {})) - self.log.debug("data: {}".format(data)) - - assettype_entity = session.query( - self.query("AssetType", assettype_data) - ).first() - - # Create a new entity if none exits. - if not assettype_entity: - assettype_entity = session.create("AssetType", assettype_data) - self.log.debug("Created new AssetType with data: {}".format( - assettype_data - )) - - # Asset - # Get existing entity. - asset_data = { - "name": name, - "type": assettype_entity, - "parent": parent, - } - asset_data.update(data.get("asset_data", {})) - - asset_entity = session.query( - self.query("Asset", asset_data) - ).first() - - self.log.info("asset entity: {}".format(asset_entity)) - - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - asset_metadata = asset_data.pop("metadata", {}) - - # Create a new entity if none exits. - if not asset_entity: - asset_entity = session.create("Asset", asset_data) - self.log.debug( - info_msg.format( - entity_type="Asset", - data=asset_data, - metadata=asset_metadata - ) - ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Adding metadata - existing_asset_metadata = asset_entity["metadata"] - existing_asset_metadata.update(asset_metadata) - asset_entity["metadata"] = existing_asset_metadata - - # AssetVersion - # Get existing entity. - assetversion_data = { - "version": 0, - "asset": asset_entity, - } - _assetversion_data = data.get("assetversion_data", {}) - assetversion_cust_attrs = _assetversion_data.pop( - "custom_attributes", {} - ) - asset_version_comment = _assetversion_data.pop( - "comment", None - ) - assetversion_data.update(_assetversion_data) - - assetversion_entity = session.query( - self.query("AssetVersion", assetversion_data) - ).first() - - # Extracting metadata, and adding after entity creation. 
This is - # due to a ftrack_api bug where you can't add metadata on creation. - assetversion_metadata = assetversion_data.pop("metadata", {}) - - if task: - assetversion_data['task'] = task - - # Create a new entity if none exits. - if not assetversion_entity: - assetversion_entity = session.create( - "AssetVersion", assetversion_data - ) - self.log.debug( - info_msg.format( - entity_type="AssetVersion", - data=assetversion_data, - metadata=assetversion_metadata - ) - ) - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - - # Adding metadata - existing_assetversion_metadata = assetversion_entity["metadata"] - existing_assetversion_metadata.update(assetversion_metadata) - assetversion_entity["metadata"] = existing_assetversion_metadata - - # Add comment - if asset_version_comment: - assetversion_entity["comment"] = asset_version_comment - try: - session.commit() - except Exception: - session.rollback() - session._configure_locations() - self.log.warning(( - "Comment was not possible to set for AssetVersion" - "\"{0}\". Can't set it's value to: \"{1}\"" - ).format( - assetversion_entity["id"], str(asset_version_comment) - )) - - # Adding Custom Attributes - for attr, val in assetversion_cust_attrs.items(): - if attr in assetversion_entity["custom_attributes"]: - try: - assetversion_entity["custom_attributes"][attr] = val - session.commit() - continue - except Exception: - session.rollback() - session._configure_locations() - - self.log.warning(( - "Custom Attrubute \"{0}\"" - " is not available for AssetVersion <{1}>." - " Can't set it's value to: \"{2}\"" - ).format(attr, assetversion_entity["id"], str(val))) - - # Have to commit the version and asset, because location can't - # determine the final location without. - try: + if changed: session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) - # Component - # Get existing entity. - component_data = { - "name": "main", - "version": assetversion_entity + else: + new_asset_version_data = { + "version": version, + "asset_id": asset_id } - component_data.update(data.get("component_data", {})) + if task_id: + new_asset_version_data["task_id"] = task_id - component_entity = session.query( - self.query("Component", component_data) - ).first() + if comment: + new_asset_version_data["comment"] = comment - component_overwrite = data.get("component_overwrite", False) - location = data.get("component_location", session.pick_location()) + self.log.info("Created new AssetVersion with data {}".format( + new_asset_version_data + )) + session.create("AssetVersion", new_asset_version_data) + session.commit() + asset_version_entity = self._query_asset_version( + session, version, asset_id + ) - # Overwrite existing component data if requested. - if component_entity and component_overwrite: + if status_name: + status_id = status_ids_by_name.get(status_name.lower()) + if not status_id: + self.log.info(( + "Ftrack status with name \"{}\"" + " for AssetVersion was not found." 
+                ).format(status_name))
+
+            elif asset_version_entity["status_id"] != status_id:
+                asset_version_entity["status_id"] = status_id
+                session.commit()
+
+        # Set custom attributes if there are any
+        custom_attrs = asset_version_data.get("custom_attributes") or {}
+        for attr_key, attr_value in custom_attrs.items():
+            if attr_key in asset_version_entity["custom_attributes"]:
+                try:
+                    asset_version_entity["custom_attributes"][attr_key] = (
+                        attr_value
+                    )
+                    session.commit()
+                    continue
+                except Exception:
+                    session.rollback()
+                    session._configure_locations()
+
+            self.log.warning(
+                (
+                    "Custom Attribute \"{0}\" is not available for"
+                    " AssetVersion <{1}>. Can't set its value to: \"{2}\""
+                ).format(
+                    attr_key, asset_version_entity["id"], str(attr_value)
+                )
+            )
+
+        return asset_version_entity
+
+    def _query_asset_version(self, session, version, asset_id):
+        return session.query(
+            (
+                "select id, task_id, comment from AssetVersion"
+                " where version is \"{}\" and asset_id is \"{}\""
+            ).format(version, asset_id)
+        ).first()
+
+    def create_component(self, session, asset_version_entity, data):
+        component_data = data.get("component_data") or {}
+
+        if not component_data.get("name"):
+            component_data["name"] = "main"
+
+        version_id = asset_version_entity["id"]
+        component_data["version_id"] = version_id
+        component_entity = session.query(
+            (
+                "select id, name from Component where name is \"{}\""
+                " and version_id is \"{}\""
+            ).format(component_data["name"], version_id)
+        ).first()
+
+        component_overwrite = data.get("component_overwrite", False)
+        location = data.get("component_location", session.pick_location())
+
+        # Overwrite existing component data if requested.
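# --- Editor's sketch (not part of the patch): the "query, create if
# missing, commit, requery" idiom shared by '_ensure_asset_exists' and
# '_ensure_asset_version_exists' above. Arguments are placeholders.
def ensure_entity(session, entity_type, query, create_data):
    entity = session.query(query).first()
    if entity is not None:
        return entity
    session.create(entity_type, create_data)
    session.commit()
    # Requery so the returned entity has server-side values populated
    return session.query(query).first()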
+ if component_entity and component_overwrite: + origin_location = session.query( + "Location where name is \"ftrack.origin\"" + ).one() + + # Removing existing members from location + components = list(component_entity.get("members", [])) + components += [component_entity] + for component in components: + for loc in component["component_locations"]: + if location["id"] == loc["location_id"]: + location.remove_component( + component, recursive=False ) - origin_location.add_component( - component, member_path, recursive=False - ) - component_entity["members"].append(component) - # Add components to location. - location.add_component( - component_entity, origin_location, recursive=True - ) + # Deleting existing members on component entity + for member in component_entity.get("members", []): + session.delete(member) + del(member) - data["component"] = component_entity - msg = "Overwriting Component with path: {0}, data: {1}, " - msg += "location: {2}" - self.log.info( - msg.format( - data["component_path"], - component_data, - location - ) - ) + session.commit() - # Extracting metadata, and adding after entity creation. This is - # due to a ftrack_api bug where you can't add metadata on creation. - component_metadata = component_data.pop("metadata", {}) + # Reset members in memory + if "members" in component_entity.keys(): + component_entity["members"] = [] - # Create new component if none exists. - new_component = False - if not component_entity: - component_entity = assetversion_entity.create_component( - data["component_path"], - data=component_data, - location=location - ) - data["component"] = component_entity - msg = "Created new Component with path: {0}, data: {1}" - msg += ", metadata: {2}, location: {3}" - self.log.info( - msg.format( - data["component_path"], - component_data, - component_metadata, - location - ) - ) - new_component = True + # Add components to origin location + try: + collection = clique.parse(data["component_path"]) + except ValueError: + # Assume its a single file + # Changing file type + name, ext = os.path.splitext(data["component_path"]) + component_entity["file_type"] = ext - # Adding metadata - existing_component_metadata = component_entity["metadata"] - existing_component_metadata.update(component_metadata) - component_entity["metadata"] = existing_component_metadata - - # if component_data['name'] = 'ftrackreview-mp4-mp4': - # assetversion_entity["thumbnail_id"] - - # Setting assetversion thumbnail - if data.get("thumbnail", False): - assetversion_entity["thumbnail_id"] = component_entity["id"] - - # Inform user about no changes to the database. - if (component_entity and not component_overwrite and - not new_component): - data["component"] = component_entity - self.log.info( - "Found existing component, and no request to overwrite. " - "Nothing has been changed." + origin_location.add_component( + component_entity, data["component_path"] ) else: - # Commit changes. - try: - session.commit() - except Exception: - tp, value, tb = sys.exc_info() - session.rollback() - session._configure_locations() - six.reraise(tp, value, tb) + # Changing file type + component_entity["file_type"] = collection.format("{tail}") - if assetversion_entity not in used_asset_versions: - used_asset_versions.append(assetversion_entity) + # Create member components for sequence. 
+ for member_path in collection: - asset_versions_key = "ftrackIntegratedAssetVersions" - if asset_versions_key not in instance.data: - instance.data[asset_versions_key] = [] + size = 0 + try: + size = os.path.getsize(member_path) + except OSError: + pass - for asset_version in used_asset_versions: - if asset_version not in instance.data[asset_versions_key]: - instance.data[asset_versions_key].append(asset_version) + name = collection.match(member_path).group("index") + + member_data = { + "name": name, + "container": component_entity, + "size": size, + "file_type": os.path.splitext(member_path)[-1] + } + + component = session.create( + "FileComponent", member_data + ) + origin_location.add_component( + component, member_path, recursive=False + ) + component_entity["members"].append(component) + + # Add components to location. + location.add_component( + component_entity, origin_location, recursive=True + ) + + data["component"] = component_entity + self.log.info( + ( + "Overwriting Component with path: {0}, data: {1}," + " location: {2}" + ).format( + data["component_path"], + component_data, + location + ) + ) + + # Extracting metadata, and adding after entity creation. This is + # due to a ftrack_api bug where you can't add metadata on creation. + component_metadata = component_data.pop("metadata", {}) + + # Create new component if none exists. + new_component = False + if not component_entity: + component_entity = asset_version_entity.create_component( + data["component_path"], + data=component_data, + location=location + ) + data["component"] = component_entity + self.log.info( + ( + "Created new Component with path: {0}, data: {1}," + " metadata: {2}, location: {3}" + ).format( + data["component_path"], + component_data, + component_metadata, + location + ) + ) + new_component = True + + # Adding metadata + existing_component_metadata = component_entity["metadata"] + existing_component_metadata.update(component_metadata) + component_entity["metadata"] = existing_component_metadata + + # if component_data['name'] = 'ftrackreview-mp4-mp4': + # assetversion_entity["thumbnail_id"] + + # Setting assetversion thumbnail + if data.get("thumbnail"): + asset_version_entity["thumbnail_id"] = component_entity["id"] + + # Inform user about no changes to the database. + if ( + component_entity + and not component_overwrite + and not new_component + ): + data["component"] = component_entity + self.log.info( + "Found existing component, and no request to overwrite. " + "Nothing has been changed." + ) + else: + # Commit changes. 
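# --- Editor's sketch (not part of the patch): how the sequence branch
# above interprets 'component_path'. clique.parse() raises ValueError for
# a single file; for a sequence, every member file becomes a FileComponent
# named by its frame index. The file names are illustrative.
import clique

collection = clique.parse("render.%04d.exr [1001-1003]")
print(collection.format("{tail}"))  # ".exr" -> stored as 'file_type'
for member_path in collection:
    index = collection.match(member_path).group("index")
    print(member_path, index)  # e.g. "render.1001.exr" "1001"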
+ session.commit() + + def _create_components(self, session, asset_versions_data_by_id): + for item in asset_versions_data_by_id.values(): + asset_version_entity = item["asset_version"] + component_items = item["component_items"] + + component_entities = session.query( + ( + "select id, name from Component where version_id is \"{}\"" + ).format(asset_version_entity["id"]) + ).all() + + existing_component_names = { + component["name"] + for component in component_entities + } + + contain_review = "ftrackreview-mp4" in existing_component_names + thumbnail_component_item = None + for component_item in component_items: + component_data = component_item.get("component_data") or {} + component_name = component_data.get("name") + if component_name == "ftrackreview-mp4": + contain_review = True + elif component_name == "ftrackreview-image": + thumbnail_component_item = component_item + + if contain_review and thumbnail_component_item: + thumbnail_component_item["component_data"]["name"] = ( + "thumbnail" + ) + + # Component + for component_item in component_items: + self.create_component( + session, asset_version_entity, component_item + ) diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py index 047fd8462c..8cb2336391 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_component_overwrite.py @@ -13,7 +13,10 @@ class IntegrateFtrackComponentOverwrite(pyblish.api.InstancePlugin): active = False def process(self, instance): - component_list = instance.data['ftrackComponentsList'] + component_list = instance.data.get('ftrackComponentsList') + if not component_list: + self.log.info("No component to overwrite...") + return for cl in component_list: cl['component_overwrite'] = True diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py new file mode 100644 index 0000000000..6ed02bc8b6 --- /dev/null +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_description.py @@ -0,0 +1,113 @@ +""" +Requires: + context > comment + context > ftrackSession + instance > ftrackIntegratedAssetVersionsData +""" + +import sys +import json + +import six +import pyblish.api +from openpype.lib import StringTemplate + + +class IntegrateFtrackDescription(pyblish.api.InstancePlugin): + """Add description to AssetVersions in Ftrack.""" + + # Must be after integrate asset new + order = pyblish.api.IntegratorOrder + 0.4999 + label = "Integrate Ftrack description" + families = ["ftrack"] + optional = True + + # Can be set in settings: + # - Allows `intent` and `comment` keys + description_template = "{comment}" + + def process(self, instance): + if not self.description_template: + self.log.info("Skipping. 
Description template is not set.")
+            return
+
+        # Check if there are any integrated AssetVersion entities
+        asset_versions_key = "ftrackIntegratedAssetVersionsData"
+        asset_versions_data_by_id = instance.data.get(asset_versions_key)
+        if not asset_versions_data_by_id:
+            self.log.info("There are no integrated AssetVersions")
+            return
+
+        comment = instance.data["comment"]
+        if not comment:
+            self.log.info("Comment is not set.")
+        else:
+            self.log.debug("Comment is set to `{}`".format(comment))
+
+        intent = instance.context.data.get("intent")
+        if intent and "{intent}" in self.description_template:
+            value = intent.get("value")
+            if value:
+                intent = intent.get("label") or value
+
+        if not intent and not comment:
+            self.log.info("Skipping. Intent and comment are empty.")
+            return
+
+        # if intent is set then log it
+        # - it is possible that intent is equal to "" (empty string)
+        if intent:
+            self.log.debug("Intent is set to `{}`.".format(intent))
+        else:
+            self.log.debug("Intent is not set.")
+
+        # To support more optional keys we would have to add expressions to
+        # the templates, or specific templates for each combination of
+        # comment and intent being set or unset (when both are unset the
+        # description does not make sense).
+        fill_data = {}
+        if comment:
+            fill_data["comment"] = comment
+        if intent:
+            fill_data["intent"] = intent
+
+        description = StringTemplate.format_template(
+            self.description_template, fill_data
+        )
+        if not description.solved:
+            self.log.warning((
+                "Couldn't solve template \"{}\" with data {}"
+            ).format(
+                self.description_template, json.dumps(fill_data, indent=4)
+            ))
+            return
+
+        if not description:
+            self.log.debug((
+                "Skipping. Result of template is empty string."
+                " Template \"{}\" Fill data: {}"
+            ).format(
+                self.description_template, json.dumps(fill_data, indent=4)
+            ))
+            return
+
+        session = instance.context.data["ftrackSession"]
+        for asset_version_data in asset_versions_data_by_id.values():
+            asset_version = asset_version_data["asset_version"]
+
+            # Backwards compatibility for older settings using
+            #   attribute 'note_with_intent_template'
+            asset_version["comment"] = description
+
+            try:
+                session.commit()
+                self.log.debug("Comment added to AssetVersion \"{}\"".format(
+                    str(asset_version)
+                ))
+            except Exception:
+                tp, value, tb = sys.exc_info()
+                session.rollback()
+                session._configure_locations()
+                six.reraise(tp, value, tb)
diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py
new file mode 100644
index 0000000000..ab5738c33f
--- /dev/null
+++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_farm_status.py
@@ -0,0 +1,150 @@
+import pyblish.api
+from openpype.lib import filter_profiles
+
+
+class IntegrateFtrackFarmStatus(pyblish.api.ContextPlugin):
+    """Change task status when the instance will be published on farm.
+
+    An instance that has the "farm" key in its data set to 'True' will be
+    rendered on farm, so its task status should be changed.
+    """
+
+    order = pyblish.api.IntegratorOrder + 0.48
+    label = "Integrate Ftrack Farm Status"
+
+    farm_status_profiles = []
+
+    def process(self, context):
+        # Quick end
+        if not self.farm_status_profiles:
+            project_name = context.data["projectName"]
+            self.log.info((
+                "Status profiles are not filled for project \"{}\".
Skipping" + ).format(project_name)) + return + + filtered_instances = self.filter_instances(context) + instances_with_status_names = self.get_instances_with_statuse_names( + context, filtered_instances + ) + if instances_with_status_names: + self.fill_statuses(context, instances_with_status_names) + + def filter_instances(self, context): + filtered_instances = [] + for instance in context: + # Skip disabled instances + if instance.data.get("publish") is False: + continue + subset_name = instance.data["subset"] + msg_start = "Skipping instance {}.".format(subset_name) + if not instance.data.get("farm"): + self.log.debug( + "{} Won't be rendered on farm.".format(msg_start) + ) + continue + + task_entity = instance.data.get("ftrackTask") + if not task_entity: + self.log.debug( + "{} Does not have filled task".format(msg_start) + ) + continue + + filtered_instances.append(instance) + return filtered_instances + + def get_instances_with_statuse_names(self, context, instances): + instances_with_status_names = [] + for instance in instances: + family = instance.data["family"] + subset_name = instance.data["subset"] + task_entity = instance.data["ftrackTask"] + host_name = context.data["hostName"] + task_name = task_entity["name"] + task_type = task_entity["type"]["name"] + status_profile = filter_profiles( + self.farm_status_profiles, + { + "hosts": host_name, + "task_types": task_type, + "task_names": task_name, + "families": family, + "subsets": subset_name, + }, + logger=self.log + ) + if not status_profile: + # There already is log in 'filter_profiles' + continue + + status_name = status_profile["status_name"] + if status_name: + instances_with_status_names.append((instance, status_name)) + return instances_with_status_names + + def fill_statuses(self, context, instances_with_status_names): + # Prepare available task statuses on the project + project_name = context.data["projectName"] + session = context.data["ftrackSession"] + project_entity = session.query(( + "select project_schema from Project where full_name is \"{}\"" + ).format(project_name)).one() + project_schema = project_entity["project_schema"] + + task_type_ids = set() + for item in instances_with_status_names: + instance, _ = item + task_entity = instance.data["ftrackTask"] + task_type_ids.add(task_entity["type"]["id"]) + + task_statuses_by_type_id = { + task_type_id: project_schema.get_statuses("Task", task_type_id) + for task_type_id in task_type_ids + } + + # Keep track if anything has changed + skipped_status_names = set() + status_changed = False + for item in instances_with_status_names: + instance, status_name = item + task_entity = instance.data["ftrackTask"] + task_statuses = task_statuses_by_type_id[task_entity["type"]["id"]] + status_name_low = status_name.lower() + + status_id = None + status_name = None + # Skip if status name was already tried to be found + for status in task_statuses: + if status["name"].lower() == status_name_low: + status_id = status["id"] + status_name = status["name"] + break + + if status_id is None: + if status_name_low not in skipped_status_names: + skipped_status_names.add(status_name_low) + joined_status_names = ", ".join({ + '"{}"'.format(status["name"]) + for status in task_statuses + }) + self.log.warning(( + "Status \"{}\" is not available on project \"{}\"." 
+ " Available statuses are {}" + ).format(status_name, project_name, joined_status_names)) + continue + + # Change task status id + if status_id != task_entity["status_id"]: + task_entity["status_id"] = status_id + status_changed = True + path = "/".join([ + item["name"] + for item in task_entity["link"] + ]) + self.log.debug("Set status \"{}\" to \"{}\"".format( + status_name, path + )) + + if status_changed: + session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py index cff7cd32cb..d6cb3daf0d 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_instances.py @@ -3,6 +3,15 @@ import json import copy import pyblish.api +from openpype.pipeline.publish import get_publish_repre_path +from openpype.lib.openpype_version import get_openpype_version +from openpype.lib.transcoding import ( + get_ffprobe_streams, + convert_ffprobe_fps_to_float, +) +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib.transcoding import VIDEO_EXTENSIONS + class IntegrateFtrackInstance(pyblish.api.InstancePlugin): """Collect ftrack component data (not integrate yet). @@ -14,10 +23,21 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): label = "Integrate Ftrack Component" families = ["ftrack"] + metadata_keys_to_label = { + "openpype_version": "OpenPype version", + "frame_start": "Frame start", + "frame_end": "Frame end", + "duration": "Duration", + "width": "Resolution width", + "height": "Resolution height", + "fps": "FPS", + "codec": "Codec" + } + family_mapping = { "camera": "cam", "look": "look", - "mayaascii": "scene", + "mayaAscii": "scene", "model": "geo", "rig": "rig", "setdress": "setdress", @@ -35,34 +55,13 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "image": "img", "reference": "reference" } + keep_first_subset_name_for_review = True + asset_versions_status_profiles = [] + additional_metadata_keys = [] def process(self, instance): self.log.debug("instance {}".format(instance)) - instance_version = instance.data.get("version") - if instance_version is None: - raise ValueError("Instance version not set") - - version_number = int(instance_version) - - family = instance.data["family"] - family_low = instance.data["family"].lower() - - asset_type = instance.data.get("ftrackFamily") - if not asset_type and family_low in self.family_mapping: - asset_type = self.family_mapping[family_low] - - self.log.debug(self.family_mapping) - self.log.debug(family_low) - - # Ignore this instance if neither "ftrackFamily" or a family mapping is - # found. 
- if not asset_type: - self.log.info(( - "Family \"{}\" does not match any asset type mapping" - ).format(family)) - return - instance_repres = instance.data.get("representations") if not instance_repres: self.log.info(( @@ -70,10 +69,30 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ).format(str(instance))) return - # Prepare FPS - instance_fps = instance.data.get("fps") - if instance_fps is None: - instance_fps = instance.context.data["fps"] + instance_version = instance.data.get("version") + if instance_version is None: + raise ValueError("Instance version not set") + + version_number = int(instance_version) + + family = instance.data["family"] + + # Perform case-insensitive family mapping + family_low = family.lower() + asset_type = instance.data.get("ftrackFamily") + if not asset_type: + for map_family, map_value in self.family_mapping.items(): + if map_family.lower() == family_low: + asset_type = map_value + break + + if not asset_type: + asset_type = "upload" + + self.log.debug( + "Family: {}\nMapping: {}".format(family_low, self.family_mapping) + ) + status_name = self._get_asset_version_status_name(instance) # Base of component item data # - create a copy of this object when want to use it @@ -86,7 +105,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): }, "assetversion_data": { "version": version_number, - "comment": instance.context.data.get("comment") or "" + "comment": instance.context.data.get("comment") or "", + "status_name": status_name }, "component_overwrite": False, # This can be change optionally @@ -94,15 +114,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # These must be changed for each component "component_data": None, "component_path": None, - "component_location": None + "component_location": None, + "component_location_name": None, + "additional_data": {} } - ft_session = instance.context.data["ftrackSession"] - # Filter types of representations review_representations = [] thumbnail_representations = [] other_representations = [] + has_movie_review = False for repre in instance_repres: self.log.debug("Representation {}".format(repre)) repre_tags = repre.get("tags") or [] @@ -111,17 +132,15 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): elif "ftrackreview" in repre_tags: review_representations.append(repre) + if self._is_repre_video(repre): + has_movie_review = True else: other_representations.append(repre) # Prepare ftrack locations - unmanaged_location = ft_session.query( - "Location where name is \"ftrack.unmanaged\"" - ).one() - ftrack_server_location = ft_session.query( - "Location where name is \"ftrack.server\"" - ).one() + unmanaged_location_name = "ftrack.unmanaged" + ftrack_server_location_name = "ftrack.server" # Components data component_list = [] @@ -131,89 +150,148 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Create thumbnail components # TODO what if there is multiple thumbnails? 
first_thumbnail_component = None - for repre in thumbnail_representations: - published_path = repre.get("published_path") - if not published_path: - comp_files = repre["files"] - if isinstance(comp_files, (tuple, list, set)): - filename = comp_files[0] - else: - filename = comp_files + first_thumbnail_component_repre = None - published_path = os.path.join( - repre["stagingDir"], filename - ) - if not os.path.exists(published_path): + if not review_representations or has_movie_review: + for repre in thumbnail_representations: + repre_path = get_publish_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published path is not set and source was removed." + ) continue - repre["published_path"] = published_path - # Create copy of base comp item and append it - thumbnail_item = copy.deepcopy(base_component_item) - thumbnail_item["component_path"] = repre["published_path"] - thumbnail_item["component_data"] = { - "name": "thumbnail" - } - thumbnail_item["thumbnail"] = True - # Create copy of item before setting location - src_components_to_add.append(copy.deepcopy(thumbnail_item)) - # Create copy of first thumbnail - if first_thumbnail_component is None: - first_thumbnail_component = copy.deepcopy(thumbnail_item) - # Set location - thumbnail_item["component_location"] = ftrack_server_location - # Add item to component list - component_list.append(thumbnail_item) + # Create copy of base comp item and append it + thumbnail_item = copy.deepcopy(base_component_item) + thumbnail_item["component_path"] = repre_path + thumbnail_item["component_data"] = { + "name": "thumbnail" + } + thumbnail_item["thumbnail"] = True + + # Create copy of item before setting location + if "delete" not in repre.get("tags", []): + src_components_to_add.append(copy.deepcopy(thumbnail_item)) + # Create copy of first thumbnail + if first_thumbnail_component is None: + first_thumbnail_component_repre = repre + first_thumbnail_component = thumbnail_item + # Set location + thumbnail_item["component_location_name"] = ( + ftrack_server_location_name + ) + + # Add item to component list + component_list.append(thumbnail_item) + + if first_thumbnail_component is not None: + metadata = self._prepare_image_component_metadata( + first_thumbnail_component_repre, + first_thumbnail_component["component_path"] + ) + + if metadata: + component_data = first_thumbnail_component["component_data"] + component_data["metadata"] = metadata + + if review_representations: + component_data["name"] = "thumbnail" + else: + component_data["name"] = "ftrackreview-image" # Create review components # Change asset name of each new component for review is_first_review_repre = True not_first_components = [] + extended_asset_name = "" + multiple_reviewable = len(review_representations) > 1 for repre in review_representations: - frame_start = repre.get("frameStartFtrack") - frame_end = repre.get("frameEndFtrack") - if frame_start is None or frame_end is None: - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] + if not self._is_repre_video(repre) and has_movie_review: + self.log.debug("Movie repre has priority " + "from {}".format(repre)) + continue - # Frame end of uploaded video file should be duration in frames - # - frame start is always 0 - # - frame end is duration in frames - duration = frame_end - frame_start + 1 - - fps = repre.get("fps") - if fps is None: - fps = instance_fps + repre_path = get_publish_repre_path(instance, repre, False) + if not repre_path: + self.log.warning( + "Published 
path is not set and source was removed." + ) + continue # Create copy of base comp item and append it review_item = copy.deepcopy(base_component_item) + + # get asset name and define extended name variant + asset_name = review_item["asset_data"]["name"] + extended_asset_name = "_".join( + (asset_name, repre["name"]) + ) + + # reset extended if no need for extended asset name + if ( + self.keep_first_subset_name_for_review + and is_first_review_repre + ): + extended_asset_name = "" + else: + # only rename if multiple reviewable + if multiple_reviewable: + review_item["asset_data"]["name"] = extended_asset_name + else: + extended_asset_name = "" + + # rename all already created components + # only if first repre and extended name available + if is_first_review_repre and extended_asset_name: + # and rename all already created components + for _ci in component_list: + _ci["asset_data"]["name"] = extended_asset_name + + # and rename all already created src components + for _sci in src_components_to_add: + _sci["asset_data"]["name"] = extended_asset_name + + # rename also first thumbnail component if any + if first_thumbnail_component is not None: + first_thumbnail_component[ + "asset_data"]["name"] = extended_asset_name + # Change location - review_item["component_path"] = repre["published_path"] + review_item["component_path"] = repre_path # Change component data + + if self._is_repre_video(repre): + component_name = "ftrackreview-mp4" + metadata = self._prepare_video_component_metadata( + instance, repre, repre_path, True + ) + else: + component_name = "ftrackreview-image" + metadata = self._prepare_image_component_metadata( + repre, repre_path + ) + review_item["thumbnail"] = True + review_item["component_data"] = { # Default component name is "main". 
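# --- Editor's sketch (not part of the patch): the 'ftr_meta' payload that
# ftrack's web player reads from review components, which this hunk moves
# out of the inline dict into '_prepare_video_component_metadata'. For
# movie reviews, frameIn is always 0 and frameOut is the duration in
# frames. The numbers here are illustrative.
import json

duration_in_frames = 100
component_metadata = {
    "ftr_meta": json.dumps({
        "frameIn": 0,
        "frameOut": duration_in_frames,
        "frameRate": 25.0,
    })
}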
- "name": "ftrackreview-mp4", - "metadata": { - "ftr_meta": json.dumps({ - "frameIn": 0, - "frameOut": int(duration), - "frameRate": float(fps) - }) - } + "name": component_name, + "metadata": metadata } - # Create copy of item before setting location or changing asset - src_components_to_add.append(copy.deepcopy(review_item)) + if is_first_review_repre: is_first_review_repre = False else: - # Add representation name to asset name of "not first" review - asset_name = review_item["asset_data"]["name"] - review_item["asset_data"]["name"] = "_".join( - (asset_name, repre["name"]) - ) + # later detection for thumbnail duplication not_first_components.append(review_item) + # Create copy of item before setting location + if "delete" not in repre.get("tags", []): + src_components_to_add.append(copy.deepcopy(review_item)) + # Set location - review_item["component_location"] = ftrack_server_location + review_item["component_location_name"] = ( + ftrack_server_location_name + ) # Add item to component list component_list.append(review_item) @@ -225,8 +303,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): first_thumbnail_component ) new_thumbnail_component["asset_data"]["name"] = asset_name - new_thumbnail_component["component_location"] = ( - ftrack_server_location + new_thumbnail_component["component_location_name"] = ( + ftrack_server_location_name ) component_list.append(new_thumbnail_component) @@ -235,24 +313,39 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Make sure thumbnail is disabled copy_src_item["thumbnail"] = False # Set location - copy_src_item["component_location"] = unmanaged_location + copy_src_item["component_location_name"] = unmanaged_location_name # Modify name of component to have suffix "_src" component_data = copy_src_item["component_data"] component_name = component_data["name"] component_data["name"] = component_name + "_src" + component_data["metadata"] = self._prepare_component_metadata( + instance, repre, copy_src_item["component_path"], False + ) component_list.append(copy_src_item) # Add others representations as component for repre in other_representations: - published_path = repre.get("published_path") + published_path = get_publish_repre_path(instance, repre, True) if not published_path: continue # Create copy of base comp item and append it other_item = copy.deepcopy(base_component_item) - other_item["component_data"] = { - "name": repre["name"] + + # add extended name if any + if ( + not self.keep_first_subset_name_for_review + and extended_asset_name + ): + other_item["asset_data"]["name"] = extended_asset_name + + component_data = { + "name": repre["name"], + "metadata": self._prepare_component_metadata( + instance, repre, published_path, False + ) } - other_item["component_location"] = unmanaged_location + other_item["component_data"] = component_data + other_item["component_location_name"] = unmanaged_location_name other_item["component_path"] = published_path component_list.append(other_item) @@ -268,3 +361,206 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ) )) instance.data["ftrackComponentsList"] = component_list + + def _collect_additional_metadata(self, streams): + pass + + def _get_asset_version_status_name(self, instance): + if not self.asset_versions_status_profiles: + return None + + # Prepare filtering data for new asset version status + anatomy_data = instance.data["anatomyData"] + task_type = anatomy_data.get("task", {}).get("type") + filtering_criteria = { + "families": instance.data["family"], + 
"hosts": instance.context.data["hostName"], + "task_types": task_type + } + matching_profile = filter_profiles( + self.asset_versions_status_profiles, + filtering_criteria + ) + if not matching_profile: + return None + + return matching_profile["status"] or None + + def _prepare_component_metadata( + self, instance, repre, component_path, is_review=None + ): + if self._is_repre_video(repre): + return self._prepare_video_component_metadata(instance, repre, + component_path, + is_review) + else: + return self._prepare_image_component_metadata(repre, + component_path) + + def _prepare_video_component_metadata( + self, instance, repre, component_path, is_review=None + ): + metadata = {} + if "openpype_version" in self.additional_metadata_keys: + label = self.metadata_keys_to_label["openpype_version"] + metadata[label] = get_openpype_version() + + extension = os.path.splitext(component_path)[-1] + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug( + "Failed to retrieve information about " + "input {}".format(component_path)) + + # Find video streams + video_streams = [ + stream + for stream in streams + if stream["codec_type"] == "video" + ] + # Skip if there are not video streams + # - exr is special case which can have issues with reading through + # ffmpegh but we want to set fps for it + if not video_streams and extension not in [".exr"]: + return metadata + + stream_width = None + stream_height = None + stream_fps = None + frame_out = None + codec_label = None + for video_stream in video_streams: + codec_label = video_stream.get("codec_long_name") + if not codec_label: + codec_label = video_stream.get("codec") + + if codec_label: + pix_fmt = video_stream.get("pix_fmt") + if pix_fmt: + codec_label += " ({})".format(pix_fmt) + + tmp_width = video_stream.get("width") + tmp_height = video_stream.get("height") + if tmp_width and tmp_height: + stream_width = tmp_width + stream_height = tmp_height + + input_framerate = video_stream.get("r_frame_rate") + stream_duration = video_stream.get("duration") + if input_framerate is None or stream_duration is None: + continue + try: + stream_fps = convert_ffprobe_fps_to_float( + input_framerate + ) + except ValueError: + self.log.warning( + "Could not convert ffprobe " + "fps to float \"{}\"".format(input_framerate)) + continue + + stream_width = tmp_width + stream_height = tmp_height + + self.log.debug("FPS from stream is {} and duration is {}".format( + input_framerate, stream_duration + )) + frame_out = float(stream_duration) * stream_fps + break + + # Prepare FPS + instance_fps = instance.data.get("fps") + if instance_fps is None: + instance_fps = instance.context.data["fps"] + + repre_fps = repre.get("fps") + if repre_fps is not None: + repre_fps = float(repre_fps) + + fps = stream_fps or repre_fps or instance_fps + + # Prepare frame ranges + frame_start = repre.get("frameStartFtrack") + frame_end = repre.get("frameEndFtrack") + if frame_start is None or frame_end is None: + frame_start = instance.data["frameStart"] + frame_end = instance.data["frameEnd"] + duration = (frame_end - frame_start) + 1 + + for key, value in [ + ("fps", fps), + ("frame_start", frame_start), + ("frame_end", frame_end), + ("duration", duration), + ("width", stream_width), + ("height", stream_height), + ("fps", fps), + ("codec", codec_label) + ]: + if not value or key not in self.additional_metadata_keys: + continue + label = self.metadata_keys_to_label[key] + metadata[label] = value + + if not is_review: + ftr_meta = {} + 
if fps: + ftr_meta["frameRate"] = fps + + if stream_width and stream_height: + ftr_meta["width"] = int(stream_width) + ftr_meta["height"] = int(stream_height) + metadata["ftr_meta"] = json.dumps(ftr_meta) + return metadata + + # Frame end of uploaded video file should be duration in frames + # - frame start is always 0 + # - frame end is duration in frames + if not frame_out: + frame_out = duration + + # Ftrack documentation says that it is required to have + # 'width' and 'height' in review component. But with those values + # review video does not play. + metadata["ftr_meta"] = json.dumps({ + "frameIn": 0, + "frameOut": frame_out, + "frameRate": float(fps) + }) + return metadata + + def _prepare_image_component_metadata(self, repre, component_path): + width = repre.get("width") + height = repre.get("height") + if not width or not height: + streams = [] + try: + streams = get_ffprobe_streams(component_path) + except Exception: + self.log.debug( + "Failed to retrieve information " + "about input {}".format(component_path)) + + for stream in streams: + if "width" in stream and "height" in stream: + width = stream["width"] + height = stream["height"] + break + + metadata = {} + if width and height: + metadata = { + "ftr_meta": json.dumps({ + "width": width, + "height": height, + "format": "image" + }) + } + + return metadata + + def _is_repre_video(self, repre): + repre_ext = ".{}".format(repre["ext"]) + return repre_ext in VIDEO_EXTENSIONS diff --git a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py index acd295854d..6e82897d89 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_ftrack_note.py @@ -1,7 +1,19 @@ +""" +Requires: + context > hostName + context > appName + context > appLabel + context > comment + context > ftrackSession + instance > ftrackIntegratedAssetVersionsData +""" + import sys -import json -import pyblish.api +import copy + import six +import pyblish.api +from openpype.lib import StringTemplate class IntegrateFtrackNote(pyblish.api.InstancePlugin): @@ -15,100 +27,48 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): # Can be set in presets: # - Allows only `intent` and `comment` keys + note_template = None + # Backwards compatibility note_with_intent_template = "{intent}: {comment}" # - note label must exist in Ftrack note_labels = [] - def get_intent_label(self, session, intent_value): - if not intent_value: - return - - intent_configurations = session.query( - "CustomAttributeConfiguration where key is intent" - ).all() - if not intent_configurations: - return - - intent_configuration = intent_configurations[0] - if len(intent_configuration) > 1: - self.log.warning(( - "Found more than one `intent` custom attribute." - " Using first found." 
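# [Editor's aside -- not part of the diff] The 'ftr_meta' values assembled
# above are plain JSON strings stored in component metadata. A hedged sketch
# of the two payload shapes the plugin produces (values are hypothetical):
import json

# Reviewable video: frame range plus a float frame rate.
video_ftr_meta = json.dumps({
    "frameIn": 0,        # reviews always start at frame 0
    "frameOut": 100,     # duration in frames
    "frameRate": 25.0,
})

# Still image: resolution plus an explicit "image" format flag.
image_ftr_meta = json.dumps({
    "width": 1920,
    "height": 1080,
    "format": "image",
})
# [end aside]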
- )) - - config = intent_configuration.get("config") - if not config: - return - - configuration = json.loads(config) - items = configuration.get("data") - if not items: - return - - if sys.version_info[0] < 3: - string_type = basestring - else: - string_type = str - - if isinstance(items, string_type): - items = json.loads(items) - - intent_label = None - for item in items: - if item["value"] == intent_value: - intent_label = item["menu"] - break - - return intent_label - def process(self, instance): - comment = (instance.context.data.get("comment") or "").strip() + # Check if there are any integrated AssetVersion entities + asset_versions_key = "ftrackIntegratedAssetVersionsData" + asset_versions_data_by_id = instance.data.get(asset_versions_key) + if not asset_versions_data_by_id: + self.log.info("There are no integrated AssetVersions") + return + + context = instance.context + host_name = context.data["hostName"] + app_name = context.data["appName"] + app_label = context.data["appLabel"] + comment = instance.data["comment"] if not comment: self.log.info("Comment is not set.") - return + else: + self.log.debug("Comment is set to `{}`".format(comment)) - self.log.debug("Comment is set to `{}`".format(comment)) - - session = instance.context.data["ftrackSession"] + session = context.data["ftrackSession"] intent = instance.context.data.get("intent") - if intent and isinstance(intent, dict): - intent_val = intent.get("value") - intent_label = intent.get("label") - else: - intent_val = intent_label = intent - - final_label = None - if intent_val: - final_label = self.get_intent_label(session, intent_val) - if final_label is None: - final_label = intent_label + intent_label = None + if intent: + value = intent["value"] + if value: + intent_label = intent["label"] or value # if intent label is set then format comment # - it is possible that intent_label is equal to "" (empty string) - if final_label: - msg = "Intent label is set to `{}`.".format(final_label) - comment = self.note_with_intent_template.format(**{ - "intent": final_label, - "comment": comment - }) - - elif intent_val: - msg = ( - "Intent is set to `{}` and was not added" - " to comment because label is set to `{}`." - ).format(intent_val, final_label) + if intent_label: + self.log.debug( + "Intent label is set to `{}`.".format(intent_label) + ) else: - msg = "Intent is not set." 
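# [Editor's aside -- not part of the diff] A hedged sketch of the
# 'ftrackIntegratedAssetVersionsData' structure this plugin iterates over,
# inferred from how the code consumes it (an 'asset_version' entity plus a
# list of 'component_items'); any keys beyond those two are assumptions.
example_asset_versions_data_by_id = {
    "<asset-version-id>": {
        "asset_version": None,  # an ftrack AssetVersion entity in practice
        "component_items": [
            {"component_path": "/path/to/published/file.mov"},
        ],
    },
}
# [end aside]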
- - self.log.debug(msg) - - asset_versions_key = "ftrackIntegratedAssetVersions" - asset_versions = instance.data.get(asset_versions_key) - if not asset_versions: - self.log.info("There are any integrated AssetVersions") - return + self.log.debug("Intent is not set.") user = session.query( "User where username is \"{}\"".format(session.api_user) @@ -122,7 +82,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels = [] if self.note_labels: - all_labels = session.query("NoteLabel").all() + all_labels = session.query("select id, name from NoteLabel").all() labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels} for _label in self.note_labels: label = labels_by_low_name.get(_label.lower()) @@ -134,8 +94,52 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): labels.append(label) - for asset_version in asset_versions: - asset_version.create_note(comment, author=user, labels=labels) + base_format_data = { + "host_name": host_name, + "app_name": app_name, + "app_label": app_label, + "source": instance.data.get("source", '') + } + if comment: + base_format_data["comment"] = comment + for asset_version_data in asset_versions_data_by_id.values(): + asset_version = asset_version_data["asset_version"] + component_items = asset_version_data["component_items"] + + published_paths = set() + for component_item in component_items: + published_paths.add(component_item["component_path"]) + + # Backwards compatibility for older settings using + # attribute 'note_with_intent_template' + template = self.note_template + if template is None: + template = self.note_with_intent_template + format_data = copy.deepcopy(base_format_data) + format_data["published_paths"] = "
".join( + sorted(published_paths) + ) + if intent: + if "{intent}" in template: + format_data["intent"] = intent_label + else: + format_data["intent"] = intent + + note_text = StringTemplate.format_template(template, format_data) + if not note_text.solved: + self.log.warning(( + "Note template require more keys then can be provided." + "\nTemplate: {}\nMissing values for keys:{}\nData: {}" + ).format(template, note_text.missing_keys, format_data)) + continue + + if not note_text: + self.log.info(( + "Note for AssetVersion {} would be empty. Skipping." + "\nTemplate: {}\nData: {}" + ).format(asset_version["id"], template, format_data)) + continue + asset_version.create_note(note_text, author=user, labels=labels) try: session.commit() diff --git a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py index 61892240d7..046dfd9ad8 100644 --- a/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py +++ b/openpype/modules/ftrack/plugins/publish/integrate_hierarchy_ftrack.py @@ -1,11 +1,14 @@ import sys import collections import six -import pyblish.api -from avalon import io +from copy import deepcopy + +import pyblish.api + +from openpype.client import get_asset_by_id +from openpype.lib import filter_profiles +from openpype.pipeline import KnownPublishError -# Copy of constant `openpype_modules.ftrack.lib.avalon_sync.CUST_ATTR_AUTO_SYNC` -CUST_ATTR_AUTO_SYNC = "avalon_auto_sync" CUST_ATTR_GROUP = "openpype" @@ -14,7 +17,6 @@ CUST_ATTR_GROUP = "openpype" def get_pype_attr(session, split_hierarchical=True): custom_attributes = [] hier_custom_attributes = [] - # TODO remove deprecated "avalon" group from query cust_attrs_query = ( "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" @@ -63,126 +65,295 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): order = pyblish.api.IntegratorOrder - 0.04 label = 'Integrate Hierarchy To Ftrack' families = ["shot"] - hosts = ["hiero", "resolve", "standalonepublisher", "flame"] + hosts = [ + "hiero", + "resolve", + "standalonepublisher", + "flame", + "traypublisher" + ] optional = False + create_task_status_profiles = [] def process(self, context): - self.context = context - if "hierarchyContext" not in self.context.data: + if "hierarchyContext" not in context.data: return - hierarchy_context = self.context.data["hierarchyContext"] + hierarchy_context = self._get_active_assets(context) + self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) - self.session = self.context.data["ftrackSession"] - project_name = self.context.data["projectEntity"]["name"] - query = 'Project where full_name is "{}"'.format(project_name) - project = self.session.query(query).one() - auto_sync_state = project[ - "custom_attributes"][CUST_ATTR_AUTO_SYNC] + session = context.data["ftrackSession"] + project_name = context.data["projectName"] + project = session.query( + 'select id, full_name from Project where full_name is "{}"'.format( + project_name + ) + ).first() + if not project: + raise KnownPublishError( + "Project \"{}\" was not found on ftrack.".format(project_name) + ) - if not io.Session: - io.install() + self.context = context + self.session = session + self.ft_project = project + self.task_types = self.get_all_task_types(project) + self.task_statuses = self.get_task_statuses(project) - self.ft_project = None + # import ftrack hierarchy + self.import_to_ftrack(project_name, hierarchy_context) - 
input_data = hierarchy_context + def query_ftrack_entities(self, session, ft_project): + project_id = ft_project["id"] + entities = session.query(( + "select id, name, parent_id" + " from TypedContext where project_id is \"{}\"" + ).format(project_id)).all() - # disable termporarily ftrack project's autosyncing - if auto_sync_state: - self.auto_sync_off(project) + entities_by_id = {} + entities_by_parent_id = collections.defaultdict(list) + for entity in entities: + entities_by_id[entity["id"]] = entity + parent_id = entity["parent_id"] + entities_by_parent_id[parent_id].append(entity) - try: - # import ftrack hierarchy - self.import_to_ftrack(input_data) - except Exception: - raise - finally: - if auto_sync_state: - self.auto_sync_on(project) + ftrack_hierarchy = [] + ftrack_id_queue = collections.deque() + ftrack_id_queue.append((project_id, ftrack_hierarchy)) + while ftrack_id_queue: + item = ftrack_id_queue.popleft() + ftrack_id, parent_list = item + if ftrack_id == project_id: + entity = ft_project + name = entity["full_name"] + else: + entity = entities_by_id[ftrack_id] + name = entity["name"] - def import_to_ftrack(self, input_data, parent=None): + children = [] + parent_list.append({ + "name": name, + "low_name": name.lower(), + "entity": entity, + "children": children, + }) + for child in entities_by_parent_id[ftrack_id]: + ftrack_id_queue.append((child["id"], children)) + return ftrack_hierarchy + + def find_matching_ftrack_entities( + self, hierarchy_context, ftrack_hierarchy + ): + walk_queue = collections.deque() + for entity_name, entity_data in hierarchy_context.items(): + walk_queue.append( + (entity_name, entity_data, ftrack_hierarchy) + ) + + matching_ftrack_entities = [] + while walk_queue: + item = walk_queue.popleft() + entity_name, entity_data, ft_children = item + matching_ft_child = None + for ft_child in ft_children: + if ft_child["low_name"] == entity_name.lower(): + matching_ft_child = ft_child + break + + if matching_ft_child is None: + continue + + entity = matching_ft_child["entity"] + entity_data["ft_entity"] = entity + matching_ftrack_entities.append(entity) + + hierarchy_children = entity_data.get("childs") + if not hierarchy_children: + continue + + for child_name, child_data in hierarchy_children.items(): + walk_queue.append( + (child_name, child_data, matching_ft_child["children"]) + ) + return matching_ftrack_entities + + def query_custom_attribute_values(self, session, entities, hier_attrs): + attr_ids = { + attr["id"] + for attr in hier_attrs + } + entity_ids = { + entity["id"] + for entity in entities + } + output = { + entity_id: {} + for entity_id in entity_ids + } + if not attr_ids or not entity_ids: + return {} + + joined_attr_ids = ",".join( + ['"{}"'.format(attr_id) for attr_id in attr_ids] + ) + + # Query values in chunks + chunk_size = int(5000 / len(attr_ids)) + # Make sure entity_ids is `list` for chunk selection + entity_ids = list(entity_ids) + results = [] + for idx in range(0, len(entity_ids), chunk_size): + joined_entity_ids = ",".join([ + '"{}"'.format(entity_id) + for entity_id in entity_ids[idx:idx + chunk_size] + ]) + results.extend( + session.query( + ( + "select value, entity_id, configuration_id" + " from CustomAttributeValue" + " where entity_id in ({}) and configuration_id in ({})" + ).format( + joined_entity_ids, + joined_attr_ids + ) + ).all() + ) + + for result in results: + attr_id = result["configuration_id"] + entity_id = result["entity_id"] + output[entity_id][attr_id] = result["value"] + + return output + + def 
import_to_ftrack(self, project_name, hierarchy_context): # Prequery hiearchical custom attributes - hier_custom_attributes = get_pype_attr(self.session)[1] + hier_attrs = get_pype_attr(self.session)[1] hier_attr_by_key = { attr["key"]: attr - for attr in hier_custom_attributes + for attr in hier_attrs } + # Query user entity (for comments) + user = self.session.query( + "User where username is \"{}\"".format(self.session.api_user) + ).first() + if not user: + self.log.warning( + "Was not able to query current User {}".format( + self.session.api_user + ) + ) + + # Query ftrack hierarchy with parenting + ftrack_hierarchy = self.query_ftrack_entities( + self.session, self.ft_project) + + # Fill ftrack entities to hierarchy context + # - there is no need to query entities again + matching_entities = self.find_matching_ftrack_entities( + hierarchy_context, ftrack_hierarchy) + # Query custom attribute values of each entity + custom_attr_values_by_id = self.query_custom_attribute_values( + self.session, matching_entities, hier_attrs) + # Get ftrack api module (as they are different per python version) ftrack_api = self.context.data["ftrackPythonModule"] - for entity_name in input_data: - entity_data = input_data[entity_name] + # Use queue of hierarchy items to process + import_queue = collections.deque() + for entity_name, entity_data in hierarchy_context.items(): + import_queue.append( + (entity_name, entity_data, None) + ) + + while import_queue: + item = import_queue.popleft() + entity_name, entity_data, parent = item + entity_type = entity_data['entity_type'] self.log.debug(entity_data) - self.log.debug(entity_type) - if entity_type.lower() == 'project': - query = 'Project where full_name is "{}"'.format(entity_name) - entity = self.session.query(query).one() - self.ft_project = entity - self.task_types = self.get_all_task_types(entity) - - elif self.ft_project is None or parent is None: + entity = entity_data.get("ft_entity") + if entity is None and entity_type.lower() == "project": raise AssertionError( "Collected items are not in right order!" 
) - # try to find if entity already exists - else: - query = ( - 'TypedContext where name is "{0}" and ' - 'project_id is "{1}"' - ).format(entity_name, self.ft_project["id"]) - try: - entity = self.session.query(query).one() - except Exception: - entity = None - # Create entity if not exists if entity is None: - entity = self.create_entity( - name=entity_name, - type=entity_type, - parent=parent - ) + entity = self.session.create(entity_type, { + "name": entity_name, + "parent": parent + }) + entity_data["ft_entity"] = entity + + # self.log.info('entity: {}'.format(dict(entity))) # CUSTOM ATTRIBUTES - custom_attributes = entity_data.get('custom_attributes', []) - instances = [ - i for i in self.context if i.data['asset'] in entity['name'] - ] - for key in custom_attributes: + custom_attributes = entity_data.get('custom_attributes', {}) + instances = [] + for instance in self.context: + instance_asset_name = instance.data.get("asset") + if ( + instance_asset_name + and instance_asset_name.lower() == entity["name"].lower() + ): + instances.append(instance) + + for instance in instances: + instance.data["ftrackEntity"] = entity + + for key, cust_attr_value in custom_attributes.items(): + if cust_attr_value is None: + continue + hier_attr = hier_attr_by_key.get(key) # Use simple method if key is not hierarchical if not hier_attr: - assert (key in entity['custom_attributes']), ( - 'Missing custom attribute key: `{0}` in attrs: ' - '`{1}`'.format(key, entity['custom_attributes'].keys()) + if key not in entity["custom_attributes"]: + raise KnownPublishError(( + "Missing custom attribute in ftrack with name '{}'" + ).format(key)) + + entity['custom_attributes'][key] = cust_attr_value + continue + + attr_id = hier_attr["id"] + entity_values = custom_attr_values_by_id.get(entity["id"], {}) + # New value is defined by having id in values + # - it can be set to 'None' (ftrack allows that using API) + is_new_value = attr_id not in entity_values + attr_value = entity_values.get(attr_id) + + # Use ftrack operations method to set hierarchical + # attribute value. + # - this is because there may be non hierarchical custom + # attributes with different properties + entity_key = collections.OrderedDict(( + ("configuration_id", hier_attr["id"]), + ("entity_id", entity["id"]) + )) + op = None + if is_new_value: + op = ftrack_api.operation.CreateEntityOperation( + "CustomAttributeValue", + entity_key, + {"value": cust_attr_value} + ) - entity['custom_attributes'][key] = custom_attributes[key] - - else: - # Use ftrack operations method to set hiearchical - # attribute value. 
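# [Editor's aside -- not part of the diff] The create-vs-update branching
# above exists because ftrack stores hierarchical attribute values as
# separate 'CustomAttributeValue' rows. A condensed sketch of the pattern;
# 'session' and 'ftrack_api' stand in for the objects used by the plugin.
import collections

def push_hier_value(session, ftrack_api, attr_id, entity_id,
                    old_value, new_value, value_exists):
    entity_key = collections.OrderedDict((
        ("configuration_id", attr_id),
        ("entity_id", entity_id),
    ))
    if not value_exists:
        # No value row yet -> create it.
        op = ftrack_api.operation.CreateEntityOperation(
            "CustomAttributeValue", entity_key, {"value": new_value})
    elif old_value != new_value:
        # Row exists with a different value -> update it.
        op = ftrack_api.operation.UpdateEntityOperation(
            "CustomAttributeValue", entity_key, "value",
            old_value, new_value)
    else:
        return
    session.recorded_operations.push(op)
# [end aside]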
- # - this is because there may be non hiearchical custom - # attributes with different properties - entity_key = collections.OrderedDict() - entity_key["configuration_id"] = hier_attr["id"] - entity_key["entity_id"] = entity["id"] - self.session.recorded_operations.push( - ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - entity_key, - "value", - ftrack_api.symbol.NOT_SET, - custom_attributes[key] - ) + elif attr_value != cust_attr_value: + op = ftrack_api.operation.UpdateEntityOperation( + "CustomAttributeValue", + entity_key, + "value", + attr_value, + cust_attr_value ) - for instance in instances: - instance.data['ftrackEntity'] = entity + if op is not None: + self.session.recorded_operations.push(op) + if self.session.recorded_operations: try: self.session.commit() except Exception: @@ -192,13 +363,22 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # TASKS + instances_by_task_name = collections.defaultdict(list) + for instance in instances: + task_name = instance.data.get("task") + if task_name: + instances_by_task_name[task_name.lower()].append(instance) + tasks = entity_data.get('tasks', []) existing_tasks = [] tasks_to_create = [] for child in entity['children']: - if child.entity_type.lower() == 'task': - existing_tasks.append(child['name'].lower()) - # existing_tasks.append(child['type']['name']) + if child.entity_type.lower() == "task": + task_name_low = child["name"].lower() + existing_tasks.append(task_name_low) + + for instance in instances_by_task_name[task_name_low]: + instance.data["ftrackTask"] = child for task_name in tasks: task_type = tasks[task_name]["type"] @@ -208,21 +388,17 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): tasks_to_create.append((task_name, task_type)) for task_name, task_type in tasks_to_create: - self.create_task( + task_entity = self.create_task( name=task_name, task_type=task_type, parent=entity ) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + + for instance in instances_by_task_name[task_name.lower()]: + instance.data["ftrackTask"] = task_entity # Incoming links. - self.create_links(entity_data, entity) + self.create_links(project_name, entity_data, entity) try: self.session.commit() except Exception: @@ -232,32 +408,30 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # Create notes. - user = self.session.query( - "User where username is \"{}\"".format(self.session.api_user) - ).first() - if user: - for comment in entity_data.get("comments", []): + entity_comments = entity_data.get("comments") + if user and entity_comments: + for comment in entity_comments: entity.create_note(comment, user) - else: - self.log.warning( - "Was not able to query current User {}".format( - self.session.api_user - ) - ) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + + try: + self.session.commit() + except Exception: + tp, value, tb = sys.exc_info() + self.session.rollback() + self.session._configure_locations() + six.reraise(tp, value, tb) # Import children. 
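# [Editor's aside -- not part of the diff] Both the status resolution earlier
# and 'create_task' below rely on 'filter_profiles' (imported from
# 'openpype.lib' in this diff), which returns the first profile whose filter
# values match the given criteria; the matching rules sketched here are an
# assumption, and the profile data is hypothetical.
from openpype.lib import filter_profiles

profiles = [
    {"task_types": ["Animation"], "status_name": "In Progress"},
    {"task_types": [], "status_name": "Ready"},  # empty filter = wildcard
]
profile = filter_profiles(profiles, {"task_types": "Animation"})
# 'profile["status_name"]' would then be looked up among the project's
# task statuses, as 'create_task' does below.
# [end aside]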
- if 'childs' in entity_data: - self.import_to_ftrack( - entity_data['childs'], entity) + children = entity_data.get("childs") + if not children: + continue - def create_links(self, entity_data, entity): + for entity_name, entity_data in children.items(): + import_queue.append( + (entity_name, entity_data, entity) + ) + + def create_links(self, project_name, entity_data, entity): # Clear existing links. for link in entity.get("incoming_links", []): self.session.delete(link) @@ -270,9 +444,15 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): six.reraise(tp, value, tb) # Create new links. - for input in entity_data.get("inputs", []): - input_id = io.find_one({"_id": input})["data"]["ftrackId"] - assetbuild = self.session.get("AssetBuild", input_id) + for asset_id in entity_data.get("inputs", []): + asset_doc = get_asset_by_id(project_name, asset_id) + ftrack_id = None + if asset_doc: + ftrack_id = asset_doc["data"].get("ftrackId") + if not ftrack_id: + continue + + assetbuild = self.session.get("AssetBuild", ftrack_id) self.log.debug( "Creating link from {0} to {1}".format( assetbuild["name"], entity["name"] @@ -293,7 +473,37 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): return tasks + def get_task_statuses(self, project_entity): + project_schema = project_entity["project_schema"] + task_workflow_statuses = project_schema["_task_workflow"]["statuses"] + return { + status["id"]: status + for status in task_workflow_statuses + } + def create_task(self, name, task_type, parent): + filter_data = { + "task_names": name, + "task_types": task_type + } + profile = filter_profiles( + self.create_task_status_profiles, + filter_data + ) + status_id = None + if profile: + status_name = profile["status_name"] + status_name_low = status_name.lower() + for _status_id, status in self.task_statuses.items(): + if status["name"].lower() == status_name_low: + status_id = _status_id + break + + if status_id is None: + self.log.warning( + "Task status \"{}\" was not found".format(status_name) + ) + task = self.session.create('Task', { 'name': name, 'parent': parent @@ -302,6 +512,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): self.log.info(task_type) self.log.info(self.task_types) task['type'] = self.task_types[task_type] + if status_id is not None: + task["status_id"] = status_id try: self.session.commit() @@ -313,44 +525,38 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): return task - def create_entity(self, name, type, parent): - entity = self.session.create(type, { - 'name': name, - 'parent': parent - }) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + def _get_active_assets(self, context): + """ Returns only asset dictionary. 
+ Usually the last part of a deep dictionary which + does not have any children + """ + def get_pure_hierarchy_data(input_dict): + input_dict_copy = deepcopy(input_dict) + for key in input_dict.keys(): + self.log.debug("__ key: {}".format(key)) + # check if child key is available + if input_dict[key].get("childs"): + # loop deeper + input_dict_copy[ + key]["childs"] = get_pure_hierarchy_data( + input_dict[key]["childs"]) + elif key not in active_assets: + input_dict_copy.pop(key, None) + return input_dict_copy - return entity + hierarchy_context = context.data["hierarchyContext"] - def auto_sync_off(self, project): - project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = False + active_assets = set() + # filter only the active publishing instances + for instance in context: + if instance.data.get("publish") is False: + continue - self.log.info("Ftrack autosync swithed off") + asset_name = instance.data.get("asset") + if asset_name: + active_assets.add(asset_name) - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + # active_assets is a set, so duplicates are already removed + self.log.debug("__ active_assets: {}".format(list(active_assets))) - def auto_sync_on(self, project): - - project["custom_attributes"][CUST_ATTR_AUTO_SYNC] = True - - self.log.info("Ftrack autosync swithed on") - - try: - self.session.commit() - except Exception: - tp, value, tb = sys.exc_info() - self.session.rollback() - self.session._configure_locations() - six.reraise(tp, value, tb) + return get_pure_hierarchy_data(hierarchy_context) diff --git a/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py b/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py index dc80bf4eb3..489f291c0f 100644 --- a/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py +++ b/openpype/modules/ftrack/plugins/publish/validate_custom_ftrack_attributes.py @@ -1,5 +1,5 @@ import pyblish.api -import openpype.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateFtrackAttributes(pyblish.api.InstancePlugin): @@ -34,7 +34,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin): """ label = "Validate Custom Ftrack Attributes" - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder families = ["ftrack"] optional = True # Ignore standalone host, because it does not have an Ftrack entity diff --git a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py index 1a5da44432..78f9d135b7 100644 --- a/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py +++ b/openpype/modules/ftrack/python2_vendor/ftrack-python-api/source/ftrack_api/session.py @@ -13,10 +13,9 @@ import functools import itertools import distutils.version import hashlib -import tempfile +import appdirs import threading import atexit -import warnings import requests import requests.auth @@ -241,7 +240,7 @@ class Session(object): ) self._auto_connect_event_hub_thread = None - if auto_connect_event_hub in (None, True): + if auto_connect_event_hub is True: # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. 
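# [Editor's aside -- not part of the diff] The vendored session change that
# continues below swaps the schema cache default from the temp directory to
# a per-user cache directory. A sketch of the resulting resolution order,
# assuming the 'appdirs' package is available:
import os
import appdirs

def resolve_schema_cache_path():
    # The environment variable wins; otherwise use the user cache dir.
    default = appdirs.user_cache_dir()
    return os.environ.get("FTRACK_API_SCHEMA_CACHE_PATH", default)
# [end aside]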
self._auto_connect_event_hub_thread = threading.Thread( @@ -252,9 +251,7 @@ class Session(object): # To help with migration from auto_connect_event_hub default changing # from True to False. - self._event_hub._deprecation_warning_auto_connect = ( - auto_connect_event_hub is None - ) + self._event_hub._deprecation_warning_auto_connect = False # Register to auto-close session on exit. atexit.register(WeakMethod(self.close)) @@ -271,8 +268,9 @@ class Session(object): # rebuilding types)? if schema_cache_path is not False: if schema_cache_path is None: + schema_cache_path = appdirs.user_cache_dir() schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() + 'FTRACK_API_SCHEMA_CACHE_PATH', schema_cache_path ) schema_cache_path = os.path.join( diff --git a/openpype/modules/ftrack/scripts/sub_event_processor.py b/openpype/modules/ftrack/scripts/sub_event_processor.py index d1e2e3aaeb..a5ce0511b8 100644 --- a/openpype/modules/ftrack/scripts/sub_event_processor.py +++ b/openpype/modules/ftrack/scripts/sub_event_processor.py @@ -4,6 +4,8 @@ import signal import socket import datetime +import ftrack_api + from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -12,17 +14,12 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype.modules import ModulesManager -from openpype.api import Logger from openpype.lib import ( + Logger, get_openpype_version, get_build_version ) - -import ftrack_api - -log = Logger().get_logger("Event processor") - subprocess_started = datetime.datetime.now() @@ -68,6 +65,8 @@ def register(session): def main(args): + log = Logger.get_logger("Event processor") + port = int(args[-1]) # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) diff --git a/openpype/modules/ftrack/scripts/sub_event_status.py b/openpype/modules/ftrack/scripts/sub_event_status.py index 3163642e3f..dc5836e7f2 100644 --- a/openpype/modules/ftrack/scripts/sub_event_status.py +++ b/openpype/modules/ftrack/scripts/sub_event_status.py @@ -7,16 +7,19 @@ import signal import socket import datetime +import appdirs + import ftrack_api from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, StatusEventHub, TOPIC_STATUS_SERVER, - TOPIC_STATUS_SERVER_RESULT + TOPIC_STATUS_SERVER_RESULT, + get_host_ip ) -from openpype.api import Logger from openpype.lib import ( + Logger, is_current_version_studio_latest, is_running_from_build, get_expected_version, @@ -27,10 +30,10 @@ log = Logger.get_logger("Event storer") action_identifier = ( "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"] ) -host_ip = socket.gethostbyname(socket.gethostname()) +host_ip = get_host_ip() action_data = { "label": "OpenPype Admin", - "variant": "- Event server Status ({})".format(host_ip), + "variant": "- Event server Status ({})".format(host_ip or "IP N/A"), "description": "Get Infromation about event server", "actionIdentifier": action_identifier } @@ -253,6 +256,15 @@ class StatusFactory: ) }) + items.append({ + "type": "label", + "value": ( + "Local versions dir: {}
Version repository path: {}" + ).format( + appdirs.user_data_dir("openpype", "pypeclub"), + os.environ.get("OPENPYPE_PATH") + ) + }) items.append({"type": "label", "value": "---"}) return items diff --git a/openpype/modules/ftrack/scripts/sub_event_storer.py b/openpype/modules/ftrack/scripts/sub_event_storer.py index 5543ed74e2..a7e77951af 100644 --- a/openpype/modules/ftrack/scripts/sub_event_storer.py +++ b/openpype/modules/ftrack/scripts/sub_event_storer.py @@ -6,6 +6,8 @@ import socket import pymongo import ftrack_api + +from openpype.client import OpenPypeMongoConnection from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -15,11 +17,10 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype_modules.ftrack.lib import get_ftrack_event_mongo_info from openpype.lib import ( - OpenPypeMongoConnection, + Logger, get_openpype_version, get_build_version ) -from openpype.api import Logger log = Logger.get_logger("Event storer") subprocess_started = datetime.datetime.now() @@ -67,7 +68,7 @@ def launch(event): except pymongo.errors.AutoReconnect: log.error("Mongo server \"{}\" is not responding, exiting.".format( - os.environ["AVALON_MONGO"] + os.environ["OPENPYPE_MONGO"] )) sys.exit(0) diff --git a/openpype/modules/ftrack/scripts/sub_legacy_server.py b/openpype/modules/ftrack/scripts/sub_legacy_server.py index e3a623c376..1f0fc1b369 100644 --- a/openpype/modules/ftrack/scripts/sub_legacy_server.py +++ b/openpype/modules/ftrack/scripts/sub_legacy_server.py @@ -5,11 +5,11 @@ import signal import threading import ftrack_api -from openpype.api import Logger +from openpype.lib import Logger from openpype.modules import ModulesManager from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer -log = Logger().get_logger("Event Server Legacy") +log = Logger.get_logger("Event Server Legacy") class TimerChecker(threading.Thread): diff --git a/openpype/modules/ftrack/scripts/sub_user_server.py b/openpype/modules/ftrack/scripts/sub_user_server.py index a3701a0950..930a2d51e2 100644 --- a/openpype/modules/ftrack/scripts/sub_user_server.py +++ b/openpype/modules/ftrack/scripts/sub_user_server.py @@ -2,6 +2,7 @@ import sys import signal import socket +from openpype.lib import Logger from openpype_modules.ftrack.ftrack_server.ftrack_server import FtrackServer from openpype_modules.ftrack.ftrack_server.lib import ( SocketSession, @@ -9,9 +10,7 @@ from openpype_modules.ftrack.ftrack_server.lib import ( ) from openpype.modules import ModulesManager -from openpype.api import Logger - -log = Logger().get_logger("FtrackUserServer") +log = Logger.get_logger("FtrackUserServer") def main(args): diff --git a/openpype/modules/ftrack/tray/ftrack_tray.py b/openpype/modules/ftrack/tray/ftrack_tray.py index c6201a94f6..156b3a86fd 100644 --- a/openpype/modules/ftrack/tray/ftrack_tray.py +++ b/openpype/modules/ftrack/tray/ftrack_tray.py @@ -2,24 +2,22 @@ import os import time import datetime import threading -from Qt import QtCore, QtWidgets, QtGui import ftrack_api -from ..ftrack_server.lib import check_ftrack_url -from ..ftrack_server import socket_thread -from ..lib import credentials -from ..ftrack_module import FTRACK_MODULE_DIR +from qtpy import QtCore, QtWidgets, QtGui + +from openpype import resources +from openpype.lib import Logger +from openpype_modules.ftrack import resolve_ftrack_url, FTRACK_MODULE_DIR +from openpype_modules.ftrack.ftrack_server import socket_thread +from 
openpype_modules.ftrack.lib import credentials from . import login_dialog -from openpype.api import Logger, resources - - -log = Logger().get_logger("FtrackModule") - class FtrackTrayWrapper: def __init__(self, module): self.module = module + self.log = Logger.get_logger(self.__class__.__name__) self.thread_action_server = None self.thread_socket_server = None @@ -48,6 +46,9 @@ class FtrackTrayWrapper: self.widget_login.activateWindow() self.widget_login.raise_() + def show_ftrack_browser(self): + QtGui.QDesktopServices.openUrl(self.module.ftrack_url) + def validate(self): validation = False cred = credentials.get_credentials() @@ -57,19 +58,19 @@ class FtrackTrayWrapper: if validation: self.widget_login.set_credentials(ft_user, ft_api_key) self.module.set_credentials_to_env(ft_user, ft_api_key) - log.info("Connected to Ftrack successfully") + self.log.info("Connected to Ftrack successfully") self.on_login_change() return validation if not validation and ft_user and ft_api_key: - log.warning( + self.log.warning( "Current Ftrack credentials are not valid. {}: {} - {}".format( str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key ) ) - log.info("Please sign in to Ftrack") + self.log.info("Please sign in to Ftrack") self.bool_logged = False self.show_login_widget() self.set_menu_visibility() @@ -99,7 +100,7 @@ class FtrackTrayWrapper: self.action_credentials.setIcon(self.icon_not_logged) self.action_credentials.setToolTip("Logged out") - log.info("Logged out of Ftrack") + self.log.info("Logged out of Ftrack") self.bool_logged = False self.set_menu_visibility() @@ -121,10 +122,6 @@ class FtrackTrayWrapper: ftrack_url = self.module.ftrack_url os.environ["FTRACK_SERVER"] = ftrack_url - parent_file_path = os.path.dirname( - os.path.dirname(os.path.realpath(__file__)) - ) - min_fail_seconds = 5 max_fail_count = 3 wait_time_after_max_fail = 10 @@ -149,17 +146,19 @@ class FtrackTrayWrapper: # Main loop while True: if not self.bool_action_server_running: - log.debug("Action server was pushed to stop.") + self.log.debug("Action server was pushed to stop.") break # Check if accessible Ftrack and Mongo url if not ftrack_accessible: - ftrack_accessible = check_ftrack_url(ftrack_url) + ftrack_accessible = resolve_ftrack_url(ftrack_url) # Run threads only if Ftrack is accessible if not ftrack_accessible: if not printed_ftrack_error: - log.warning("Can't access Ftrack {}".format(ftrack_url)) + self.log.warning( + "Can't access Ftrack {}".format(ftrack_url) + ) if self.thread_socket_server is not None: self.thread_socket_server.stop() @@ -186,7 +185,7 @@ class FtrackTrayWrapper: self.set_menu_visibility() elif failed_count == max_fail_count: - log.warning(( + self.log.warning(( "Action server failed {} times." 
" I'll try to run again {}s later" ).format( @@ -200,7 +199,7 @@ class FtrackTrayWrapper: failed_count = 0 # If thread failed test Ftrack and Mongo connection - elif not self.thread_socket_server.isAlive(): + elif not self.thread_socket_server.is_alive(): self.thread_socket_server.join() self.thread_socket_server = None ftrack_accessible = False @@ -238,10 +237,10 @@ class FtrackTrayWrapper: self.thread_action_server.join() self.thread_action_server = None - log.info("Ftrack action server was forced to stop") + self.log.info("Ftrack action server was forced to stop") except Exception: - log.warning( + self.log.warning( "Error has happened during Killing action server", exc_info=True ) @@ -284,6 +283,13 @@ class FtrackTrayWrapper: tray_server_menu.addAction(self.action_server_stop) self.tray_server_menu = tray_server_menu + + # Ftrack Browser + browser_open = QtWidgets.QAction("Open Ftrack...", tray_menu) + browser_open.triggered.connect(self.show_ftrack_browser) + tray_menu.addAction(browser_open) + self.browser_open = browser_open + self.bool_logged = False self.set_menu_visibility() @@ -331,7 +337,7 @@ class FtrackTrayWrapper: self.thread_timer = None except Exception as e: - log.error("During Killing Timer event server: {0}".format(e)) + self.log.error("During Killing Timer event server: {0}".format(e)) def changed_user(self): self.stop_action_server() diff --git a/openpype/modules/ftrack/tray/login_dialog.py b/openpype/modules/ftrack/tray/login_dialog.py index 05d9226ca4..f374a71178 100644 --- a/openpype/modules/ftrack/tray/login_dialog.py +++ b/openpype/modules/ftrack/tray/login_dialog.py @@ -1,10 +1,13 @@ import os + import requests +from qtpy import QtCore, QtGui, QtWidgets + from openpype import style from openpype_modules.ftrack.lib import credentials -from . import login_tools from openpype import resources -from Qt import QtCore, QtGui, QtWidgets + +from . import login_tools class CredentialsDialog(QtWidgets.QDialog): @@ -136,8 +139,7 @@ class CredentialsDialog(QtWidgets.QDialog): self.fill_ftrack_url() def fill_ftrack_url(self): - url = os.getenv("FTRACK_SERVER") - checked_url = self.check_url(url) + checked_url = self.check_url() if checked_url == self.ftsite_input.text(): return @@ -151,7 +153,7 @@ class CredentialsDialog(QtWidgets.QDialog): self.api_input.setEnabled(enabled) self.user_input.setEnabled(enabled) - if not url: + if not checked_url: self.btn_advanced.hide() self.btn_simple.hide() self.btn_ftrack_login.hide() @@ -251,13 +253,13 @@ class CredentialsDialog(QtWidgets.QDialog): ) def _on_ftrack_login_clicked(self): - url = self.check_url(self.ftsite_input.text()) + url = self.check_url() if not url: return # If there is an existing server thread running we need to stop it. if self._login_server_thread: - if self._login_server_thread.isAlive(): + if self._login_server_thread.is_alive(): self._login_server_thread.stop() self._login_server_thread.join() self._login_server_thread = None @@ -299,21 +301,21 @@ class CredentialsDialog(QtWidgets.QDialog): if is_logged is not None: self.set_is_logged(is_logged) - def check_url(self, url): - if url is not None: - url = url.strip("/ ") - - if not url: + def check_url(self): + settings_url = self._module.settings_ftrack_url + url = self._module.ftrack_url + if not settings_url: self.set_error( "Ftrack URL is not defined in settings!" 
) return - if "http" not in url: - if url.endswith("ftrackapp.com"): - url = "https://" + url - else: - url = "https://{}.ftrackapp.com".format(url) + if url is None: + self.set_error( + "Specified URL does not lead to a valid Ftrack server." + ) + return + try: result = requests.get( url, diff --git a/openpype/modules/interfaces.py b/openpype/modules/interfaces.py index 13cbea690b..58b8b59931 100644 --- a/openpype/modules/interfaces.py +++ b/openpype/modules/interfaces.py @@ -1,8 +1,33 @@ -from abc import abstractmethod +from abc import ABCMeta, abstractmethod, abstractproperty + +import six from openpype import resources -from openpype.modules import OpenPypeInterface + +class _OpenPypeInterfaceMeta(ABCMeta): + """OpenPypeInterface meta class to print proper string.""" + + def __str__(self): + return "<'OpenPypeInterface.{}'>".format(self.__name__) + + def __repr__(self): + return str(self) + + +@six.add_metaclass(_OpenPypeInterfaceMeta) +class OpenPypeInterface: + """Base class for interfaces that can be used as mixins with abstract parts. + + This is the way an OpenPype module or addon can tell OpenPype that it + contains an implementation of specific functionality. + + Child classes of OpenPypeInterface may be used as mixins in different + OpenPype modules, which means they have to implement the methods defined + in the interface. By default, an interface does not have any abstract parts. + """ + + pass class IPluginPaths(OpenPypeInterface): @@ -14,21 +39,113 @@ "publish": ["path/to/publish_plugins"] } """ - # TODO validation of an output + @abstractmethod def get_plugin_paths(self): pass + def _get_plugin_paths_by_type(self, plugin_type): + paths = self.get_plugin_paths() + if not paths or plugin_type not in paths: + return [] + + paths = paths[plugin_type] + if not paths: + return [] + + if not isinstance(paths, (list, tuple, set)): + paths = [paths] + return paths + + def get_create_plugin_paths(self, host_name): + """Receive create plugin paths. + + Gives addons the ability to add create plugin paths based on host name. + + Notes: + Default implementation uses 'get_plugin_paths' and always returns + all create plugin paths. + + Args: + host_name (str): For which host are the plugins meant. + """ + + if hasattr(self, "get_creator_plugin_paths"): + # TODO remove in 3.16 + self.log.warning(( + "DEPRECATION WARNING: Using method 'get_creator_plugin_paths'" + " which was renamed to 'get_create_plugin_paths'." + )) + return self.get_creator_plugin_paths(host_name) + return self._get_plugin_paths_by_type("create") + + def get_load_plugin_paths(self, host_name): + """Receive load plugin paths. + + Gives addons the ability to add load plugin paths based on host name. + + Notes: + Default implementation uses 'get_plugin_paths' and always returns + all load plugin paths. + + Args: + host_name (str): For which host are the plugins meant. + """ + + return self._get_plugin_paths_by_type("load") + + def get_publish_plugin_paths(self, host_name): + """Receive publish plugin paths. + + Gives addons the ability to add publish plugin paths based on host name. + + Notes: + Default implementation uses 'get_plugin_paths' and always returns + all publish plugin paths. + + Args: + host_name (str): For which host are the plugins meant. + """ + + return self._get_plugin_paths_by_type("publish") + class ILaunchHookPaths(OpenPypeInterface): """Module has launch hook paths to return. + Modules don't have to inherit from this interface (changed 8.11.2022). 
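# [Editor's aside -- not part of the diff] A minimal sketch of an addon
# implementing the 'IPluginPaths' interface described above. The class name,
# paths and import locations are hypothetical assumptions.
import os

from openpype.modules import OpenPypeModule
from openpype.modules.interfaces import IPluginPaths

class MyAddon(OpenPypeModule, IPluginPaths):
    name = "my_addon"

    def initialize(self, settings):
        self.enabled = True

    def get_plugin_paths(self):
        # Keys match the types handled by '_get_plugin_paths_by_type'.
        root = os.path.dirname(os.path.abspath(__file__))
        return {
            "create": [os.path.join(root, "plugins", "create")],
            "load": [os.path.join(root, "plugins", "load")],
            "publish": [os.path.join(root, "plugins", "publish")],
        }
# [end aside]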
+ A module just has to implement 'get_launch_hook_paths' to be able to + take advantage of it. + Expected result is a list of paths. ["path/to/launch_hooks_dir"] + + Deprecated: + This interface is not needed since OpenPype 3.14.*. An addon just has to + implement 'get_launch_hook_paths', which can expect an Application object + or no argument. + + The interface class will be removed after 3.16.*. """ @abstractmethod - def get_launch_hook_paths(self): + def get_launch_hook_paths(self, app): + """Paths to directories with application launch hooks. + + The method can also be defined without arguments. + ```python + def get_launch_hook_paths(self): + return [] + ``` + + Args: + app (Application): Application object which can be used for + filtering of which launch hook paths are returned. + + Returns: + Iterable[str]: Paths to directories where launch hooks can be found. + """ + pass @@ -39,6 +156,7 @@ class ITrayModule(OpenPypeInterface): The module still must be usable if is not used in tray even if would do nothing. """ + tray_initialized = False _tray_manager = None @@ -51,16 +169,19 @@ This is where GUIs should be loaded or tray specific parts should be prepared. """ + pass @abstractmethod def tray_menu(self, tray_menu): """Add module's action to tray menu.""" + pass @abstractmethod def tray_start(self): """Start procedure in Pype tray.""" + pass @abstractmethod @@ -69,6 +190,7 @@ This is place where all threads should be shut. """ + pass def execute_in_main_thread(self, callback): @@ -77,6 +199,7 @@ Some callbacks need to be processed on main thread (menu actions must be added on main thread or they won't get triggered etc.) """ + if not self.tray_initialized: # TODO Called without initialized tray, still main thread needed try: @@ -101,6 +224,7 @@ msecs (int): Duration of message visibility in miliseconds. Default is 10000 msecs, may differ by Qt version. 
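# [Editor's aside -- not part of the diff] Sketch of the deprecation-safe
# 'get_launch_hook_paths' described above, defined directly on an addon.
# Assumes the Application object exposes 'host_name'; paths are hypothetical.
import os

class MyAddon:
    def get_launch_hook_paths(self, app):
        # 'app' may be used to return host-specific hooks only.
        if app is not None and getattr(app, "host_name", None) != "maya":
            return []
        return [os.path.join("/path/to/my_addon", "launch_hooks")]
# [end aside]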
""" + if self._tray_manager: self._tray_manager.show_tray_message(title, message, icon, msecs) @@ -136,7 +260,7 @@ class ITrayAction(ITrayModule): pass def tray_menu(self, tray_menu): - from Qt import QtWidgets + from qtpy import QtWidgets if self.admin_action: menu = self.admin_submenu(tray_menu) @@ -161,7 +285,7 @@ class ITrayAction(ITrayModule): @staticmethod def admin_submenu(tray_menu): if ITrayAction._admin_submenu is None: - from Qt import QtWidgets + from qtpy import QtWidgets admin_submenu = QtWidgets.QMenu("Admin", tray_menu) admin_submenu.menuAction().setVisible(False) @@ -193,7 +317,7 @@ class ITrayService(ITrayModule): @staticmethod def services_submenu(tray_menu): if ITrayService._services_submenu is None: - from Qt import QtWidgets + from qtpy import QtWidgets services_submenu = QtWidgets.QMenu("Services", tray_menu) services_submenu.menuAction().setVisible(False) @@ -208,7 +332,7 @@ class ITrayService(ITrayModule): @staticmethod def _load_service_icons(): - from Qt import QtGui + from qtpy import QtGui ITrayService._failed_icon = QtGui.QIcon( resources.get_resource("icons", "circle_red.png") @@ -239,7 +363,7 @@ class ITrayService(ITrayModule): return ITrayService._failed_icon def tray_menu(self, tray_menu): - from Qt import QtWidgets + from qtpy import QtWidgets action = QtWidgets.QAction( self.label, @@ -253,16 +377,19 @@ class ITrayService(ITrayModule): def set_service_running_icon(self): """Change icon of an QAction to green circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_running()) def set_service_failed_icon(self): """Change icon of an QAction to red circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_failed()) def set_service_idle_icon(self): """Change icon of an QAction to orange circle.""" + if self.menu_action: self.menu_action.setIcon(self.get_icon_idle()) @@ -276,6 +403,7 @@ class ISettingsChangeListener(OpenPypeInterface): "publish": ["path/to/publish_plugins"] } """ + @abstractmethod def on_system_settings_save( self, old_value, new_value, changes, new_value_metadata @@ -293,3 +421,24 @@ class ISettingsChangeListener(OpenPypeInterface): self, old_value, new_value, changes, project_name, new_value_metadata ): pass + + +class IHostAddon(OpenPypeInterface): + """Addon which also contain a host implementation.""" + + @abstractproperty + def host_name(self): + """Name of host which module represents.""" + + pass + + def get_workfile_extensions(self): + """Define workfile extensions for host. + + Not all hosts support workfiles thus this is optional implementation. + + Returns: + List[str]: Extensions used for workfiles with dot. + """ + + return [] diff --git a/openpype/modules/job_queue/module.py b/openpype/modules/job_queue/module.py index f1d7251e85..7075fcea14 100644 --- a/openpype/modules/job_queue/module.py +++ b/openpype/modules/job_queue/module.py @@ -43,7 +43,7 @@ import platform import click from openpype.modules import OpenPypeModule -from openpype.api import get_system_settings +from openpype.settings import get_system_settings class JobQueueModule(OpenPypeModule): diff --git a/openpype/modules/kitsu/__init__.py b/openpype/modules/kitsu/__init__.py new file mode 100644 index 0000000000..9220cb1762 --- /dev/null +++ b/openpype/modules/kitsu/__init__.py @@ -0,0 +1,9 @@ +""" Addon class definition and Settings definition must be imported here. + +If addon class or settings definition won't be here their definition won't +be found by OpenPype discovery. 
+""" + +from .kitsu_module import KitsuModule + +__all__ = ("KitsuModule",) diff --git a/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py b/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py new file mode 100644 index 0000000000..c95079e042 --- /dev/null +++ b/openpype/modules/kitsu/actions/launcher_show_in_kitsu.py @@ -0,0 +1,125 @@ +import webbrowser + +from openpype.pipeline import LauncherAction +from openpype.modules import ModulesManager +from openpype.client import get_project, get_asset_by_name + + +class ShowInKitsu(LauncherAction): + name = "showinkitsu" + label = "Show in Kitsu" + icon = "external-link-square" + color = "#e0e1e1" + order = 10 + + @staticmethod + def get_kitsu_module(): + return ModulesManager().modules_by_name.get("kitsu") + + def is_compatible(self, session): + if not session.get("AVALON_PROJECT"): + return False + + return True + + def process(self, session, **kwargs): + + # Context inputs + project_name = session["AVALON_PROJECT"] + asset_name = session.get("AVALON_ASSET", None) + task_name = session.get("AVALON_TASK", None) + + project = get_project(project_name=project_name, + fields=["data.zou_id"]) + if not project: + raise RuntimeError(f"Project {project_name} not found.") + + project_zou_id = project["data"].get("zou_id") + if not project_zou_id: + raise RuntimeError(f"Project {project_name} has no " + f"connected kitsu id.") + + asset_zou_name = None + asset_zou_id = None + asset_zou_type = 'Assets' + task_zou_id = None + zou_sub_type = ['AssetType', 'Sequence'] + if asset_name: + asset_zou_name = asset_name + asset_fields = ["data.zou.id", "data.zou.type"] + if task_name: + asset_fields.append(f"data.tasks.{task_name}.zou.id") + + asset = get_asset_by_name(project_name, + asset_name=asset_name, + fields=asset_fields) + + asset_zou_data = asset["data"].get("zou") + + if asset_zou_data: + asset_zou_type = asset_zou_data["type"] + if asset_zou_type not in zou_sub_type: + asset_zou_id = asset_zou_data["id"] + else: + asset_zou_type = asset_name + + if task_name: + task_data = asset["data"]["tasks"][task_name] + task_zou_data = task_data.get("zou", {}) + if not task_zou_data: + self.log.debug(f"No zou task data for task: {task_name}") + task_zou_id = task_zou_data["id"] + + # Define URL + url = self.get_url(project_id=project_zou_id, + asset_name=asset_zou_name, + asset_id=asset_zou_id, + asset_type=asset_zou_type, + task_id=task_zou_id) + + # Open URL in webbrowser + self.log.info(f"Opening URL: {url}") + webbrowser.open(url, + # Try in new tab + new=2) + + def get_url(self, + project_id, + asset_name=None, + asset_id=None, + asset_type=None, + task_id=None): + + shots_url = {'Shots', 'Sequence', 'Shot'} + sub_type = {'AssetType', 'Sequence'} + kitsu_module = self.get_kitsu_module() + + # Get kitsu url with /api stripped + kitsu_url = kitsu_module.server_url + if kitsu_url.endswith("/api"): + kitsu_url = kitsu_url[:-len("/api")] + + sub_url = f"/productions/{project_id}" + asset_type_url = "Shots" if asset_type in shots_url else "Assets" + + if task_id: + # Go to task page + # /productions/{project-id}/{asset_type}/tasks/{task_id} + sub_url += f"/{asset_type_url}/tasks/{task_id}" + + elif asset_id: + # Go to asset or shot page + # /productions/{project-id}/assets/{entity_id} + # /productions/{project-id}/shots/{entity_id} + sub_url += f"/{asset_type_url}/{asset_id}" + + else: + # Go to project page + # Project page must end with a view + # /productions/{project-id}/assets/ + # Add search method if is a sub_type + sub_url += 
f"/{asset_type_url}" + if asset_type in sub_type: + sub_url += f'?search={asset_name}' + + return f"{kitsu_url}{sub_url}" diff --git a/openpype/modules/kitsu/kitsu_module.py b/openpype/modules/kitsu/kitsu_module.py new file mode 100644 index 0000000000..b91373af20 --- /dev/null +++ b/openpype/modules/kitsu/kitsu_module.py @@ -0,0 +1,142 @@ +"""Kitsu module.""" + +import click +import os + +from openpype.modules import ( + OpenPypeModule, + IPluginPaths, + ITrayAction, +) + + +class KitsuModule(OpenPypeModule, IPluginPaths, ITrayAction): + """Kitsu module class.""" + + label = "Kitsu Connect" + name = "kitsu" + + def initialize(self, settings): + """Initialization of module.""" + module_settings = settings[self.name] + + # Enabled by settings + self.enabled = module_settings.get("enabled", False) + + # Add API URL schema + kitsu_url = module_settings["server"].strip() + if kitsu_url: + # Ensure web url + if not kitsu_url.startswith("http"): + kitsu_url = "https://" + kitsu_url + + # Check for "/api" url validity + if not kitsu_url.endswith("api"): + kitsu_url = "{}{}api".format( + kitsu_url, "" if kitsu_url.endswith("/") else "/" + ) + + self.server_url = kitsu_url + + # UI which must not be created at this time + self._dialog = None + + def tray_init(self): + """Tray init.""" + + self._create_dialog() + + def tray_start(self): + """Tray start.""" + from .utils.credentials import ( + load_credentials, + validate_credentials, + set_credentials_envs, + ) + + login, password = load_credentials() + + # Check credentials, ask them if needed + if validate_credentials(login, password): + set_credentials_envs(login, password) + else: + self.show_dialog() + + def get_global_environments(self): + """Kitsu's global environments.""" + return {"KITSU_SERVER": self.server_url} + + def _create_dialog(self): + # Don't recreate dialog if already exists + if self._dialog is not None: + return + + from .kitsu_widgets import KitsuPasswordDialog + + self._dialog = KitsuPasswordDialog() + + def show_dialog(self): + """Show dialog to log-in.""" + + # Make sure dialog is created + self._create_dialog() + + # Show dialog + self._dialog.open() + + def on_action_trigger(self): + """Implementation of abstract method for `ITrayAction`.""" + self.show_dialog() + + def get_plugin_paths(self): + """Implementation of abstract method for `IPluginPaths`.""" + current_dir = os.path.dirname(os.path.abspath(__file__)) + + return { + "publish": [os.path.join(current_dir, "plugins", "publish")], + "actions": [os.path.join(current_dir, "actions")] + } + + def cli(self, click_group): + click_group.add_command(cli_main) + + +@click.group(KitsuModule.name, help="Kitsu dynamic cli commands.") +def cli_main(): + pass + + +@cli_main.command() +@click.option("--login", envvar="KITSU_LOGIN", help="Kitsu login") +@click.option( + "--password", envvar="KITSU_PWD", help="Password for kitsu username" +) +def push_to_zou(login, password): + """Synchronize Zou database (Kitsu backend) with openpype database. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + from .utils.update_zou_with_op import sync_zou + + sync_zou(login, password) + + +@cli_main.command() +@click.option("-l", "--login", envvar="KITSU_LOGIN", help="Kitsu login") +@click.option( + "-p", "--password", envvar="KITSU_PWD", help="Password for kitsu username" +) +def sync_service(login, password): + """Synchronize openpype database from Zou sever database. 
+ + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + from .utils.update_op_with_zou import sync_all_projects + from .utils.sync_service import start_listeners + + sync_all_projects(login, password) + start_listeners(login, password) diff --git a/openpype/modules/kitsu/kitsu_widgets.py b/openpype/modules/kitsu/kitsu_widgets.py new file mode 100644 index 0000000000..5ff3613583 --- /dev/null +++ b/openpype/modules/kitsu/kitsu_widgets.py @@ -0,0 +1,188 @@ +from qtpy import QtWidgets, QtCore, QtGui + +from openpype import style +from openpype.modules.kitsu.utils.credentials import ( + clear_credentials, + load_credentials, + save_credentials, + set_credentials_envs, + validate_credentials, +) +from openpype.resources import get_resource +from openpype.settings.lib import ( + get_system_settings, +) + +from openpype.widgets.password_dialog import PressHoverButton + + +class KitsuPasswordDialog(QtWidgets.QDialog): + """Kitsu login dialog.""" + + finished = QtCore.Signal(bool) + + def __init__(self, parent=None): + super(KitsuPasswordDialog, self).__init__(parent) + + self.setWindowTitle("Kitsu Credentials") + self.resize(300, 120) + + system_settings = get_system_settings() + user_login, user_pwd = load_credentials() + remembered = bool(user_login or user_pwd) + + self._final_result = None + self._connectable = bool( + system_settings["modules"].get("kitsu", {}).get("server") + ) + + # Server label + server_message = ( + system_settings["modules"]["kitsu"]["server"] + if self._connectable + else "no server url set in Studio Settings..." + ) + server_label = QtWidgets.QLabel( + f"Server: {server_message}", + self, + ) + + # Login input + login_widget = QtWidgets.QWidget(self) + + login_label = QtWidgets.QLabel("Login:", login_widget) + + login_input = QtWidgets.QLineEdit( + login_widget, + text=user_login if remembered else None, + ) + login_input.setPlaceholderText("Your Kitsu account login...") + + login_layout = QtWidgets.QHBoxLayout(login_widget) + login_layout.setContentsMargins(0, 0, 0, 0) + login_layout.addWidget(login_label) + login_layout.addWidget(login_input) + + # Password input + password_widget = QtWidgets.QWidget(self) + + password_label = QtWidgets.QLabel("Password:", password_widget) + + password_input = QtWidgets.QLineEdit( + password_widget, + text=user_pwd if remembered else None, + ) + password_input.setPlaceholderText("Your password...") + password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + show_password_icon_path = get_resource("icons", "eye.png") + show_password_icon = QtGui.QIcon(show_password_icon_path) + show_password_btn = PressHoverButton(password_widget) + show_password_btn.setObjectName("PasswordBtn") + show_password_btn.setIcon(show_password_icon) + show_password_btn.setFocusPolicy(QtCore.Qt.ClickFocus) + + password_layout = QtWidgets.QHBoxLayout(password_widget) + password_layout.setContentsMargins(0, 0, 0, 0) + password_layout.addWidget(password_label) + password_layout.addWidget(password_input) + password_layout.addWidget(show_password_btn) + + # Message label + message_label = QtWidgets.QLabel("", self) + + # Buttons + buttons_widget = QtWidgets.QWidget(self) + + remember_checkbox = QtWidgets.QCheckBox("Remember", buttons_widget) + remember_checkbox.setObjectName("RememberCheckbox") + remember_checkbox.setChecked(remembered) + + ok_btn = QtWidgets.QPushButton("Ok", buttons_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", buttons_widget) + + buttons_layout = QtWidgets.QHBoxLayout(buttons_widget) + 
buttons_layout.setContentsMargins(0, 0, 0, 0) + buttons_layout.addWidget(remember_checkbox) + buttons_layout.addStretch(1) + buttons_layout.addWidget(ok_btn) + buttons_layout.addWidget(cancel_btn) + + # Main layout + layout = QtWidgets.QVBoxLayout(self) + layout.addSpacing(5) + layout.addWidget(server_label, 0) + layout.addSpacing(5) + layout.addWidget(login_widget, 0) + layout.addWidget(password_widget, 0) + layout.addWidget(message_label, 0) + layout.addStretch(1) + layout.addWidget(buttons_widget, 0) + + ok_btn.clicked.connect(self._on_ok_click) + cancel_btn.clicked.connect(self._on_cancel_click) + show_password_btn.change_state.connect(self._on_show_password) + + self.login_input = login_input + self.password_input = password_input + self.remember_checkbox = remember_checkbox + self.message_label = message_label + + self.setStyleSheet(style.load_stylesheet()) + + def result(self): + return self._final_result + + def keyPressEvent(self, event): + if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter): + self._on_ok_click() + return event.accept() + super(KitsuPasswordDialog, self).keyPressEvent(event) + + def closeEvent(self, event): + super(KitsuPasswordDialog, self).closeEvent(event) + self.finished.emit(self.result()) + + def _on_ok_click(self): + # Check if is connectable + if not self._connectable: + self.message_label.setText( + "Please set server url in Studio Settings!" + ) + return + + # Collect values + login_value = self.login_input.text() + pwd_value = self.password_input.text() + remember = self.remember_checkbox.isChecked() + + # Authenticate + if validate_credentials(login_value, pwd_value): + set_credentials_envs(login_value, pwd_value) + else: + self.message_label.setText("Authentication failed...") + return + + # Remember password cases + if remember: + save_credentials(login_value, pwd_value) + else: + # Clear local settings + clear_credentials() + + # Clear input fields + self.login_input.clear() + self.password_input.clear() + + self._final_result = True + self.close() + + def _on_show_password(self, show_password): + if show_password: + echo_mode = QtWidgets.QLineEdit.Normal + else: + echo_mode = QtWidgets.QLineEdit.Password + self.password_input.setEchoMode(echo_mode) + + def _on_cancel_click(self): + self.close() diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py new file mode 100644 index 0000000000..b7f6f67a40 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_credential.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +import os + +import gazu +import pyblish.api + + +class CollectKitsuSession(pyblish.api.ContextPlugin): # rename log in + """Collect Kitsu session using user credentials""" + + order = pyblish.api.CollectorOrder + label = "Kitsu user session" + # families = ["kitsu"] + + def process(self, context): + + gazu.client.set_host(os.environ["KITSU_SERVER"]) + gazu.log_in(os.environ["KITSU_LOGIN"], os.environ["KITSU_PWD"]) diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py new file mode 100644 index 0000000000..c9e78b59eb --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_entities.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +import os + +import gazu +import pyblish.api + + +class CollectKitsuEntities(pyblish.api.ContextPlugin): + """Collect Kitsu entities according to the current context""" + + order = 
pyblish.api.CollectorOrder + 0.499 + label = "Kitsu entities" + + def process(self, context): + + asset_data = context.data["assetEntity"]["data"] + zou_asset_data = asset_data.get("zou") + if not zou_asset_data: + raise AssertionError("Zou asset data not found in OpenPype!") + self.log.debug("Collected zou asset data: {}".format(zou_asset_data)) + + zou_task_data = asset_data["tasks"][os.environ["AVALON_TASK"]].get( + "zou" + ) + if not zou_task_data: + self.log.warning("Zou task data not found in OpenPype!") + self.log.debug("Collected zou task data: {}".format(zou_task_data)) + + kitsu_project = gazu.project.get_project(zou_asset_data["project_id"]) + if not kitsu_project: + raise AssertionError("Project not found in kitsu!") + context.data["kitsu_project"] = kitsu_project + self.log.debug("Collect kitsu project: {}".format(kitsu_project)) + + entity_type = zou_asset_data["type"] + if entity_type == "Shot": + kitsu_entity = gazu.shot.get_shot(zou_asset_data["id"]) + else: + kitsu_entity = gazu.asset.get_asset(zou_asset_data["id"]) + + if not kitsu_entity: + raise AssertionError("{} not found in kitsu!".format(entity_type)) + + context.data["kitsu_entity"] = kitsu_entity + self.log.debug( + "Collect kitsu {}: {}".format(entity_type, kitsu_entity) + ) + + if zou_task_data: + kitsu_task = gazu.task.get_task(zou_task_data["id"]) + if not kitsu_task: + raise AssertionError("Task not found in kitsu!") + context.data["kitsu_task"] = kitsu_task + self.log.debug("Collect kitsu task: {}".format(kitsu_task)) + + else: + kitsu_task_type = gazu.task.get_task_type_by_name( + os.environ["AVALON_TASK"] + ) + if not kitsu_task_type: + raise AssertionError( + "Task type {} not found in Kitsu!".format( + os.environ["AVALON_TASK"] + ) + ) + + kitsu_task = gazu.task.get_task_by_name( + kitsu_entity, kitsu_task_type + ) + if not kitsu_task: + raise AssertionError("Task not found in kitsu!") + context.data["kitsu_task"] = kitsu_task + self.log.debug("Collect kitsu task: {}".format(kitsu_task)) diff --git a/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py new file mode 100644 index 0000000000..896050f7e2 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/collect_kitsu_username.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +import os +import re + +import pyblish.api + + +class CollectKitsuUsername(pyblish.api.ContextPlugin): + """Collect Kitsu username from the kitsu login""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Kitsu username" + + def process(self, context): + kitsu_login = os.environ.get('KITSU_LOGIN') + + if not kitsu_login: + return + + kitsu_username = kitsu_login.split("@")[0].replace('.', ' ') + new_username = re.sub('[^a-zA-Z]', ' ', kitsu_username).title() + + for instance in context: + # Don't override customData if it already exists + if 'customData' not in instance.data: + instance.data['customData'] = {} + + instance.data['customData']["kitsuUsername"] = new_username diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py new file mode 100644 index 0000000000..ea98e0b7cc --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_note.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class IntegrateKitsuNote(pyblish.api.ContextPlugin): + """Integrate Kitsu Note""" + + order = pyblish.api.IntegratorOrder + label = "Kitsu Note and Status" + # families = 
["kitsu"] + set_status_note = False + note_status_shortname = "wfa" + + def process(self, context): + + # Get comment text body + publish_comment = context.data.get("comment") + if not publish_comment: + self.log.info("Comment is not set.") + + self.log.debug("Comment is `{}`".format(publish_comment)) + + # Get note status, by default uses the task status for the note + # if it is not specified in the configuration + note_status = context.data["kitsu_task"]["task_status_id"] + if self.set_status_note: + kitsu_status = gazu.task.get_task_status_by_short_name( + self.note_status_shortname + ) + if kitsu_status: + note_status = kitsu_status + self.log.info("Note Kitsu status: {}".format(note_status)) + else: + self.log.info( + "Cannot find {} status. The status will not be " + "changed!".format(self.note_status_shortname) + ) + + # Add comment to kitsu task + self.log.debug( + "Add new note in taks id {}".format( + context.data["kitsu_task"]["id"] + ) + ) + kitsu_comment = gazu.task.add_comment( + context.data["kitsu_task"], note_status, comment=publish_comment + ) + + context.data["kitsu_comment"] = kitsu_comment diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py new file mode 100644 index 0000000000..e5e6439439 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class IntegrateKitsuReview(pyblish.api.InstancePlugin): + """Integrate Kitsu Review""" + + order = pyblish.api.IntegratorOrder + 0.01 + label = "Kitsu Review" + # families = ["kitsu"] + optional = True + + def process(self, instance): + + context = instance.context + task = context.data["kitsu_task"] + comment = context.data.get("kitsu_comment") + + # Check comment has been created + if not comment: + self.log.debug( + "Comment not created, review not pushed to preview." 
diff --git a/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py new file mode 100644 index 0000000000..e5e6439439 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/integrate_kitsu_review.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class IntegrateKitsuReview(pyblish.api.InstancePlugin): + """Integrate Kitsu Review""" + + order = pyblish.api.IntegratorOrder + 0.01 + label = "Kitsu Review" + # families = ["kitsu"] + optional = True + + def process(self, instance): + + context = instance.context + task = context.data["kitsu_task"] + comment = context.data.get("kitsu_comment") + + # Check comment has been created + if not comment: + self.log.debug( + "Comment not created, review not pushed to preview." + ) + return + + # Add review representations as preview of comment + for representation in instance.data.get("representations", []): + # Skip if not tagged as review + if "review" not in representation.get("tags", []): + continue + + review_path = representation.get("published_path") + self.log.debug("Found review at: {}".format(review_path)) + + gazu.task.add_preview( + task, comment, review_path, normalize_movie=True + ) + self.log.info("Review uploaded to comment") diff --git a/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py b/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py new file mode 100644 index 0000000000..c4a5b390e0 --- /dev/null +++ b/openpype/modules/kitsu/plugins/publish/other_kitsu_log_out.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +import gazu +import pyblish.api + + +class KitsuLogOut(pyblish.api.ContextPlugin): + """ + Log out from Kitsu API + """ + + order = pyblish.api.IntegratorOrder + 10 + label = "Kitsu Log Out" + + def process(self, context): + gazu.log_out() diff --git a/openpype/modules/kitsu/utils/__init__.py b/openpype/modules/kitsu/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/modules/kitsu/utils/credentials.py b/openpype/modules/kitsu/utils/credentials.py new file mode 100644 index 0000000000..adcfb07cd5 --- /dev/null +++ b/openpype/modules/kitsu/utils/credentials.py @@ -0,0 +1,107 @@ +"""Kitsu credentials functions.""" + +import os +from typing import Tuple +import gazu + +from openpype.lib.local_settings import OpenPypeSecureRegistry +from openpype.lib import emit_event + + +def validate_credentials( + login: str, password: str, kitsu_url: str = None +) -> bool: + """Validate credentials by trying to connect to Kitsu host URL. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + kitsu_url (str, optional): Kitsu host URL. Defaults to None. + + Returns: + bool: Are credentials valid? + """ + if kitsu_url is None: + kitsu_url = os.environ.get("KITSU_SERVER") + + # Connect to server + validate_host(kitsu_url) + + # Authenticate + try: + gazu.log_in(login, password) + except gazu.exception.AuthFailedException: + return False + + emit_event("kitsu.user.logged", data={"username": login}, source="kitsu") + + return True + + +def validate_host(kitsu_url: str) -> bool: + """Validate Kitsu host URL by trying to connect to it. + + Args: + kitsu_url (str): Kitsu host URL. + + Returns: + bool: Is host valid? + """ + # Connect to server + gazu.set_host(kitsu_url) + + # Test host + if gazu.client.host_is_valid(): + return True + else: + raise gazu.exception.HostException(f"Host '{kitsu_url}' is invalid.") + + +def clear_credentials(): + """Clear credentials in Secure Registry.""" + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + # Set local settings + user_registry.delete_item("login") + user_registry.delete_item("password") + + +def save_credentials(login: str, password: str): + """Save credentials in Secure Registry. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + # Set local settings + user_registry.set_item("login", login) + user_registry.set_item("password", password) + + +def load_credentials() -> Tuple[str, str]: + """Load registered credentials.
+ + Returns: + Tuple[str, str]: (Login, Password) + """ + # Get user registry + user_registry = OpenPypeSecureRegistry("kitsu_user") + + return user_registry.get_item("login", None), user_registry.get_item( + "password", None + ) + + +def set_credentials_envs(login: str, password: str): + """Set environment variables with Kitsu login and password. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + os.environ["KITSU_LOGIN"] = login + os.environ["KITSU_PWD"] = password diff --git a/openpype/modules/kitsu/utils/sync_service.py b/openpype/modules/kitsu/utils/sync_service.py new file mode 100644 index 0000000000..237746bea0 --- /dev/null +++ b/openpype/modules/kitsu/utils/sync_service.py @@ -0,0 +1,407 @@ +import os +import threading + +import gazu + +from openpype.client import get_project, get_assets, get_asset_by_name +from openpype.pipeline import AvalonMongoDB +from .credentials import validate_credentials +from .update_op_with_zou import ( + create_op_asset, + set_op_project, + get_kitsu_project_name, + write_project_to_op, + update_op_assets, +) + + +class Listener: + """Host Kitsu listener.""" + + def __init__(self, login, password): + """Create client and add listeners to events without starting it. + + Run `listener.start()` to actually start the service. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + + Raises: + AuthFailedException: Wrong user login and/or password + """ + self.dbcon = AvalonMongoDB() + self.dbcon.install() + + gazu.client.set_host(os.environ["KITSU_SERVER"]) + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." + ) + + gazu.set_event_host( + os.environ["KITSU_SERVER"].replace("api", "socket.io") + ) + self.event_client = gazu.events.init() + + gazu.events.add_listener( + self.event_client, "project:new", self._new_project + ) + gazu.events.add_listener( + self.event_client, "project:update", self._update_project + ) + gazu.events.add_listener( + self.event_client, "project:delete", self._delete_project + ) + + gazu.events.add_listener( + self.event_client, "asset:new", self._new_asset + ) + gazu.events.add_listener( + self.event_client, "asset:update", self._update_asset + ) + gazu.events.add_listener( + self.event_client, "asset:delete", self._delete_asset + ) + + gazu.events.add_listener( + self.event_client, "episode:new", self._new_episode + ) + gazu.events.add_listener( + self.event_client, "episode:update", self._update_episode + ) + gazu.events.add_listener( + self.event_client, "episode:delete", self._delete_episode + ) + + gazu.events.add_listener( + self.event_client, "sequence:new", self._new_sequence + ) + gazu.events.add_listener( + self.event_client, "sequence:update", self._update_sequence + ) + gazu.events.add_listener( + self.event_client, "sequence:delete", self._delete_sequence + ) + + gazu.events.add_listener(self.event_client, "shot:new", self._new_shot) + gazu.events.add_listener( + self.event_client, "shot:update", self._update_shot + ) + gazu.events.add_listener( + self.event_client, "shot:delete", self._delete_shot + ) + + gazu.events.add_listener(self.event_client, "task:new", self._new_task) + gazu.events.add_listener( + self.event_client, "task:update", self._update_task + ) + gazu.events.add_listener( + self.event_client, "task:delete", self._delete_task + ) + + def start(self): + gazu.events.run_client(self.event_client) + + # == Project == + def 
_new_project(self, data): + """Create new project into OP DB.""" + + # Use update process to avoid duplicating code + self._update_project(data) + + def _update_project(self, data): + """Update project into OP DB.""" + # Get project entity + project = gazu.project.get_project(data["project_id"]) + project_name = project["name"] + + update_project = write_project_to_op(project, self.dbcon) + + # Write into DB + if update_project: + self.dbcon.Session["AVALON_PROJECT"] = project_name + self.dbcon.bulk_write([update_project]) + + def _delete_project(self, data): + """Delete project.""" + + project_name = get_kitsu_project_name(data["project_id"]) + + # Delete project collection + self.dbcon.database[project_name].drop() + + # == Asset == + + def _new_asset(self, data): + """Create new asset into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + asset = gazu.asset.get_asset(data["asset_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(asset)) + + # Update + self._update_asset(data) + + def _update_asset(self, data): + """Update asset into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + asset = gazu.asset.get_asset(data["asset_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[asset["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [asset], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_asset(self, data): + """Delete asset of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["asset_id"]} + ) + + # == Episode == + def _new_episode(self, data): + """Create new episode into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + episode = gazu.shot.get_episode(data["episode_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(episode)) + + # Update + self._update_episode(data) + + def _update_episode(self, data): + """Update episode into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + episode = gazu.shot.get_episode(data["episode_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[episode["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [episode], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_episode(self, data): + """Delete shot of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + print("delete episode") # TODO check bugfix + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["episode_id"]} + ) + + # == 
Sequence == + def _new_sequence(self, data): + """Create new sequence into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + sequence = gazu.shot.get_sequence(data["sequence_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(sequence)) + + # Update + self._update_sequence(data) + + def _update_sequence(self, data): + """Update sequence into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + sequence = gazu.shot.get_sequence(data["sequence_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[sequence["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [sequence], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_sequence(self, data): + """Delete sequence of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + print("delete sequence") # TODO check bugfix + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["sequence_id"]} + ) + + # == Shot == + def _new_shot(self, data): + """Create new shot into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + + # Get gazu entity + shot = gazu.shot.get_shot(data["shot_id"]) + + # Insert doc in DB + self.dbcon.insert_one(create_op_asset(shot)) + + # Update + self._update_shot(data) + + def _update_shot(self, data): + """Update shot into OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name) + + # Get gazu entity + shot = gazu.shot.get_shot(data["shot_id"]) + + # Find asset doc + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[shot["project_id"]] = project_doc + + # Update + update_op_result = update_op_assets( + self.dbcon, project_doc, [shot], zou_ids_and_asset_docs + ) + if update_op_result: + asset_doc_id, asset_update = update_op_result[0] + self.dbcon.update_one({"_id": asset_doc_id}, asset_update) + + def _delete_shot(self, data): + """Delete shot of OP DB.""" + set_op_project(self.dbcon, data["project_id"]) + + # Delete + self.dbcon.delete_one( + {"type": "asset", "data.zou.id": data["shot_id"]} + ) + + # == Task == + def _new_task(self, data): + """Create new task into OP DB.""" + # Get project entity + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + + # Get gazu entity + task = gazu.task.get_task(data["task_id"]) + + # Find asset doc + parent_name = task["entity"]["name"] + + asset_doc = get_asset_by_name(project_name, parent_name) + + # Update asset tasks with new one + asset_tasks = asset_doc["data"].get("tasks") + task_type_name = task["task_type"]["name"] + asset_tasks[task_type_name] = {"type": task_type_name, "zou": task} + self.dbcon.update_one( + {"_id": asset_doc["_id"]}, {"$set": {"data.tasks": asset_tasks}} + ) + + def _update_task(self, data): + """Update task into OP DB.""" + # TODO is it necessary? + pass + + def _delete_task(self, data): + """Delete task of OP DB.""" + + set_op_project(self.dbcon, data["project_id"]) + project_name = self.dbcon.active_project() + # Find asset doc + asset_docs = list(get_assets(project_name)) + for doc in asset_docs: + # Match task + for name, task in doc["data"]["tasks"].items(): + if task.get("zou") and data["task_id"] == task["zou"]["id"]: + # Pop task + asset_tasks = doc["data"].get("tasks", {}) + asset_tasks.pop(name) + + # Delete task in DB + self.dbcon.update_one( + {"_id": doc["_id"]}, + {"$set": {"data.tasks": asset_tasks}}, + ) + return + + +def start_listeners(login: str, password: str): + """Start listeners to keep OpenPype up-to-date with Kitsu. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + """ + # Refresh token every week + def refresh_token_every_week(): + print("Refreshing token...") + gazu.refresh_token() + threading.Timer(7 * 3600 * 24, refresh_token_every_week).start() + + refresh_token_every_week() + + # Connect to server + listener = Listener(login, password) + listener.start()
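The `Listener` above wires one handler per `entity:action` event name. Stripped of the OpenPype DB plumbing, the pattern looks like the following sketch; the server URL and credentials are hypothetical placeholders:

```python
# Minimal sketch of the gazu event-listener pattern used by Listener,
# assuming the Kitsu server and credentials are valid.
import gazu

gazu.client.set_host("https://kitsu.mystudio.com/api")  # hypothetical URL
gazu.log_in("user@studio.com", "secret")                # hypothetical login
# Event host is the API host with "api" swapped for "socket.io"
gazu.set_event_host("https://kitsu.mystudio.com/socket.io")

client = gazu.events.init()


def on_shot_new(data):
    # `data` carries at least "project_id" and "shot_id"
    print("New shot created:", data["shot_id"])


gazu.events.add_listener(client, "shot:new", on_shot_new)
gazu.events.run_client(client)  # blocks, dispatching events to handlers
```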
diff --git a/openpype/modules/kitsu/utils/update_op_with_zou.py b/openpype/modules/kitsu/utils/update_op_with_zou.py new file mode 100644 index 0000000000..2d14b38bc4 --- /dev/null +++ b/openpype/modules/kitsu/utils/update_op_with_zou.py @@ -0,0 +1,445 @@ +"""Functions to update OpenPype data using Kitsu DB (a.k.a Zou).""" +from copy import deepcopy +import re +from typing import Dict, List + +from pymongo import DeleteOne, UpdateOne +import gazu +from gazu.task import ( + all_tasks_for_asset, + all_tasks_for_shot, +) + +from openpype.client import ( + get_project, + get_assets, + get_asset_by_id, + get_asset_by_name, + create_project, +) +from openpype.pipeline import AvalonMongoDB +from openpype.settings import get_project_settings +from openpype.modules.kitsu.utils.credentials import validate_credentials + +from openpype.lib import Logger + +log = Logger.get_logger(__name__) + +# Accepted naming pattern for OP +naming_pattern = re.compile("^[a-zA-Z0-9_.]*$") + + +def create_op_asset(gazu_entity: dict) -> dict: + """Create OP asset dict from gazu entity. + + Args: + gazu_entity (dict): Gazu entity to convert. + + Returns: + dict: OP asset document data. + """ + return { + "name": gazu_entity["name"], + "type": "asset", + "schema": "openpype:asset-3.0", + "data": {"zou": gazu_entity, "tasks": {}}, + } + + +def get_kitsu_project_name(project_id: str) -> str: + """Get project name based on project id in kitsu. + + Args: + project_id (str): UUID of project in Kitsu. + + Returns: + str: Name of Kitsu project. + """ + + project = gazu.project.get_project(project_id) + return project["name"] + + +def set_op_project(dbcon: AvalonMongoDB, project_id: str): + """Set project context. + + Args: + dbcon (AvalonMongoDB): Connection to DB + project_id (str): Project zou ID + """ + + dbcon.Session["AVALON_PROJECT"] = get_kitsu_project_name(project_id) + + +def update_op_assets( + dbcon: AvalonMongoDB, + project_doc: dict, + entities_list: List[dict], + asset_doc_ids: Dict[str, dict], +) -> List[Dict[str, dict]]: + """Update OpenPype assets. + Set 'data' and 'parent' fields. + + Args: + dbcon (AvalonMongoDB): Connection to DB + project_doc (dict): Project document from OP DB + entities_list (List[dict]): List of zou entities to update + asset_doc_ids (Dict[str, dict]): Dicts of [{zou_id: asset_doc}, ...]
+ + Returns: + List[Dict[str, dict]]: List of (doc_id, update_dict) tuples + """ + project_name = project_doc["name"] + project_module_settings = get_project_settings(project_name)["kitsu"] + + assets_with_update = [] + for item in entities_list: + # Check asset exists + item_doc = asset_doc_ids.get(item["id"]) + if not item_doc: # Create asset + op_asset = create_op_asset(item) + insert_result = dbcon.insert_one(op_asset) + item_doc = get_asset_by_id(project_name, insert_result.inserted_id) + + # Update asset + item_data = deepcopy(item_doc["data"]) + item_data.update(item.get("data") or {}) + item_data["zou"] = item + + # == Asset settings == + # Frame in, fallback to project's value or default value (1001) + # TODO: get default from settings/project_anatomy/attributes.json + try: + frame_in = int( + item_data.pop( + "frame_in", project_doc["data"].get("frameStart") + ) + ) + except (TypeError, ValueError): + frame_in = 1001 + item_data["frameStart"] = frame_in + # Frames duration, fallback on 0 + try: + # NOTE nb_frames is stored directly in item + # because of zou's legacy design + frames_duration = int(item.get("nb_frames", 0)) + except (TypeError, ValueError): + frames_duration = 0 + # Frame out, fallback on frame_in + duration or project's value or 1001 + frame_out = item_data.pop("frame_out", None) + if not frame_out: + frame_out = frame_in + frames_duration + try: + frame_out = int(frame_out) + except (TypeError, ValueError): + frame_out = 1001 + item_data["frameEnd"] = frame_out + # Fps, fallback to project's value or default value (25.0) + try: + fps = float(item_data.get("fps", project_doc["data"].get("fps"))) + except (TypeError, ValueError): + fps = 25.0 + item_data["fps"] = fps + + # Tasks + tasks_list = [] + item_type = item["type"] + if item_type == "Asset": + tasks_list = all_tasks_for_asset(item) + elif item_type == "Shot": + tasks_list = all_tasks_for_shot(item) + item_data["tasks"] = { + t["task_type_name"]: {"type": t["task_type_name"], "zou": t} + for t in tasks_list + } + + # Get zou parent id for correct hierarchy + # Use parent substitutes if existing + substitute_parent_item = ( + item_data["parent_substitutes"][0] + if item_data.get("parent_substitutes") + else None + ) + if substitute_parent_item: + parent_zou_id = substitute_parent_item["parent_id"] + else: + parent_zou_id = ( + # For Asset, put under asset type directory + item.get("entity_type_id") + if item_type == "Asset" + else None + # Else, fallback on usual hierarchy + or item.get("parent_id") + or item.get("episode_id") + or item.get("source_id") + ) + + # Substitute item type for general classification (assets or shots) + if item_type in ["Asset", "AssetType"]: + entity_root_asset_name = "Assets" + elif item_type in ["Episode", "Sequence", "Shot"]: + entity_root_asset_name = "Shots" + + # Root parent folder if exist + visual_parent_doc_id = ( + asset_doc_ids[parent_zou_id]["_id"] if parent_zou_id else None + ) + if visual_parent_doc_id is None: + # Find root folder doc ("Assets" or "Shots") + root_folder_doc = get_asset_by_name( + project_name, + asset_name=entity_root_asset_name, + fields=["_id", "data.root_of"], + ) + + if root_folder_doc: + visual_parent_doc_id = root_folder_doc["_id"] + + # Visual parent for hierarchy + item_data["visualParent"] = visual_parent_doc_id + + # Add parents for hierarchy + item_data["parents"] = [] + ancestor_id = parent_zou_id + while ancestor_id is not None: + parent_doc = asset_doc_ids[ancestor_id] + item_data["parents"].insert(0, parent_doc["name"]) + + # Get 
parent entity + parent_entity = parent_doc["data"]["zou"] + ancestor_id = parent_entity.get("parent_id") + + # Build OpenPype compatible name + if item_type in ["Shot", "Sequence"] and parent_zou_id is not None: + # Name with parents hierarchy "({episode}_){sequence}_{shot}" + # to avoid duplicate name issue + item_name = f"{item_data['parents'][-1]}_{item['name']}" + + # Update doc name + asset_doc_ids[item["id"]]["name"] = item_name + else: + item_name = item["name"] + + # Set root folders parents + item_data["parents"] = [entity_root_asset_name] + item_data["parents"] + + # Update 'data' different in zou DB + updated_data = { + k: v for k, v in item_data.items() if item_doc["data"].get(k) != v + } + if updated_data or not item_doc.get("parent"): + assets_with_update.append( + ( + item_doc["_id"], + { + "$set": { + "name": item_name, + "data": item_data, + "parent": project_doc["_id"], + } + }, + ) + ) + return assets_with_update + + +def write_project_to_op(project: dict, dbcon: AvalonMongoDB) -> UpdateOne: + """Write gazu project to OP database. + Create project if doesn't exist. + + Args: + project (dict): Gazu project + dbcon (AvalonMongoDB): DB to create project in + + Returns: + UpdateOne: Update instance for the project + """ + project_name = project["name"] + project_doc = get_project(project_name) + if not project_doc: + log.info(f"Creating project '{project_name}'") + project_doc = create_project(project_name, project_name) + + # Project data and tasks + project_data = project_doc["data"] or {} + + # Build project code and update Kitsu + project_code = project.get("code") + if not project_code: + project_code = project["name"].replace(" ", "_").lower() + project["code"] = project_code + + # Update Zou + gazu.project.update_project(project) + + # Update data + project_data.update( + { + "code": project_code, + "fps": float(project["fps"]), + "zou_id": project["id"], + } + ) + + match_res = re.match(r"(\d+)x(\d+)", project["resolution"]) + if match_res: + project_data["resolutionWidth"] = int(match_res.group(1)) + project_data["resolutionHeight"] = int(match_res.group(2)) + else: + log.warning( + f"'{project['resolution']}' does not match the expected" + " format for the resolution, for example: 1920x1080" + ) + + return UpdateOne( + {"_id": project_doc["_id"]}, + { + "$set": { + "config.tasks": { + t["name"]: {"short_name": t.get("short_name", t["name"])} + for t in gazu.task.all_task_types_for_project(project) + or gazu.task.all_task_types() + }, + "data": project_data, + } + }, + ) + + +def sync_all_projects(login: str, password: str, ignore_projects: list = None): + """Update all OP projects in DB with Zou data. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + ignore_projects (list): List of unsynced project names + Raises: + gazu.exception.AuthFailedException: Wrong user login and/or password + """ + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." + ) + + # Iterate projects + dbcon = AvalonMongoDB() + dbcon.install() + all_projects = gazu.project.all_open_projects() + for project in all_projects: + if ignore_projects and project["name"] in ignore_projects: + continue + sync_project_from_kitsu(dbcon, project) + + +def sync_project_from_kitsu(dbcon: AvalonMongoDB, project: dict): + """Update OP project in DB with Zou data. + + `root_of` is meant to sort entities by type for a better readability in + the data tree. 
It puts all shot-like entities (Shot, Episode, Sequence) and all + asset entities under two different root folders in the hierarchy, defined + in settings. + + Args: + dbcon (AvalonMongoDB): MongoDB connection + project (dict): Project dict fetched with gazu. + """ + bulk_writes = [] + + # Get project from zou + if not project: + project = gazu.project.get_project_by_name(project["name"]) + + log.info(f"Synchronizing {project['name']}...") + + # Get all assets from zou + all_assets = gazu.asset.all_assets_for_project(project) + all_asset_types = gazu.asset.all_asset_types_for_project(project) + all_episodes = gazu.shot.all_episodes_for_project(project) + all_seqs = gazu.shot.all_sequences_for_project(project) + all_shots = gazu.shot.all_shots_for_project(project) + all_entities = [ + item + for item in all_assets + + all_asset_types + + all_episodes + + all_seqs + + all_shots + if naming_pattern.match(item["name"]) + ] + + # Sync project. Create if doesn't exist + bulk_writes.append(write_project_to_op(project, dbcon)) + + # Try to find project document + project_name = project["name"] + dbcon.Session["AVALON_PROJECT"] = project_name + project_doc = get_project(project_name) + + # Query all assets of the local project + zou_ids_and_asset_docs = { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou", {}).get("id") + } + zou_ids_and_asset_docs[project["id"]] = project_doc + + # Create entities root folders + to_insert = [ + { + "name": r, + "type": "asset", + "schema": "openpype:asset-3.0", + "data": { + "root_of": r, + "tasks": {}, + }, + } + for r in ["Assets", "Shots"] + if not get_asset_by_name( + project_name, r, fields=["_id", "data.root_of"] + ) + ] + + # Create + to_insert.extend( + [ + create_op_asset(item) + for item in all_entities + if item["id"] not in zou_ids_and_asset_docs.keys() + ] + ) + if to_insert: + # Insert doc in DB + dbcon.insert_many(to_insert) + + # Update existing docs + zou_ids_and_asset_docs.update( + { + asset_doc["data"]["zou"]["id"]: asset_doc + for asset_doc in get_assets(project_name) + if asset_doc["data"].get("zou") + } + ) + + # Update + bulk_writes.extend( + [ + UpdateOne({"_id": id}, update) + for id, update in update_op_assets( + dbcon, project_doc, all_entities, zou_ids_and_asset_docs + ) + ] + ) + + # Delete + diff_assets = set(zou_ids_and_asset_docs.keys()) - { + e["id"] for e in all_entities + [project] + } + if diff_assets: + bulk_writes.extend( + [ + DeleteOne(zou_ids_and_asset_docs[asset_id]) + for asset_id in diff_assets + ] + ) + + # Write into DB + if bulk_writes: + dbcon.bulk_write(bulk_writes)
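Taken together, `sync_all_projects` is the entry point the `sync_service` CLI calls before it starts listening for events. A hedged sketch of driving one sync pass directly from Python (the server URL, credentials, and project name are hypothetical placeholders):

```python
# Hedged sketch: a one-off Kitsu -> OpenPype sync pass, mirroring what the
# `sync_service` CLI does before attaching the event listeners.
import os

from openpype.modules.kitsu.utils.update_op_with_zou import sync_all_projects

os.environ["KITSU_SERVER"] = "https://kitsu.mystudio.com/api"  # hypothetical
sync_all_projects(
    login="user@studio.com",      # hypothetical credentials
    password="secret",
    ignore_projects=["Sandbox"],  # optional skip-list, per the signature above
)
```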
diff --git a/openpype/modules/kitsu/utils/update_zou_with_op.py b/openpype/modules/kitsu/utils/update_zou_with_op.py new file mode 100644 index 0000000000..39baf31b93 --- /dev/null +++ b/openpype/modules/kitsu/utils/update_zou_with_op.py @@ -0,0 +1,265 @@ +"""Functions to update Kitsu DB (a.k.a Zou) using OpenPype Data.""" + +import re +from typing import List + +import gazu +from pymongo import UpdateOne + +from openpype.client import ( + get_projects, + get_project, + get_assets, +) +from openpype.pipeline import AvalonMongoDB +from openpype.settings import get_project_settings +from openpype.modules.kitsu.utils.credentials import validate_credentials + + +def sync_zou(login: str, password: str): + """Synchronize Zou database (Kitsu backend) with OpenPype database. + This is a utility function that helps update Zou data with OpenPype's; + it may not handle all cases correctly, so human intervention might + still be required. + It works better if the OP DB has previously been synchronized from + Zou/Kitsu. + + Args: + login (str): Kitsu user login + password (str): Kitsu user password + + Raises: + gazu.exception.AuthFailedException: Wrong user login and/or password + """ + + # Authenticate + if not validate_credentials(login, password): + raise gazu.exception.AuthFailedException( + f"Kitsu authentication failed for login: '{login}'..." + ) + + # Iterate projects + dbcon = AvalonMongoDB() + dbcon.install() + + op_projects = list(get_projects()) + for project_doc in op_projects: + sync_zou_from_op_project(project_doc["name"], dbcon, project_doc) + + +def sync_zou_from_op_project( + project_name: str, dbcon: AvalonMongoDB, project_doc: dict = None +) -> List[UpdateOne]: + """Update Zou project with OP data. + + Args: + project_name (str): Name of project to sync + dbcon (AvalonMongoDB): MongoDB connection + project_doc (dict, optional): Project doc to sync + """ + # Get project doc if not provided + if not project_doc: + project_doc = get_project(project_name) + + # Get all entities from zou + print(f"Synchronizing {project_name}...") + zou_project = gazu.project.get_project_by_name(project_name) + + # Check project exists in Zou + if zou_project is None: + raise RuntimeError( + f"Project '{project_name}' doesn't exist in Zou database, " + "please create it in Kitsu and add OpenPype user to it before " + "running synchronization." + ) + + # Update project settings and data + if project_doc["data"]: + zou_project.update( + { + "code": project_doc["data"]["code"], + "fps": project_doc["data"]["fps"], + "resolution": f"{project_doc['data']['resolutionWidth']}" + f"x{project_doc['data']['resolutionHeight']}", + } + ) + gazu.project.update_project_data(zou_project, data=project_doc["data"]) + gazu.project.update_project(zou_project) + + asset_types = gazu.asset.all_asset_types() + all_assets = gazu.asset.all_assets_for_project(zou_project) + all_episodes = gazu.shot.all_episodes_for_project(zou_project) + all_seqs = gazu.shot.all_sequences_for_project(zou_project) + all_shots = gazu.shot.all_shots_for_project(zou_project) + all_entities_ids = { + e["id"] for e in all_episodes + all_seqs + all_shots + all_assets + } + + # Query all assets of the local project + project_module_settings = get_project_settings(project_name)["kitsu"] + dbcon.Session["AVALON_PROJECT"] = project_name + asset_docs = { + asset_doc["_id"]: asset_doc + for asset_doc in get_assets(project_name) + } + + # Create new assets + new_assets_docs = [ + doc + for doc in asset_docs.values() + if doc["data"].get("zou", {}).get("id") not in all_entities_ids + ] + naming_pattern = project_module_settings["entities_naming_pattern"] + regex_ep = re.compile( + r"(.*{}.*)|(.*{}.*)|(.*{}.*)".format( + naming_pattern["shot"].replace("#", ""), + naming_pattern["sequence"].replace("#", ""), + naming_pattern["episode"].replace("#", ""), + ), + re.IGNORECASE, + ) + bulk_writes = [] + for doc in new_assets_docs: + visual_parent_id = doc["data"]["visualParent"] + parent_substitutes = [] + + # Match asset type by its name + match = regex_ep.match(doc["name"]) + if not match: # Asset + new_entity = gazu.asset.new_asset( + zou_project, asset_types[0], doc["name"] + ) + # Match case in shot -" - " {{ {loggerName} }}: [" + " {{ {logger_name} }}: [" " {message}" " ]" ) @@ -299,7 +300,7 @@ class OutputWidget(QtWidgets.QWidget):
elif level == "warning": line_f = ( "*** WRN:" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) @@ -307,16 +308,25 @@ class OutputWidget(QtWidgets.QWidget): line_f = ( "!!! ERR:" " {timestamp}" - " >>> {{ {loggerName} }}: [" + " >>> {{ {logger_name} }}: [" " {message}" " ]" ) + logger_name = log["loggerName"] + timestamp = "" + if not show_timecode: + timestamp = log["timestamp"] + message = log["message"] exc = log.get("exception") if exc: - log["message"] = exc["message"] + message = exc["message"] - line = line_f.format(**log) + line = line_f.format( + message=html.escape(message), + logger_name=logger_name, + timestamp=timestamp + ) if show_timecode: timestamp = log["timestamp"] diff --git a/openpype/modules/muster/muster.py b/openpype/modules/muster/muster.py index 6e26ad2d7b..77b9214a5a 100644 --- a/openpype/modules/muster/muster.py +++ b/openpype/modules/muster/muster.py @@ -2,8 +2,7 @@ import os import json import appdirs import requests -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule +from openpype.modules import OpenPypeModule, ITrayModule class MusterModule(OpenPypeModule, ITrayModule): @@ -54,7 +53,7 @@ class MusterModule(OpenPypeModule, ITrayModule): # Definition of Tray menu def tray_menu(self, parent): """Add **change credentials** option to tray menu.""" - from Qt import QtWidgets + from qtpy import QtWidgets # Menu for Tray App menu = QtWidgets.QMenu('Muster', parent) diff --git a/openpype/modules/muster/widget_login.py b/openpype/modules/muster/widget_login.py index ae838c6cea..f38f43fb7f 100644 --- a/openpype/modules/muster/widget_login.py +++ b/openpype/modules/muster/widget_login.py @@ -1,5 +1,4 @@ -import os -from Qt import QtCore, QtGui, QtWidgets +from qtpy import QtCore, QtGui, QtWidgets from openpype import resources, style diff --git a/openpype/modules/project_manager_action.py b/openpype/modules/project_manager_action.py index 251964a059..5f74dd9ee5 100644 --- a/openpype/modules/project_manager_action.py +++ b/openpype/modules/project_manager_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class ProjectManagerAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/python_console_interpreter/module.py b/openpype/modules/python_console_interpreter/module.py index 8c4a2fba73..cb99c05e37 100644 --- a/openpype/modules/python_console_interpreter/module.py +++ b/openpype/modules/python_console_interpreter/module.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class PythonInterpreterAction(OpenPypeModule, ITrayAction): diff --git a/openpype/modules/python_console_interpreter/window/widgets.py b/openpype/modules/python_console_interpreter/window/widgets.py index ecf41eaf3e..b670352f44 100644 --- a/openpype/modules/python_console_interpreter/window/widgets.py +++ b/openpype/modules/python_console_interpreter/window/widgets.py @@ -5,7 +5,7 @@ import collections from code import InteractiveInterpreter import appdirs -from Qt import QtCore, QtWidgets, QtGui +from qtpy import QtCore, QtWidgets, QtGui from openpype import resources from openpype.style import load_stylesheet @@ -389,7 +389,8 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self._append_lines([openpype_art]) - self.setStyleSheet(load_stylesheet()) + self._first_show = True + 
self._splitter_size_ratio = None self._init_from_registry() @@ -416,9 +417,9 @@ class PythonInterpreterWidget(QtWidgets.QWidget): self.resize(width, height) try: - sizes = setting_registry.get_item("splitter_sizes") - if len(sizes) == len(self._widgets_splitter.sizes()): - self._widgets_splitter.setSizes(sizes) + self._splitter_size_ratio = ( + setting_registry.get_item("splitter_sizes") + ) except ValueError: pass @@ -627,8 +628,29 @@ class PythonInterpreterWidget(QtWidgets.QWidget): def showEvent(self, event): self._line_check_timer.start() super(PythonInterpreterWidget, self).showEvent(event) + # First show setup + if self._first_show: + self._first_show = False + self._on_first_show() + self._output_widget.scroll_to_bottom() + def _on_first_show(self): + # Change stylesheet + self.setStyleSheet(load_stylesheet()) + # Check if splitter size ratio is set + # - first store value to local variable and then unset it + splitter_size_ratio = self._splitter_size_ratio + self._splitter_size_ratio = None + # Skip if is not set + if not splitter_size_ratio: + return + + # Skip if number of size items does not match to splitter + splitters_count = len(self._widgets_splitter.sizes()) + if len(splitter_size_ratio) == splitters_count: + self._widgets_splitter.setSizes(splitter_size_ratio) + def closeEvent(self, event): self.save_registry() super(PythonInterpreterWidget, self).closeEvent(event) diff --git a/openpype/modules/royalrender/api.py b/openpype/modules/royalrender/api.py index ed9e71f240..de1dba8724 100644 --- a/openpype/modules/royalrender/api.py +++ b/openpype/modules/royalrender/api.py @@ -5,13 +5,10 @@ import os from openpype.settings import get_project_settings from openpype.lib.local_settings import OpenPypeSettingsRegistry -from openpype.lib import PypeLogger, run_subprocess +from openpype.lib import Logger, run_subprocess from .rr_job import RRJob, SubmitFile, SubmitterParameter -log = PypeLogger.get_logger("RoyalRender") - - class Api: _settings = None @@ -19,6 +16,7 @@ class Api: RR_SUBMIT_API = 2 def __init__(self, settings, project=None): + self.log = Logger.get_logger("RoyalRender") self._settings = settings self._initialize_rr(project) @@ -137,7 +135,7 @@ class Api: rr_console += ".exe" args = [rr_console, file] - run_subprocess(" ".join(args), logger=log) + run_subprocess(" ".join(args), logger=self.log) def _submit_using_api(self, file): # type: (SubmitFile) -> None @@ -159,11 +157,11 @@ class Api: rr_server = tcp.getRRServer() if len(rr_server) == 0: - log.info("Got RR IP address {}".format(rr_server)) + self.log.info("Got RR IP address {}".format(rr_server)) # TODO: Port is hardcoded in RR? 
If not, move it to Settings if not tcp.setServer(rr_server, 7773): - log.error( + self.log.error( "Can not set RR server: {}".format(tcp.errorMessage())) raise RoyalRenderException(tcp.errorMessage()) diff --git a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py index 4d216c1c0a..65af90e8a6 100644 --- a/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py +++ b/openpype/modules/royalrender/plugins/publish/collect_sequences_from_job.py @@ -7,7 +7,8 @@ import json from pprint import pformat import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io def collect(root, @@ -127,7 +128,7 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): session = metadata.get("session") if session: self.log.info("setting session using metadata") - api.Session.update(session) + legacy_io.Session.update(session) os.environ.update(session) else: @@ -187,7 +188,9 @@ class CollectSequencesFromJob(pyblish.api.ContextPlugin): "family": families[0], # backwards compatibility / pyblish "families": list(families), "subset": subset, - "asset": data.get("asset", api.Session["AVALON_ASSET"]), + "asset": data.get( + "asset", legacy_io.Session["AVALON_ASSET"] + ), "stagingDir": root, "frameStart": start, "frameEnd": end, diff --git a/openpype/modules/royalrender/royal_render_module.py b/openpype/modules/royalrender/royal_render_module.py index 4f72860ad6..10d74d01d1 100644 --- a/openpype/modules/royalrender/royal_render_module.py +++ b/openpype/modules/royalrender/royal_render_module.py @@ -2,8 +2,7 @@ """Module providing support for Royal Render.""" import os import openpype.modules -from openpype.modules import OpenPypeModule -from openpype_interfaces import IPluginPaths +from openpype.modules import OpenPypeModule, IPluginPaths class RoyalRenderModule(OpenPypeModule, IPluginPaths): diff --git a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py index 82a79daf3b..cdc37588cd 100644 --- a/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py +++ b/openpype/modules/royalrender/rr_root/plugins/control_job/perjob/m50__openpype_publish_render.py @@ -119,7 +119,7 @@ class OpenPypeContextSelector: # app names and versions, but since app_name is not used # currently down the line (but it is required by OP publish command # right now). - self.context["app_name"] = "maya/2020" + # self.context["app_name"] = "maya/2022" return True @staticmethod @@ -139,7 +139,8 @@ class OpenPypeContextSelector: env = {"AVALON_PROJECT": str(self.context.get("project")), "AVALON_ASSET": str(self.context.get("asset")), "AVALON_TASK": str(self.context.get("task")), - "AVALON_APP_NAME": str(self.context.get("app_name"))} + # "AVALON_APP_NAME": str(self.context.get("app_name")) + } print(">>> setting environment:") for k, v in env.items(): @@ -184,7 +185,7 @@ selector = OpenPypeContextSelector() selector.context["project"] = os.getenv("AVALON_PROJECT") selector.context["asset"] = os.getenv("AVALON_ASSET") selector.context["task"] = os.getenv("AVALON_TASK") -selector.context["app_name"] = os.getenv("AVALON_APP_NAME") +# selector.context["app_name"] = os.getenv("AVALON_APP_NAME") # if anything inside is None, scratch the whole thing and # ask user for context. 
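For clarity, the per-job script above resolves its publish context purely from the render job's environment; a minimal sketch of that step (the fallback behavior shown is illustrative):

```python
# Minimal sketch of the env-driven context the per-job script builds.
import os

context = {
    "project": os.getenv("AVALON_PROJECT"),
    "asset": os.getenv("AVALON_ASSET"),
    "task": os.getenv("AVALON_TASK"),
}

# If anything inside is None, the selector falls back to asking the user
if not all(context.values()):
    print("Incomplete context, ask the user:", context)
```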
diff --git a/openpype/modules/settings_action.py b/openpype/modules/settings_action.py index 2b4b51e3ad..1902caff1d 100644 --- a/openpype/modules/settings_action.py +++ b/openpype/modules/settings_action.py @@ -1,5 +1,4 @@ -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction +from openpype.modules import OpenPypeModule, ITrayAction class SettingsAction(OpenPypeModule, ITrayAction): @@ -23,6 +22,11 @@ class SettingsAction(OpenPypeModule, ITrayAction): """Initialization in tray implementation of ITrayAction.""" self.create_settings_window() + def tray_exit(self): + # Close settings UI to remove settings lock + if self.settings_window: + self.settings_window.close() + def on_action_trigger(self): """Implementation for action trigger of ITrayAction.""" self.show_settings_window() diff --git a/openpype/modules/shotgrid/README.md b/openpype/modules/shotgrid/README.md new file mode 100644 index 0000000000..cbee0e9bf4 --- /dev/null +++ b/openpype/modules/shotgrid/README.md @@ -0,0 +1,19 @@ +## Shotgrid Module + +### Prerequisites + +Install and launch a [Shotgrid Leecher](https://github.com/Ellipsanime/shotgrid-leecher) server + +### Quickstart + +The goal of this tutorial is to synchronize an already existing Shotgrid project with OpenPype. + +- Activate the Shotgrid module in the **system settings** and fill in the Shotgrid Leecher server API URL + +- Create a new OpenPype project with the **project manager** + +- Fill in the Shotgrid authentication info (URL, script name, API key) and the Shotgrid project ID related to this OpenPype project in the **project settings** + +- Use the batch interface (Tray > Shotgrid > Launch batch), select your project and click "batch" + +- You can now access your Shotgrid entities within the **Avalon launcher** and publish information to Shotgrid with **Pyblish** diff --git a/openpype/modules/shotgrid/__init__.py b/openpype/modules/shotgrid/__init__.py new file mode 100644 index 0000000000..f1337a9492 --- /dev/null +++ b/openpype/modules/shotgrid/__init__.py @@ -0,0 +1,5 @@ +from .shotgrid_module import ( + ShotgridModule, +) + +__all__ = ("ShotgridModule",) diff --git a/openpype/modules/shotgrid/lib/__init__.py b/openpype/modules/shotgrid/lib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openpype/modules/shotgrid/lib/const.py b/openpype/modules/shotgrid/lib/const.py new file mode 100644 index 0000000000..2a34800fac --- /dev/null +++ b/openpype/modules/shotgrid/lib/const.py @@ -0,0 +1 @@ +MODULE_NAME = "shotgrid" diff --git a/openpype/modules/shotgrid/lib/credentials.py b/openpype/modules/shotgrid/lib/credentials.py new file mode 100644 index 0000000000..337c4f6ecb --- /dev/null +++ b/openpype/modules/shotgrid/lib/credentials.py @@ -0,0 +1,125 @@ + +from urllib.parse import urlparse + +import shotgun_api3 +from shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSecureRegistry, OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.record import Credentials + + +def _get_shotgrid_secure_key(hostname, key): + """Secure item key for entered hostname.""" + return f"shotgrid/{hostname}/{key}" + + +def _get_secure_value_and_registry( + hostname, + name, +): + key = _get_shotgrid_secure_key(hostname, name) + registry = OpenPypeSecureRegistry(key) + return registry.get_item(name, None), registry + + +def get_shotgrid_hostname(shotgrid_url): + + if not shotgrid_url: + raise Exception("Shotgrid url cannot be null") + valid_shotgrid_url = ( + f"//{shotgrid_url}" if
"//" not in shotgrid_url else shotgrid_url + ) + return urlparse(valid_shotgrid_url).hostname + + +# Credentials storing function (using keyring) + + +def get_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + if not hostname: + return None + login_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, _ = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + return Credentials(login_value, password_value) + + +def save_credentials(login, password, shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + _, login_registry = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + _, password_registry = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + clear_credentials(shotgrid_url) + login_registry.set_item(Credentials.login_key_prefix(), login) + password_registry.set_item(Credentials.password_key_prefix(), password) + + +def clear_credentials(shotgrid_url): + hostname = get_shotgrid_hostname(shotgrid_url) + login_value, login_registry = _get_secure_value_and_registry( + hostname, + Credentials.login_key_prefix(), + ) + password_value, password_registry = _get_secure_value_and_registry( + hostname, + Credentials.password_key_prefix(), + ) + + if login_value is not None: + login_registry.delete_item(Credentials.login_key_prefix()) + + if password_value is not None: + password_registry.delete_item(Credentials.password_key_prefix()) + + +# Login storing function (using json) + + +def get_local_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None + + +def save_local_login(login): + reg = OpenPypeSettingsRegistry() + reg.set_item("shotgrid_login", login) + + +def clear_local_login(): + reg = OpenPypeSettingsRegistry() + reg.delete_item("shotgrid_login") + + +def check_credentials( + login, + password, + shotgrid_url, +): + + if not shotgrid_url or not login or not password: + return False + try: + session = shotgun_api3.Shotgun( + shotgrid_url, + login=login, + password=password, + ) + session.preferences_read() + session.close() + except AuthenticationFault: + return False + return True diff --git a/openpype/modules/shotgrid/lib/record.py b/openpype/modules/shotgrid/lib/record.py new file mode 100644 index 0000000000..f62f4855d5 --- /dev/null +++ b/openpype/modules/shotgrid/lib/record.py @@ -0,0 +1,20 @@ + +class Credentials: + login = None + password = None + + def __init__(self, login, password) -> None: + super().__init__() + self.login = login + self.password = password + + def is_empty(self): + return not (self.login and self.password) + + @staticmethod + def login_key_prefix(): + return "login" + + @staticmethod + def password_key_prefix(): + return "password" diff --git a/openpype/modules/shotgrid/lib/settings.py b/openpype/modules/shotgrid/lib/settings.py new file mode 100644 index 0000000000..5b0b728f55 --- /dev/null +++ b/openpype/modules/shotgrid/lib/settings.py @@ -0,0 +1,18 @@ +from openpype.settings import get_system_settings, get_project_settings +from openpype.modules.shotgrid.lib.const import MODULE_NAME + + +def get_shotgrid_project_settings(project): + return get_project_settings(project).get(MODULE_NAME, {}) + + +def get_shotgrid_settings(): + return get_system_settings().get("modules", {}).get(MODULE_NAME, {}) + + +def get_shotgrid_servers(): + return get_shotgrid_settings().get("shotgrid_settings", {}) + + 
+def get_leecher_backend_url(): + return get_shotgrid_settings().get("leecher_backend_url") diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py new file mode 100644 index 0000000000..0b03ac2e5d --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_entities.py @@ -0,0 +1,100 @@ +import os + +import pyblish.api +from openpype.lib.mongo import OpenPypeMongoConnection + + +class CollectShotgridEntities(pyblish.api.ContextPlugin): + """Collect shotgrid entities according to the current context""" + + order = pyblish.api.CollectorOrder + 0.499 + label = "Shotgrid entities" + + def process(self, context): + + avalon_project = context.data.get("projectEntity") + avalon_asset = context.data.get("assetEntity") + avalon_task_name = os.getenv("AVALON_TASK") + + self.log.info(avalon_project) + self.log.info(avalon_asset) + + sg_project = _get_shotgrid_project(context) + sg_task = _get_shotgrid_task( + avalon_project, + avalon_asset, + avalon_task_name + ) + sg_entity = _get_shotgrid_entity(avalon_project, avalon_asset) + + if sg_project: + context.data["shotgridProject"] = sg_project + self.log.info( + "Collected corresponding shotgrid project: {}".format( + sg_project + ) + ) + + if sg_task: + context.data["shotgridTask"] = sg_task + self.log.info( + "Collected corresponding shotgrid task: {}".format(sg_task) + ) + + if sg_entity: + context.data["shotgridEntity"] = sg_entity + self.log.info( + "Collected corresponding shotgrid entity: {}".format(sg_entity) + ) + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + + sg = context.data.get("shotgridSession") + return sg.find_one("Version", filters, []) + + +def _get_shotgrid_collection(project): + client = OpenPypeMongoConnection.get_mongo_client() + return client.get_database("shotgrid_openpype").get_collection(project) + + +def _get_shotgrid_project(context): + shotgrid_project_id = context.data["project_settings"].get( + "shotgrid_project_id") + if shotgrid_project_id: + return {"type": "Project", "id": shotgrid_project_id} + return {} + + +def _get_shotgrid_task(avalon_project, avalon_asset, avalon_task): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_task_hierarchy_row = sg_col.find_one( + { + "type": "Task", + "_id": {"$regex": "^" + avalon_task + "_[0-9]*"}, + "parent": {"$regex": ".*," + avalon_asset["name"] + ","}, + } + ) + if shotgrid_task_hierarchy_row: + return {"type": "Task", "id": shotgrid_task_hierarchy_row["src_id"]} + return {} + + +def _get_shotgrid_entity(avalon_project, avalon_asset): + sg_col = _get_shotgrid_collection(avalon_project["name"]) + shotgrid_entity_hierarchy_row = sg_col.find_one( + {"_id": avalon_asset["name"]} + ) + if shotgrid_entity_hierarchy_row: + return { + "type": shotgrid_entity_hierarchy_row["type"], + "id": shotgrid_entity_hierarchy_row["src_id"], + } + return {} diff --git a/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py new file mode 100644 index 0000000000..9d5d2271bf --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/collect_shotgrid_session.py @@ -0,0 +1,123 @@ +import os + +import pyblish.api +import shotgun_api3 +from 
shotgun_api3.shotgun import AuthenticationFault + +from openpype.lib import OpenPypeSettingsRegistry +from openpype.modules.shotgrid.lib.settings import ( + get_shotgrid_servers, + get_shotgrid_project_settings, +) + + +class CollectShotgridSession(pyblish.api.ContextPlugin): + """Collect shotgrid session using user credentials""" + + order = pyblish.api.CollectorOrder + label = "Shotgrid user session" + + def process(self, context): + + certificate_path = os.getenv("SHOTGUN_API_CACERTS") + if certificate_path is None or not os.path.exists(certificate_path): + self.log.info( + "SHOTGUN_API_CACERTS does not contain a valid \ + path: {}".format( + certificate_path + ) + ) + certificate_path = get_shotgrid_certificate() + self.log.info("Get Certificate from shotgrid_api") + + if not os.path.exists(certificate_path): + self.log.error( + "Could not find certificate in shotgun_api3: \ + {}".format( + certificate_path + ) + ) + return + + set_shotgrid_certificate(certificate_path) + self.log.info("Set Certificate: {}".format(certificate_path)) + + avalon_project = os.getenv("AVALON_PROJECT") + + shotgrid_settings = get_shotgrid_project_settings(avalon_project) + self.log.info("shotgrid settings: {}".format(shotgrid_settings)) + shotgrid_servers_settings = get_shotgrid_servers() + self.log.info( + "shotgrid_servers_settings: {}".format(shotgrid_servers_settings) + ) + + shotgrid_server = shotgrid_settings.get("shotgrid_server", "") + if not shotgrid_server: + self.log.error( + "No Shotgrid server found, please set the Shotgrid " + "server in OpenPype project settings" + ) + + shotgrid_server_setting = shotgrid_servers_settings.get( + shotgrid_server, {} + ) + shotgrid_url = shotgrid_server_setting.get("shotgrid_url", "") + + shotgrid_script_name = shotgrid_server_setting.get( + "shotgrid_script_name", "" + ) + shotgrid_script_key = shotgrid_server_setting.get( + "shotgrid_script_key", "" + ) + if not shotgrid_script_name or not shotgrid_script_key: + self.log.error( + "No Shotgrid api credential found, please enter " + "script name and script key in OpenPype settings" + ) + + login = get_login() or os.getenv("OPENPYPE_SG_USER") + + if not login: + self.log.error( + "No Shotgrid login found, please " + "log in to shotgrid within the OpenPype Tray" + ) + + session = shotgun_api3.Shotgun( + base_url=shotgrid_url, + script_name=shotgrid_script_name, + api_key=shotgrid_script_key, + sudo_as_login=login, + ) + + try: + session.preferences_read() + except AuthenticationFault: + raise ValueError( + "Could not connect to shotgrid {} with user {}".format( + shotgrid_url, login + ) + ) + + self.log.info( + "Logged in to shotgrid {} with user {}".format(shotgrid_url, login) + ) + context.data["shotgridSession"] = session + context.data["shotgridUser"] = login + + +def get_shotgrid_certificate(): + shotgun_api_path = os.path.dirname(shotgun_api3.__file__) + return os.path.join(shotgun_api_path, "lib", "certifi", "cacert.pem") + + +def set_shotgrid_certificate(certificate): + os.environ["SHOTGUN_API_CACERTS"] = certificate + + +def get_login(): + reg = OpenPypeSettingsRegistry() + try: + return str(reg.get_item("shotgrid_login")) + except Exception: + return None diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py new file mode 100644 index 0000000000..fc15d5515f --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_publish.py @@ -0,0 +1,81 @@ +import os +import 
pyblish.api + +from openpype.pipeline.publish import get_publish_repre_path + + +class IntegrateShotgridPublish(pyblish.api.InstancePlugin): + """ + Create PublishedFile entities from representations and add them to the + version. If a representation is tagged for shotgrid review, its path is + added as path to movie for a movie file or path to frames for an image + sequence. + """ + + order = pyblish.api.IntegratorOrder + 0.499 + label = "Shotgrid Published Files" + + def process(self, instance): + + context = instance.context + + self.sg = context.data.get("shotgridSession") + + shotgrid_version = instance.data.get("shotgridVersion") + + for representation in instance.data.get("representations", []): + + local_path = get_publish_repre_path( + instance, representation, False + ) + code = os.path.basename(local_path) + + if representation.get("tags", []): + continue + + published_file = self._find_existing_publish( + code, context, shotgrid_version + ) + + published_file_data = { + "project": context.data.get("shotgridProject"), + "code": code, + "entity": context.data.get("shotgridEntity"), + "task": context.data.get("shotgridTask"), + "version": shotgrid_version, + "path": {"local_path": local_path}, + } + if not published_file: + published_file = self._create_published(published_file_data) + self.log.info( + "Create Shotgrid PublishedFile: {}".format(published_file) + ) + else: + self.sg.update( + published_file["type"], + published_file["id"], + published_file_data, + ) + self.log.info( + "Update Shotgrid PublishedFile: {}".format(published_file) + ) + + if instance.data["family"] == "image": + self.sg.upload_thumbnail( + published_file["type"], published_file["id"], local_path + ) + instance.data["shotgridPublishedFile"] = published_file + + def _find_existing_publish(self, code, context, shotgrid_version): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["version", "is", shotgrid_version], + ["code", "is", code], + ] + return self.sg.find_one("PublishedFile", filters, []) + + def _create_published(self, published_file_data): + + return self.sg.create("PublishedFile", published_file_data) diff --git a/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py new file mode 100644 index 0000000000..adfdca718c --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/integrate_shotgrid_version.py @@ -0,0 +1,94 @@ +import pyblish.api + +from openpype.pipeline.publish import get_publish_repre_path + + +class IntegrateShotgridVersion(pyblish.api.InstancePlugin): + """Integrate Shotgrid Version""" + + order = pyblish.api.IntegratorOrder + 0.497 + label = "Shotgrid Version" + + sg = None + + def process(self, instance): + + context = instance.context + self.sg = context.data.get("shotgridSession") + + # TODO: Use path template solver to build version code from settings + anatomy = instance.data.get("anatomyData", {}) + code = "_".join( + [ + anatomy["project"]["code"], + anatomy["parent"], + anatomy["asset"], + anatomy["task"]["name"], + "v{:03}".format(int(anatomy["version"])), + ] + ) + + version = self._find_existing_version(code, context) + + if not version: + version = self._create_version(code, context) + self.log.info("Create Shotgrid version: {}".format(version)) + else: + self.log.info("Use existing Shotgrid version: {}".format(version)) + + data_to_update = {} + status = 
context.data.get("intent", {}).get("value") + if status: + data_to_update["sg_status_list"] = status + + for representation in instance.data.get("representations", []): + local_path = get_publish_repre_path( + instance, representation, False + ) + + if "shotgridreview" in representation.get("tags", []): + + if representation["ext"] in ["mov", "avi"]: + self.log.info( + "Upload review: {} for version shotgrid {}".format( + local_path, version.get("id") + ) + ) + self.sg.upload( + "Version", + version.get("id"), + local_path, + field_name="sg_uploaded_movie", + ) + + data_to_update["sg_path_to_movie"] = local_path + + elif representation["ext"] in ["jpg", "png", "exr", "tga"]: + path_to_frame = local_path.replace("0000", "#") + data_to_update["sg_path_to_frames"] = path_to_frame + + self.log.info("Update Shotgrid version with {}".format(data_to_update)) + self.sg.update("Version", version["id"], data_to_update) + + instance.data["shotgridVersion"] = version + + def _find_existing_version(self, code, context): + + filters = [ + ["project", "is", context.data.get("shotgridProject")], + ["sg_task", "is", context.data.get("shotgridTask")], + ["entity", "is", context.data.get("shotgridEntity")], + ["code", "is", code], + ] + return self.sg.find_one("Version", filters, []) + + def _create_version(self, code, context): + + version_data = { + "project": context.data.get("shotgridProject"), + "sg_task": context.data.get("shotgridTask"), + "entity": context.data.get("shotgridEntity"), + "code": code, + } + + return self.sg.create("Version", version_data) diff --git a/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py new file mode 100644 index 0000000000..48b320e15e --- /dev/null +++ b/openpype/modules/shotgrid/plugins/publish/validate_shotgrid_user.py @@ -0,0 +1,38 @@ +import pyblish.api +from openpype.pipeline.publish import ValidateContentsOrder + + +class ValidateShotgridUser(pyblish.api.ContextPlugin): + """ + Check if user is valid and have access to the project. 
+ """ + + label = "Validate Shotgrid User" + order = ValidateContentsOrder + + def process(self, context): + sg = context.data.get("shotgridSession") + + login = context.data.get("shotgridUser") + self.log.info("Login shotgrid set in OpenPype is {}".format(login)) + project = context.data.get("shotgridProject") + self.log.info("Current shotgun project is {}".format(project)) + + if not (login and sg and project): + raise KeyError() + + user = sg.find_one("HumanUser", [["login", "is", login]], ["projects"]) + + self.log.info(user) + self.log.info(login) + user_projects_id = [p["id"] for p in user.get("projects", [])] + if not project.get("id") in user_projects_id: + raise PermissionError( + "Login {} don't have access to the project {}".format( + login, project + ) + ) + + self.log.info( + "Login {} have access to the project {}".format(login, project) + ) diff --git a/openpype/modules/shotgrid/server/README.md b/openpype/modules/shotgrid/server/README.md new file mode 100644 index 0000000000..15e056ff3e --- /dev/null +++ b/openpype/modules/shotgrid/server/README.md @@ -0,0 +1,5 @@ + +### Shotgrid server + +Please refer to the external project that covers Openpype/Shotgrid communication: + - https://github.com/Ellipsanime/shotgrid-leecher diff --git a/openpype/modules/shotgrid/shotgrid_module.py b/openpype/modules/shotgrid/shotgrid_module.py new file mode 100644 index 0000000000..d26647d06a --- /dev/null +++ b/openpype/modules/shotgrid/shotgrid_module.py @@ -0,0 +1,54 @@ +import os + +from openpype.modules import ( + OpenPypeModule, + ITrayModule, + IPluginPaths, +) + +SHOTGRID_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class ShotgridModule(OpenPypeModule, ITrayModule, IPluginPaths): + leecher_manager_url = None + name = "shotgrid" + enabled = False + project_id = None + tray_wrapper = None + + def initialize(self, modules_settings): + shotgrid_settings = modules_settings.get(self.name, dict()) + self.enabled = shotgrid_settings.get("enabled", False) + self.leecher_manager_url = shotgrid_settings.get( + "leecher_manager_url", "" + ) + + def connect_with_modules(self, enabled_modules): + pass + + def get_global_environments(self): + return {"PROJECT_ID": self.project_id} + + def get_plugin_paths(self): + return { + "publish": [ + os.path.join(SHOTGRID_MODULE_DIR, "plugins", "publish") + ] + } + + def get_launch_hook_paths(self): + return os.path.join(SHOTGRID_MODULE_DIR, "hooks") + + def tray_init(self): + from .tray.shotgrid_tray import ShotgridTrayWrapper + + self.tray_wrapper = ShotgridTrayWrapper(self) + + def tray_start(self): + return self.tray_wrapper.validate() + + def tray_exit(self, *args, **kwargs): + return self.tray_wrapper + + def tray_menu(self, tray_menu): + return self.tray_wrapper.tray_menu(tray_menu) diff --git a/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py new file mode 100644 index 0000000000..1f78cf77c9 --- /dev/null +++ b/openpype/modules/shotgrid/tests/shotgrid/lib/test_credentials.py @@ -0,0 +1,34 @@ +import pytest +from assertpy import assert_that + +import openpype.modules.shotgrid.lib.credentials as sut + + +def test_missing_shotgrid_url(): + with pytest.raises(Exception) as ex: + # arrange + url = "" + # act + sut.get_shotgrid_hostname(url) + # assert + assert_that(ex).is_equal_to("Shotgrid url cannot be a null") + + +def test_full_shotgrid_url(): + # arrange + url = "https://shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert 
+ assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") + + +def test_incomplete_shotgrid_url(): + # arrange + url = "shotgrid.com/myinstance" + # act + actual = sut.get_shotgrid_hostname(url) + # assert + assert_that(actual).is_not_empty() + assert_that(actual).is_equal_to("shotgrid.com") diff --git a/openpype/modules/shotgrid/tray/credential_dialog.py b/openpype/modules/shotgrid/tray/credential_dialog.py new file mode 100644 index 0000000000..7b839b63c0 --- /dev/null +++ b/openpype/modules/shotgrid/tray/credential_dialog.py @@ -0,0 +1,201 @@ +import os +from qtpy import QtCore, QtWidgets, QtGui + +from openpype import style +from openpype import resources +from openpype.modules.shotgrid.lib import settings, credentials + + +class CredentialsDialog(QtWidgets.QDialog): + SIZE_W = 450 + SIZE_H = 200 + + _module = None + _is_logged = False + url_label = None + login_label = None + password_label = None + url_input = None + login_input = None + password_input = None + input_layout = None + login_button = None + buttons_layout = None + main_widget = None + + login_changed = QtCore.Signal() + + def __init__(self, module, parent=None): + super(CredentialsDialog, self).__init__(parent) + + self._module = module + self._is_logged = False + + self.setWindowTitle("OpenPype - Shotgrid Login") + + icon = QtGui.QIcon(resources.get_openpype_icon_filepath()) + self.setWindowIcon(icon) + + self.setWindowFlags( + QtCore.Qt.WindowCloseButtonHint + | QtCore.Qt.WindowMinimizeButtonHint + ) + self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H)) + self.setMaximumSize(QtCore.QSize(self.SIZE_W + 100, self.SIZE_H + 100)) + self.setStyleSheet(style.load_stylesheet()) + + self.ui_init() + + def ui_init(self): + self.url_label = QtWidgets.QLabel("Shotgrid server:") + self.login_label = QtWidgets.QLabel("Login:") + self.password_label = QtWidgets.QLabel("Password:") + + self.url_input = QtWidgets.QComboBox() + # self.url_input.setReadOnly(True) + + self.login_input = QtWidgets.QLineEdit() + self.login_input.setPlaceholderText("login") + + self.password_input = QtWidgets.QLineEdit() + self.password_input.setPlaceholderText("password") + self.password_input.setEchoMode(QtWidgets.QLineEdit.Password) + + self.error_label = QtWidgets.QLabel("") + self.error_label.setStyleSheet("color: red;") + self.error_label.setWordWrap(True) + self.error_label.hide() + + self.input_layout = QtWidgets.QFormLayout() + self.input_layout.setContentsMargins(10, 15, 10, 5) + + self.input_layout.addRow(self.url_label, self.url_input) + self.input_layout.addRow(self.login_label, self.login_input) + self.input_layout.addRow(self.password_label, self.password_input) + self.input_layout.addRow(self.error_label) + + self.login_button = QtWidgets.QPushButton("Login") + self.login_button.setToolTip("Log in shotgrid instance") + self.login_button.clicked.connect(self._on_shotgrid_login_clicked) + + self.logout_button = QtWidgets.QPushButton("Logout") + self.logout_button.setToolTip("Log out shotgrid instance") + self.logout_button.clicked.connect(self._on_shotgrid_logout_clicked) + + self.buttons_layout = QtWidgets.QHBoxLayout() + self.buttons_layout.addWidget(self.logout_button) + self.buttons_layout.addWidget(self.login_button) + + self.main_widget = QtWidgets.QVBoxLayout(self) + self.main_widget.addLayout(self.input_layout) + self.main_widget.addLayout(self.buttons_layout) + self.setLayout(self.main_widget) + + def show(self, *args, **kwargs): + super(CredentialsDialog, self).show(*args, **kwargs) + 
self._fill_shotgrid_url() + self._fill_shotgrid_login() + + def _fill_shotgrid_url(self): + servers = settings.get_shotgrid_servers() + + if servers: + for _, v in servers.items(): + self.url_input.addItem("{}".format(v.get('shotgrid_url'))) + self._valid_input(self.url_input) + self.login_button.show() + self.logout_button.show() + enabled = True + else: + self.set_error("Ask your admin to add a shotgrid server in settings") + self._invalid_input(self.url_input) + self.login_button.hide() + self.logout_button.hide() + enabled = False + + self.login_input.setEnabled(enabled) + self.password_input.setEnabled(enabled) + + def _fill_shotgrid_login(self): + login = credentials.get_local_login() + + if login: + self.login_input.setText(login) + + def _clear_shotgrid_login(self): + self.login_input.setText("") + self.password_input.setText("") + + def _on_shotgrid_login_clicked(self): + login = self.login_input.text().strip() + password = self.password_input.text().strip() + missing = [] + + if login == "": + missing.append("login") + self._invalid_input(self.login_input) + + if password == "": + missing.append("password") + self._invalid_input(self.password_input) + + url = self.url_input.currentText() + if url == "": + missing.append("url") + self._invalid_input(self.url_input) + + if len(missing) > 0: + self.set_error("You didn't enter {}".format(" and ".join(missing))) + return + + # if credentials.check_credentials( + # login=login, + # password=password, + # shotgrid_url=url, + # ): + credentials.save_local_login( + login=login + ) + os.environ['OPENPYPE_SG_USER'] = login + self._on_login() + + # self.set_error("CANT LOGIN") + + def _on_shotgrid_logout_clicked(self): + credentials.clear_local_login() + del os.environ['OPENPYPE_SG_USER'] + self._clear_shotgrid_login() + self._on_logout() + + def set_error(self, msg): + self.error_label.setText(msg) + self.error_label.show() + + def _on_login(self): + self._is_logged = True + self.login_changed.emit() + self._close_widget() + + def _on_logout(self): + self._is_logged = False + self.login_changed.emit() + + def _close_widget(self): + self.hide() + + def _valid_input(self, input_widget): + input_widget.setStyleSheet("") + + def _invalid_input(self, input_widget): + input_widget.setStyleSheet("border: 1px solid red;") + + def login_with_credentials( + self, url, login, password + ): + verification = credentials.check_credentials(login, password, url) + if verification: + credentials.save_credentials(login, password, url) + self._module.set_credentials_to_env(login, password) + self.set_credentials(login, password) + self.login_changed.emit() + return verification diff --git a/openpype/modules/shotgrid/tray/shotgrid_tray.py b/openpype/modules/shotgrid/tray/shotgrid_tray.py new file mode 100644 index 0000000000..8e363bd318 --- /dev/null +++ b/openpype/modules/shotgrid/tray/shotgrid_tray.py @@ -0,0 +1,75 @@ +import os +import webbrowser + +from qtpy import QtWidgets + +from openpype.modules.shotgrid.lib import credentials +from openpype.modules.shotgrid.tray.credential_dialog import ( + CredentialsDialog, +) + + +class ShotgridTrayWrapper: + module = None + credentials_dialog = None + logged_user_label = None + + def __init__(self, module): + self.module = module + self.credentials_dialog = CredentialsDialog(module) + self.credentials_dialog.login_changed.connect(self.set_login_label) + self.logged_user_label = QtWidgets.QAction("") + self.logged_user_label.setDisabled(True) + self.set_login_label() + + def show_batch_dialog(self): + if 
self.module.leecher_manager_url: + webbrowser.open(self.module.leecher_manager_url) + + def show_connect_dialog(self): + self.show_credential_dialog() + + def show_credential_dialog(self): + self.credentials_dialog.show() + self.credentials_dialog.activateWindow() + self.credentials_dialog.raise_() + + def set_login_label(self): + login = credentials.get_local_login() + if login: + self.logged_user_label.setText("{}".format(login)) + else: + self.logged_user_label.setText( + "No user logged in" + ) + + def tray_menu(self, tray_menu): + # Add login to user menu + menu = QtWidgets.QMenu("Shotgrid", tray_menu) + show_connect_action = QtWidgets.QAction("Connect to Shotgrid", menu) + show_connect_action.triggered.connect(self.show_connect_dialog) + menu.addAction(self.logged_user_label) + menu.addSeparator() + menu.addAction(show_connect_action) + tray_menu.addMenu(menu) + + # Add manager to Admin menu + for m in tray_menu.findChildren(QtWidgets.QMenu): + if m.title() == "Admin": + shotgrid_manager_action = QtWidgets.QAction( + "Shotgrid manager", menu + ) + shotgrid_manager_action.triggered.connect( + self.show_batch_dialog + ) + m.addAction(shotgrid_manager_action) + + def validate(self): + login = credentials.get_local_login() + + if not login: + self.show_credential_dialog() + else: + os.environ["OPENPYPE_SG_USER"] = login + + return True diff --git a/openpype/modules/slack/manifest.yml b/openpype/modules/slack/manifest.yml index 7a65cc5915..233c39fbaf 100644 --- a/openpype/modules/slack/manifest.yml +++ b/openpype/modules/slack/manifest.yml @@ -19,6 +19,8 @@ oauth_config: - chat:write.public - files:write - channels:read + - users:read + - usergroups:read settings: org_deploy_enabled: false socket_mode_enabled: false diff --git a/openpype/modules/slack/plugins/publish/collect_slack_family.py b/openpype/modules/slack/plugins/publish/collect_slack_family.py index 7475bdc89e..b3e7bbdcec 100644 --- a/openpype/modules/slack/plugins/publish/collect_slack_family.py +++ b/openpype/modules/slack/plugins/publish/collect_slack_family.py @@ -1,10 +1,12 @@ -from avalon import io import pyblish.api from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib import attribute_definitions +from openpype.pipeline import OpenPypePyblishPluginMixin -class CollectSlackFamilies(pyblish.api.InstancePlugin): +class CollectSlackFamilies(pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin): """Collect family for Slack notification Expects configured profile in @@ -17,16 +19,28 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): profiles = None + @classmethod + def get_attribute_defs(cls): + return [ + attribute_definitions.TextDef( + # Key under which it will be stored + "additional_message", + # Use plugin label as label for attribute + label="Additional Slack message", + placeholder="" + ) + ] + def process(self, instance): - task_name = io.Session.get("AVALON_TASK") + task_data = instance.data["anatomyData"].get("task", {}) family = self.main_family_from_instance(instance) key_values = { "families": family, - "tasks": task_name, + "tasks": task_data.get("name"), + "task_types": task_data.get("type"), "hosts": instance.data["anatomyData"]["app"], "subsets": instance.data["subset"] } - profile = filter_profiles(self.profiles, key_values, logger=self.log) @@ -55,6 +69,11 @@ class CollectSlackFamilies(pyblish.api.InstancePlugin): ["token"]) instance.data["slack_token"] = slack_token + attribute_values = self.get_attr_values_from_data(instance.data) + 
additional_message = attribute_values.get("additional_message") + if additional_message: + instance.data["slack_additional_message"] = additional_message + def main_family_from_instance(self, instance): # TODO yank from integrate """Returns main family of entered instance.""" family = instance.data.get("family") diff --git a/openpype/modules/slack/plugins/publish/integrate_slack_api.py b/openpype/modules/slack/plugins/publish/integrate_slack_api.py index 10bde7d4c0..4e2557ccc7 100644 --- a/openpype/modules/slack/plugins/publish/integrate_slack_api.py +++ b/openpype/modules/slack/plugins/publish/integrate_slack_api.py @@ -1,11 +1,15 @@ import os +import re import six import pyblish.api import copy from datetime import datetime +from abc import ABCMeta, abstractmethod +import time +from openpype.client import OpenPypeMongoConnection +from openpype.pipeline.publish import get_publish_repre_path from openpype.lib.plugin_tools import prepare_template_data -from openpype.lib import OpenPypeMongoConnection class IntegrateSlackAPI(pyblish.api.InstancePlugin): @@ -31,11 +35,16 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): review_path = self._get_review_path(instance) publish_files = set() + message = '' + additional_message = instance.data.get("slack_additional_message") + token = instance.data["slack_token"] + if additional_message: + message = "{} \n".format(additional_message) + users = groups = None for message_profile in instance.data["slack_channel_message_profiles"]: - message = self._get_filled_message(message_profile["message"], - instance, - review_path) - self.log.debug("message:: {}".format(message)) + message += self._get_filled_message(message_profile["message"], + instance, + review_path) if not message: return @@ -49,18 +58,26 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): project = instance.context.data["anatomyData"]["project"]["code"] for channel in message_profile["channels"]: if six.PY2: - msg_id, file_ids = \ - self._python2_call(instance.data["slack_token"], - channel, - message, - publish_files) + client = SlackPython2Operations(token, self.log) else: - msg_id, file_ids = \ - self._python3_call(instance.data["slack_token"], - channel, - message, - publish_files) + client = SlackPython3Operations(token, self.log) + if "@" in message: + cache_key = "__cache_slack_ids" + slack_ids = instance.context.data.get(cache_key, None) + if slack_ids is None: + users, groups = client.get_users_and_groups() + instance.context.data[cache_key] = {} + instance.context.data[cache_key]["users"] = users + instance.context.data[cache_key]["groups"] = groups + else: + users = slack_ids["users"] + groups = slack_ids["groups"] + message = self._translate_users(message, users, groups) + + msg_id, file_ids = client.send_message(channel, + message, + publish_files) if not msg_id: return @@ -95,13 +112,15 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): Reviews might be large, so allow only adding link to message instead of uploading only. 
""" + fill_data = copy.deepcopy(instance.context.data["anatomyData"]) + username = fill_data.get("user") fill_pairs = [ ("asset", instance.data.get("asset", fill_data.get("asset"))), ("subset", instance.data.get("subset", fill_data.get("subset"))), - ("username", instance.data.get("username", - fill_data.get("username"))), + ("user", username), + ("username", username), ("app", instance.data.get("app", fill_data.get("app"))), ("family", instance.data.get("family", fill_data.get("family"))), ("version", str(instance.data.get("version", @@ -110,22 +129,34 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): if review_path: fill_pairs.append(("review_filepath", review_path)) - task_data = instance.data.get("task") - if not task_data: - task_data = fill_data.get("task") - for key, value in task_data.items(): - fill_key = "task[{}]".format(key) - fill_pairs.append((fill_key, value)) - fill_pairs.append(("task", task_data["name"])) + task_data = ( + copy.deepcopy(instance.data.get("anatomyData", {})).get("task") + or fill_data.get("task") + ) + if not isinstance(task_data, dict): + # fallback for legacy - if task_data is only task name + task_data["name"] = task_data + if task_data: + if ( + "{task}" in message_templ + or "{Task}" in message_templ + or "{TASK}" in message_templ + ): + fill_pairs.append(("task", task_data["name"])) + + else: + for key, value in task_data.items(): + fill_key = "task[{}]".format(key) + fill_pairs.append((fill_key, value)) - self.log.debug("fill_pairs ::{}".format(fill_pairs)) multiple_case_variants = prepare_template_data(fill_pairs) fill_data.update(multiple_case_variants) - - message = None + message = '' try: - message = message_templ.format(**fill_data) + message = self._escape_missing_keys(message_templ, fill_data).\ + format(**fill_data) except Exception: + # shouldn't happen self.log.warning( "Some keys are missing in {}".format(message_templ), exc_info=True) @@ -134,51 +165,282 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): def _get_thumbnail_path(self, instance): """Returns abs url for thumbnail if present in instance repres""" - published_path = None + thumbnail_path = None for repre in instance.data.get("representations", []): if repre.get('thumbnail') or "thumbnail" in repre.get('tags', []): - if os.path.exists(repre["published_path"]): - published_path = repre["published_path"] + repre_thumbnail_path = get_publish_repre_path( + instance, repre, False + ) + if os.path.exists(repre_thumbnail_path): + thumbnail_path = repre_thumbnail_path break - return published_path + return thumbnail_path def _get_review_path(self, instance): """Returns abs url for review if present in instance repres""" - published_path = None + review_path = None for repre in instance.data.get("representations", []): tags = repre.get('tags', []) if (repre.get("review") or "review" in tags or "burnin" in tags): - if os.path.exists(repre["published_path"]): - published_path = repre["published_path"] + repre_review_path = get_publish_repre_path( + instance, repre, False + ) + if os.path.exists(repre_review_path): + review_path = repre_review_path if "burnin" in tags: # burnin has precedence if exists break - return published_path + return review_path - def _python2_call(self, token, channel, message, publish_files): - from slackclient import SlackClient + def _get_user_id(self, users, user_name): + """Returns internal slack id for user name""" + user_id = None + user_name_lower = user_name.lower() + for user in users: + if (not user.get("deleted") and + (user_name_lower == 
user["name"].lower() or + # bots dont have display_name + user_name_lower == user["profile"].get("display_name", + '').lower() or + user_name_lower == user["profile"].get("real_name", + '').lower())): + user_id = user["id"] + break + return user_id + + def _get_group_id(self, groups, group_name): + """Returns internal group id for string name""" + group_id = None + for group in groups: + if (not group.get("date_delete") and + (group_name.lower() == group["name"].lower() or + group_name.lower() == group["handle"])): + group_id = group["id"] + break + return group_id + + def _translate_users(self, message, users, groups): + """Replace all occurences of @mentions with proper <@name> format.""" + matches = re.findall(r"(?".format(slack_id) + else: + slack_id = self._get_group_id(groups, user_name) + if slack_id: + mention = "".format(slack_id) + if mention: + message = message.replace(orig_user, mention) + + return message + + def _escape_missing_keys(self, message, fill_data): + """Double escapes placeholder which are missing in 'fill_data'""" + placeholder_keys = re.findall(r"\{([^}]+)\}", message) + + fill_keys = [] + for key, value in fill_data.items(): + fill_keys.append(key) + if isinstance(value, dict): + for child_key in value.keys(): + fill_keys.append("{}[{}]".format(key, child_key)) + + not_matched = set(placeholder_keys) - set(fill_keys) + + for not_matched_item in not_matched: + message = message.replace("{}".format(not_matched_item), + "{{{}}}".format(not_matched_item)) + + return message + + +@six.add_metaclass(ABCMeta) +class AbstractSlackOperations: + + @abstractmethod + def _get_users_list(self): + """Return response with user list, different methods Python 2 vs 3""" + raise NotImplementedError + + @abstractmethod + def _get_usergroups_list(self): + """Return response with user list, different methods Python 2 vs 3""" + raise NotImplementedError + + @abstractmethod + def get_users_and_groups(self): + """Return users and groups, different retry in Python 2 vs 3""" + raise NotImplementedError + + @abstractmethod + def send_message(self, channel, message, publish_files): + """Sends message to channel, different methods in Python 2 vs 3""" + pass + + def _get_users(self): + """Parse users.list response into list of users (dicts)""" + first = True + next_page = None + users = [] + while first or next_page: + response = self._get_users_list() + first = False + next_page = response.get("response_metadata").get("next_cursor") + for user in response.get("members"): + users.append(user) + + return users + + def _get_groups(self): + """Parses usergroups.list response into list of groups (dicts)""" + response = self._get_usergroups_list() + groups = [] + for group in response.get("usergroups"): + groups.append(group) + return groups + + def _enrich_error(self, error_str, channel): + """Enhance known errors with more helpful notations.""" + if 'not_in_channel' in error_str: + # there is no file.write.public scope, app must be explicitly in + # the channel + msg = " - application must added to channel '{}'.".format(channel) + error_str += msg + " Ask Slack admin." 
+ return error_str + + +class SlackPython3Operations(AbstractSlackOperations): + + def __init__(self, token, log): + from slack_sdk import WebClient + + self.client = WebClient(token=token) + self.log = log + + def _get_users_list(self): + return self.client.users_list() + + def _get_usergroups_list(self): + return self.client.usergroups_list() + + def get_users_and_groups(self): + from slack_sdk.errors import SlackApiError + while True: + try: + users = self._get_users() + groups = self._get_groups() + break + except SlackApiError as e: + retry_after = e.response.headers.get("Retry-After") + if retry_after: + print( + "Rate limit hit, sleeping for {}".format(retry_after)) + time.sleep(int(retry_after)) + else: + self.log.warning("Cannot pull user info, " + "mentions won't work", exc_info=True) + return [], [] + + return users, groups + + def send_message(self, channel, message, publish_files): + from slack_sdk.errors import SlackApiError + try: + attachment_str = "\n\n Attachment links: \n" + file_ids = [] + for published_file in publish_files: + response = self.client.files_upload( + file=published_file, + filename=os.path.basename(published_file)) + attachment_str += "\n<{}|{}>".format( + response["file"]["permalink"], + os.path.basename(published_file)) + file_ids.append(response["file"]["id"]) + + if publish_files: + message += attachment_str + + response = self.client.chat_postMessage( + channel=channel, + text=message + ) + return response.data["ts"], file_ids + except SlackApiError as e: + # # You will get a SlackApiError if "ok" is False + error_str = self._enrich_error(str(e.response["error"]), channel) + self.log.warning("Error happened {}".format(error_str)) + except Exception as e: + error_str = self._enrich_error(str(e), channel) + self.log.warning("Not SlackAPI error", exc_info=True) + + return None, [] + + +class SlackPython2Operations(AbstractSlackOperations): + + def __init__(self, token, log): + from slackclient import SlackClient + + self.client = SlackClient(token=token) + self.log = log + + def _get_users_list(self): + return self.client.api_call("users.list") + + def _get_usergroups_list(self): + return self.client.api_call("usergroups.list") + + def get_users_and_groups(self): + while True: + try: + users = self._get_users() + groups = self._get_groups() + break + except Exception: + self.log.warning("Cannot pull user info, " + "mentions won't work", exc_info=True) + return [], [] + + return users, groups + + def send_message(self, channel, message, publish_files): try: - client = SlackClient(token) attachment_str = "\n\n Attachment links: \n" file_ids = [] for p_file in publish_files: with open(p_file, 'rb') as pf: - response = client.api_call( + response = self.client.api_call( "files.upload", file=pf, channel=channel, title=os.path.basename(p_file) ) - attachment_str += "\n<{}|{}>".format( - response["file"]["permalink"], - os.path.basename(p_file)) - file_ids.append(response["file"]["id"]) + if response.get("error"): + error_str = self._enrich_error( + str(response.get("error")), + channel) + self.log.warning( + "Error happened: {}".format(error_str)) + else: + attachment_str += "\n<{}|{}>".format( + response["file"]["permalink"], + os.path.basename(p_file)) + file_ids.append(response["file"]["id"]) if publish_files: message += attachment_str - response = client.api_call( + response = self.client.api_call( "chat.postMessage", channel=channel, text=message @@ -195,46 +457,3 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin): self.log.warning("Error happened: 
{}".format(error_str)) return None, [] - - def _python3_call(self, token, channel, message, publish_files): - from slack_sdk import WebClient - from slack_sdk.errors import SlackApiError - try: - client = WebClient(token=token) - attachment_str = "\n\n Attachment links: \n" - file_ids = [] - for published_file in publish_files: - response = client.files_upload( - file=published_file, - filename=os.path.basename(published_file)) - attachment_str += "\n<{}|{}>".format( - response["file"]["permalink"], - os.path.basename(published_file)) - file_ids.append(response["file"]["id"]) - - if publish_files: - message += attachment_str - - response = client.chat_postMessage( - channel=channel, - text=message - ) - return response.data["ts"], file_ids - except SlackApiError as e: - # You will get a SlackApiError if "ok" is False - error_str = self._enrich_error(str(e.response["error"]), channel) - self.log.warning("Error happened {}".format(error_str)) - except Exception as e: - error_str = self._enrich_error(str(e), channel) - self.log.warning("Not SlackAPI error", exc_info=True) - - return None, [] - - def _enrich_error(self, error_str, channel): - """Enhance known errors with more helpful notations.""" - if 'not_in_channel' in error_str: - # there is no file.write.public scope, app must be explicitly in - # the channel - msg = " - application must added to channel '{}'.".format(channel) - error_str += msg + " Ask Slack admin." - return error_str diff --git a/openpype/modules/slack/slack_module.py b/openpype/modules/slack/slack_module.py index 9b2976d766..797ae19f4a 100644 --- a/openpype/modules/slack/slack_module.py +++ b/openpype/modules/slack/slack_module.py @@ -1,14 +1,10 @@ import os -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( - IPluginPaths, - ILaunchHookPaths -) +from openpype.modules import OpenPypeModule, IPluginPaths SLACK_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) -class SlackIntegrationModule(OpenPypeModule, IPluginPaths, ILaunchHookPaths): +class SlackIntegrationModule(OpenPypeModule, IPluginPaths): """Allows sending notification to Slack channels during publishing.""" name = "slack" @@ -18,7 +14,8 @@ class SlackIntegrationModule(OpenPypeModule, IPluginPaths, ILaunchHookPaths): self.enabled = slack_settings["enabled"] def get_launch_hook_paths(self): - """Implementation of `ILaunchHookPaths`.""" + """Implementation for applications launch hooks.""" + return os.path.join(SLACK_MODULE_DIR, "launch_hooks") def get_plugin_paths(self): diff --git a/openpype/modules/standalonepublish_action.py b/openpype/modules/standalonepublish_action.py deleted file mode 100644 index ba53ce9b9e..0000000000 --- a/openpype/modules/standalonepublish_action.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import platform -import subprocess -from openpype.lib import get_openpype_execute_args -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction - - -class StandAlonePublishAction(OpenPypeModule, ITrayAction): - label = "Publish" - name = "standalonepublish_tool" - - def initialize(self, modules_settings): - import openpype - self.enabled = modules_settings[self.name]["enabled"] - self.publish_paths = [ - os.path.join( - openpype.PACKAGE_DIR, - "hosts", - "standalonepublisher", - "plugins", - "publish" - ) - ] - - def tray_init(self): - return - - def on_action_trigger(self): - self.run_standalone_publisher() - - def connect_with_modules(self, enabled_modules): - """Collect publish paths from other modules.""" - publish_paths = 
self.manager.collect_plugin_paths()["publish"] - self.publish_paths.extend(publish_paths) - - def run_standalone_publisher(self): - args = get_openpype_execute_args("standalonepublisher") - kwargs = {} - if platform.system().lower() == "darwin": - new_args = ["open", "-na", args.pop(0), "--args"] - new_args.extend(args) - args = new_args - - detached_process = getattr(subprocess, "DETACHED_PROCESS", None) - if detached_process is not None: - kwargs["creationflags"] = detached_process - - subprocess.Popen(args, **kwargs) diff --git a/openpype/modules/sync_server/providers/abstract_provider.py b/openpype/modules/sync_server/providers/abstract_provider.py index 688a17f14f..e11a8ba71e 100644 --- a/openpype/modules/sync_server/providers/abstract_provider.py +++ b/openpype/modules/sync_server/providers/abstract_provider.py @@ -1,8 +1,8 @@ import abc import six -from openpype.api import Logger +from openpype.lib import Logger -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") @six.add_metaclass(abc.ABCMeta) @@ -10,6 +10,8 @@ class AbstractProvider: CODE = '' LABEL = '' + _log = None + def __init__(self, project_name, site_name, tree=None, presets=None): self.presets = None self.active = False @@ -19,6 +21,12 @@ class AbstractProvider: super(AbstractProvider, self).__init__() + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @abc.abstractmethod def is_active(self): """ @@ -62,7 +70,7 @@ class AbstractProvider: @abc.abstractmethod def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Copy file from 'source_path' to 'target_path' on provider. 
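Because every provider now receives project_name where it used to receive collection, third-party subclasses have to update both upload_file and download_file. A skeleton of a conforming subclass, as a sketch only (NullProvider is invented for illustration, implements just the renamed pair, and reports completion in one step; a real provider streams chunks and must also implement the remaining abstract methods):

from openpype.modules.sync_server.providers.abstract_provider import AbstractProvider


class NullProvider(AbstractProvider):
    CODE = "null"
    LABEL = "Null provider"

    def is_active(self):
        return True

    def upload_file(self, source_path, path,
                    server, project_name, file, representation, site,
                    overwrite=False):
        # Report the transfer as done in a single update; real providers
        # call server.update_db(project_name=...) repeatedly with partial
        # progress while streaming chunks.
        server.update_db(
            project_name=project_name,
            new_file_id=None,
            file=file,
            representation=representation,
            site=site,
            progress=1.0,
        )
        return path

    def download_file(self, source_path, local_path,
                      server, project_name, file, representation, site,
                      overwrite=False):
        return self.upload_file(source_path, local_path,
                                server, project_name, file,
                                representation, site, overwrite)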
@@ -75,7 +83,7 @@ class AbstractProvider: arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): name of project file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -87,7 +95,7 @@ class AbstractProvider: @abc.abstractmethod def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -99,7 +107,7 @@ class AbstractProvider: arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): name of project file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -199,11 +207,11 @@ class AbstractProvider: path = anatomy.fill_root(path) except KeyError: msg = "Error in resolving local root from anatomy" - log.error(msg) + self.log.error(msg) raise ValueError(msg) except IndexError: msg = "Path {} contains unfillable placeholder" - log.error(msg) + self.log.error(msg) raise ValueError(msg) return path diff --git a/openpype/modules/sync_server/providers/dropbox.py b/openpype/modules/sync_server/providers/dropbox.py index f5910299e5..a517e7d847 100644 --- a/openpype/modules/sync_server/providers/dropbox.py +++ b/openpype/modules/sync_server/providers/dropbox.py @@ -2,12 +2,9 @@ import os import dropbox -from openpype.api import Logger from .abstract_provider import AbstractProvider from ..utils import EditableScopes -log = Logger().get_logger("SyncServer") - class DropboxHandler(AbstractProvider): CODE = 'dropbox' @@ -17,23 +14,29 @@ class DropboxHandler(AbstractProvider): self.active = False self.site_name = site_name self.presets = presets + self.dbx = None if not self.presets: - log.info( + self.log.info( "Sync Server: There are no presets for {}.".format(site_name) ) return + if not self.presets.get("enabled"): + self.log.debug("Sync Server: Site {} not enabled for {}.". 
+ format(site_name, project_name)) + return + token = self.presets.get("token", "") if not token: msg = "Sync Server: No access token for dropbox provider" - log.info(msg) + self.log.info(msg) return team_folder_name = self.presets.get("team_folder_name", "") if not team_folder_name: msg = "Sync Server: No team folder name for dropbox provider" - log.info(msg) + self.log.info(msg) return acting_as_member = self.presets.get("acting_as_member", "") @@ -41,19 +44,16 @@ class DropboxHandler(AbstractProvider): msg = ( "Sync Server: No acting member for dropbox provider" ) - log.info(msg) + self.log.info(msg) return - self.dbx = None - - if self.presets["enabled"]: - try: - self.dbx = self._get_service( - token, acting_as_member, team_folder_name - ) - except Exception as e: - log.info("Could not establish dropbox object: {}".format(e)) - return + try: + self.dbx = self._get_service( + token, acting_as_member, team_folder_name + ) + except Exception as e: + self.log.info("Could not establish dropbox object: {}".format(e)) + return super(AbstractProvider, self).__init__() @@ -165,7 +165,7 @@ class DropboxHandler(AbstractProvider): Returns: (boolean) """ - return self.presets["enabled"] and self.dbx is not None + return self.presets.get("enabled") and self.dbx is not None @classmethod def get_configurable_items(cls): @@ -221,7 +221,7 @@ class DropboxHandler(AbstractProvider): return False def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Copy file from 'source_path' to 'target_path' on provider. @@ -234,7 +234,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -287,7 +287,7 @@ class DropboxHandler(AbstractProvider): cursor.offset = f.tell() server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -298,7 +298,7 @@ class DropboxHandler(AbstractProvider): return path def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download file from provider into local system @@ -310,7 +310,7 @@ class DropboxHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -334,7 +334,7 @@ class DropboxHandler(AbstractProvider): self.dbx.files_download_to_file(local_path, source_path) server.update_db( - collection=collection, + project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -445,7 +445,7 @@ class DropboxHandler(AbstractProvider): path = anatomy.fill_root(path) except KeyError: msg = "Error in resolving local root from anatomy" - log.error(msg) + self.log.error(msg) raise ValueError(msg) return path diff --git a/openpype/modules/sync_server/providers/gdrive.py b/openpype/modules/sync_server/providers/gdrive.py index 0b586613b5..3d94e5dff7 100644 --- a/openpype/modules/sync_server/providers/gdrive.py +++ 
b/openpype/modules/sync_server/providers/gdrive.py @@ -5,12 +5,12 @@ import sys import six import platform -from openpype.api import Logger -from openpype.api import get_system_settings +from openpype.lib import Logger +from openpype.settings import get_system_settings from .abstract_provider import AbstractProvider from ..utils import time_function, ResumableError -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("GDriveHandler") try: from googleapiclient.discovery import build @@ -69,24 +69,49 @@ class GDriveHandler(AbstractProvider): self.presets = presets if not self.presets: - log.info("Sync Server: There are no presets for {}.". - format(site_name)) + self.log.info( + "Sync Server: There are no presets for {}.".format(site_name) + ) + return + + if not self.presets.get("enabled"): + self.log.debug( + "Sync Server: Site {} not enabled for {}.".format( + site_name, project_name + ) + ) + return + + current_platform = platform.system().lower() + cred_path = self.presets.get("credentials_url", {}). \ + get(current_platform) or '' + + if not cred_path: + msg = "Sync Server: Please, fill the credentials for gdrive "\ + "provider for platform '{}' !".format(current_platform) + self.log.info(msg) + return + + try: + cred_path = cred_path.format(**os.environ) + except KeyError as e: + self.log.info(( + "Sync Server: The key(s) {} does not exist in the " + "environment variables" + ).format(" ".join(e.args))) return - cred_path = self.presets.get("credentials_url", {}).\ - get(platform.system().lower()) or '' if not os.path.exists(cred_path): msg = "Sync Server: No credentials for gdrive provider " + \ "for '{}' on path '{}'!".format(site_name, cred_path) - log.info(msg) + self.log.info(msg) return self.service = None - if self.presets["enabled"]: - self.service = self._get_gd_service(cred_path) + self.service = self._get_gd_service(cred_path) - self._tree = tree - self.active = True + self._tree = tree + self.active = True def is_active(self): """ @@ -94,7 +119,7 @@ class GDriveHandler(AbstractProvider): Returns: (boolean) """ - return self.presets["enabled"] and self.service is not None + return self.presets.get("enabled") and self.service is not None @classmethod def get_system_settings_schema(cls): @@ -232,7 +257,7 @@ class GDriveHandler(AbstractProvider): return folder_id def upload_file(self, source_path, path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -245,7 +270,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -299,28 +324,31 @@ class GDriveHandler(AbstractProvider): fields='id') media.stream() - log.debug("Start Upload! {}".format(source_path)) + self.log.debug("Start Upload! {}".format(source_path)) last_tick = status = response = None status_val = 0 while response is None: - if server.is_representation_paused(representation['_id'], - check_parents=True, - project_name=collection): - raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() - log.debug("Uploaded %d%%." 
% + self.log.debug("Uploaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, site=site, progress=status_val ) + if server.is_representation_paused( + project_name, + representation['_id'], + site, + check_parents=True + ): + raise ValueError("Paused during process, please redo.") status, response = request.next_chunk() except errors.HttpError as ex: @@ -331,15 +359,16 @@ class GDriveHandler(AbstractProvider): if 'has not granted' in ex._get_reason().strip(): raise PermissionError(ex._get_reason().strip()) - log.warning("Forbidden received, hit quota. " - "Injecting 60s delay.") + self.log.warning( + "Forbidden received, hit quota. Injecting 60s delay." + ) time.sleep(60) return False raise return response['id'] def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'local_path'. @@ -353,7 +382,7 @@ class GDriveHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -389,24 +418,27 @@ class GDriveHandler(AbstractProvider): last_tick = status = response = None status_val = 0 while response is None: - if server.is_representation_paused(representation['_id'], - check_parents=True, - project_name=collection): - raise ValueError("Paused during process, please redo.") if status: status_val = float(status.progress()) if not last_tick or \ time.time() - last_tick >= server.LOG_PROGRESS_SEC: last_tick = time.time() - log.debug("Downloaded %d%%." % + self.log.debug("Downloaded %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, site=site, progress=status_val ) + if server.is_representation_paused( + project_name, + representation['_id'], + site, + check_parents=True + ): + raise ValueError("Paused during process, please redo.") status, response = downloader.next_chunk() return target_name @@ -610,9 +642,9 @@ class GDriveHandler(AbstractProvider): ["gdrive"] ) except KeyError: - log.info(("Sync Server: There are no presets for Gdrive " + - "provider."). - format(str(provider_presets))) + log.info(( + "Sync Server: There are no presets for Gdrive provider." + ).format(str(provider_presets))) return return provider_presets @@ -685,7 +717,7 @@ class GDriveHandler(AbstractProvider): roots[self.MY_DRIVE_STR] = self.service.files() \ .get(fileId='root').execute() except errors.HttpError: - log.warning("HttpError in sync loop, " + self.log.warning("HttpError in sync loop, " "trying next loop", exc_info=True) raise ResumableError @@ -708,7 +740,7 @@ class GDriveHandler(AbstractProvider): Returns: (dictionary) path as a key, folder id as a value """ - log.debug("build_tree len {}".format(len(folders))) + self.log.debug("build_tree len {}".format(len(folders))) if not self.root: # build only when necessary, could be expensive self.root = self._prepare_root_info() @@ -760,9 +792,9 @@ class GDriveHandler(AbstractProvider): loop_cnt += 1 if len(no_parents_yet) > 0: - log.debug("Some folders path are not resolved {}". 
+ self.log.debug("Some folders path are not resolved {}". format(no_parents_yet)) - log.debug("Remove deleted folders from trash.") + self.log.debug("Remove deleted folders from trash.") return tree diff --git a/openpype/modules/sync_server/providers/local_drive.py b/openpype/modules/sync_server/providers/local_drive.py index 68f604b39c..98bdb487da 100644 --- a/openpype/modules/sync_server/providers/local_drive.py +++ b/openpype/modules/sync_server/providers/local_drive.py @@ -4,10 +4,12 @@ import shutil import threading import time -from openpype.api import Logger, Anatomy +from openpype.lib import Logger +from openpype.lib.local_settings import get_local_site_id +from openpype.pipeline import Anatomy from .abstract_provider import AbstractProvider -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class LocalDriveHandler(AbstractProvider): @@ -81,7 +83,7 @@ class LocalDriveHandler(AbstractProvider): return editable def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False, direction="Upload"): """ Copies file from 'source_path' to 'target_path' @@ -94,7 +96,7 @@ class LocalDriveHandler(AbstractProvider): thread = threading.Thread(target=self._copy, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, direction) else: if os.path.exists(target_path): @@ -104,13 +106,14 @@ class LocalDriveHandler(AbstractProvider): return os.path.basename(target_path) def download_file(self, source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Download a file form 'source_path' to 'local_path' """ return self.upload_file(source_path, local_path, - server, collection, file, representation, site, + server, project_name, file, + representation, site, overwrite, direction="Download") def delete_file(self, path): @@ -187,7 +190,7 @@ class LocalDriveHandler(AbstractProvider): except shutil.SameFileError: print("same files, skipping") - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -203,7 +206,7 @@ class LocalDriveHandler(AbstractProvider): status_val = target_file_size / source_file_size last_tick = time.time() log.debug(direction + "ed %d%%." 
% int(status_val * 100)) - server.update_db(collection=collection, + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, @@ -218,6 +221,6 @@ class LocalDriveHandler(AbstractProvider): def _normalize_site_name(self, site_name): """Transform user id to 'local' for Local settings""" - if site_name != 'studio': + if site_name == get_local_site_id(): return 'local' return site_name diff --git a/openpype/modules/sync_server/providers/sftp.py b/openpype/modules/sync_server/providers/sftp.py index 49b87b14ec..c41edc78bc 100644 --- a/openpype/modules/sync_server/providers/sftp.py +++ b/openpype/modules/sync_server/providers/sftp.py @@ -4,10 +4,10 @@ import time import threading import platform -from openpype.api import Logger -from openpype.api import get_system_settings +from openpype.lib import Logger +from openpype.settings import get_system_settings from .abstract_provider import AbstractProvider -log = Logger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer-SFTPHandler") pysftp = None try: @@ -43,8 +43,9 @@ class SFTPHandler(AbstractProvider): self.presets = presets if not self.presets: - log.warning("Sync Server: There are no presets for {}.". - format(site_name)) + self.log.warning( + "Sync Server: There are no presets for {}.".format(site_name) + ) return # store to instance for reconnect @@ -71,7 +72,7 @@ class SFTPHandler(AbstractProvider): Returns: (boolean) """ - return self.presets["enabled"] and self.conn is not None + return self.presets.get("enabled") and self.conn is not None @classmethod def get_system_settings_schema(cls): @@ -222,7 +223,7 @@ class SFTPHandler(AbstractProvider): return os.path.basename(path) def upload_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Uploads single file from 'source_path' to destination 'path'. @@ -235,7 +236,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -256,7 +257,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._upload, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "upload") return os.path.basename(target_path) @@ -267,7 +268,7 @@ class SFTPHandler(AbstractProvider): conn.put(source_path, target_path) def download_file(self, source_path, target_path, - server, collection, file, representation, site, + server, project_name, file, representation, site, overwrite=False): """ Downloads single file from 'source_path' (remote) to 'target_path'. 
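
The handlers in this patch (dropbox, gdrive, local_drive, and sftp below) all share one transfer pattern: run the copy in a worker thread or chunk loop, periodically report 0-1 progress through server.update_db, and honour pause requests between chunks. The following is a minimal sketch of the local-drive variant, not code from this patch; 'report' is a hypothetical callback standing in for server.update_db.

import os
import shutil
import threading
import time

def copy_with_progress(source_path, target_path, report, poll_sec=0.5):
    """Copy in a worker thread; poll target size to report 0-1 progress."""
    thread = threading.Thread(target=shutil.copy,
                              args=(source_path, target_path))
    thread.start()
    source_size = os.path.getsize(source_path)
    while thread.is_alive():
        if os.path.exists(target_path):
            # same ratio _mark_progress uses: target size / source size
            report(os.path.getsize(target_path) / source_size)
        time.sleep(poll_sec)
    thread.join()
    report(1.0)
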
@@ -281,7 +282,7 @@ class SFTPHandler(AbstractProvider): arguments for saving progress: server (SyncServer): server instance to call update_db on - collection (str): name of collection + project_name (str): file (dict): info about uploaded file (matches structure from db) representation (dict): complete repre containing 'file' site (str): site name @@ -302,7 +303,7 @@ class SFTPHandler(AbstractProvider): thread = threading.Thread(target=self._download, args=(source_path, target_path)) thread.start() - self._mark_progress(collection, file, representation, server, + self._mark_progress(project_name, file, representation, server, site, source_path, target_path, "download") return os.path.basename(target_path) @@ -423,9 +424,9 @@ class SFTPHandler(AbstractProvider): return pysftp.Connection(**conn_params) except (paramiko.ssh_exception.SSHException, pysftp.exceptions.ConnectionException): - log.warning("Couldn't connect", exc_info=True) + self.log.warning("Couldn't connect", exc_info=True) - def _mark_progress(self, collection, file, representation, server, site, + def _mark_progress(self, project_name, file, representation, server, site, source_path, target_path, direction): """ Updates progress field in DB by values 0-1. @@ -445,8 +446,8 @@ class SFTPHandler(AbstractProvider): time.time() - last_tick >= server.LOG_PROGRESS_SEC: status_val = target_file_size / source_file_size last_tick = time.time() - log.debug(direction + "ed %d%%." % int(status_val * 100)) - server.update_db(collection=collection, + self.log.debug(direction + "ed %d%%." % int(status_val * 100)) + server.update_db(project_name=project_name, new_file_id=None, file=file, representation=representation, diff --git a/openpype/modules/sync_server/resources/disabled.png b/openpype/modules/sync_server/resources/disabled.png new file mode 100644 index 0000000000..e036d7ef6a Binary files /dev/null and b/openpype/modules/sync_server/resources/disabled.png differ diff --git a/openpype/modules/sync_server/rest_api.py b/openpype/modules/sync_server/rest_api.py new file mode 100644 index 0000000000..a7d9dd80b7 --- /dev/null +++ b/openpype/modules/sync_server/rest_api.py @@ -0,0 +1,37 @@ +from aiohttp.web_response import Response +from openpype.lib import Logger + + +class SyncServerModuleRestApi: + """ + REST API endpoint used for calling from hosts when context change + happens in Workfile app. 
+ """ + + def __init__(self, user_module, server_manager): + self._log = None + self.module = user_module + self.server_manager = server_manager + + self.prefix = "/sync_server" + + self.register() + + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + def register(self): + self.server_manager.add_route( + "POST", + self.prefix + "/reset_timer", + self.reset_timer, + ) + + async def reset_timer(self, _request): + """Force timer to run immediately.""" + self.module.reset_timer() + + return Response(status=200) diff --git a/openpype/modules/sync_server/sync_server.py b/openpype/modules/sync_server/sync_server.py index 22eed01ef3..aef3623efa 100644 --- a/openpype/modules/sync_server/sync_server.py +++ b/openpype/modules/sync_server/sync_server.py @@ -6,15 +6,12 @@ import concurrent.futures from concurrent.futures._base import CancelledError from .providers import lib -from openpype.lib import PypeLogger +from openpype.lib import Logger from .utils import SyncStatus, ResumableError -log = PypeLogger().get_logger("SyncServer") - - -async def upload(module, collection, file, representation, provider_name, +async def upload(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Upload single 'file' of a 'representation' to 'provider'. @@ -31,7 +28,7 @@ async def upload(module, collection, file, representation, provider_name, Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source db file (dictionary): of file from representation in Mongo representation (dictionary): of representation provider_name (string): gdrive, gdc etc. @@ -47,15 +44,16 @@ async def upload(module, collection, file, representation, provider_name, # thread can do that at a time, upload/download to prepared # structure should be run in parallel remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") try: - local_file_path, remote_file_path = resolve_paths(module, - file_path, collection, remote_site_name, remote_handler + local_file_path, remote_file_path = resolve_paths( + module, file_path, project_name, + remote_site_name, remote_handler ) except Exception as exp: print(exp) @@ -74,27 +72,28 @@ async def upload(module, collection, file, representation, provider_name, local_file_path, remote_file_path, module, - collection, + project_name, file, representation, remote_site_name, True ) - module.handle_alternate_site(collection, representation, remote_site_name, + module.handle_alternate_site(project_name, representation, + remote_site_name, file["_id"], file_id) return file_id -async def download(module, collection, file, representation, provider_name, +async def download(module, project_name, file, representation, provider_name, remote_site_name, tree=None, preset=None): """ Downloads file to local folder denoted in representation.Context. 
Args: module(SyncServerModule): object to run SyncServerModule API - collection (str): source collection + project_name (str): source file (dictionary) : info about processed file representation (dictionary): repr that 'file' belongs to provider_name (string): 'gdrive' etc @@ -108,20 +107,20 @@ async def download(module, collection, file, representation, provider_name, """ with module.lock: remote_handler = lib.factory.get_provider(provider_name, - collection, + project_name, remote_site_name, tree=tree, presets=preset) file_path = file.get("path", "") local_file_path, remote_file_path = resolve_paths( - module, file_path, collection, remote_site_name, remote_handler + module, file_path, project_name, remote_site_name, remote_handler ) local_folder = os.path.dirname(local_file_path) os.makedirs(local_folder, exist_ok=True) - local_site = module.get_active_site(collection) + local_site = module.get_active_site(project_name) loop = asyncio.get_running_loop() file_id = await loop.run_in_executor(None, @@ -129,20 +128,20 @@ async def download(module, collection, file, representation, provider_name, remote_file_path, local_file_path, module, - collection, + project_name, file, representation, local_site, True ) - module.handle_alternate_site(collection, representation, local_site, + module.handle_alternate_site(project_name, representation, local_site, file["_id"], file_id) return file_id -def resolve_paths(module, file_path, collection, +def resolve_paths(module, file_path, project_name, remote_site_name=None, remote_handler=None): """ Returns tuple of local and remote file paths with {root} @@ -153,7 +152,7 @@ def resolve_paths(module, file_path, collection, Args: module(SyncServerModule): object to run SyncServerModule API file_path(string): path with {root} - collection(string): project name + project_name(string): project name remote_site_name(string): remote site remote_handler(AbstractProvider): implementation Returns: @@ -164,13 +163,13 @@ def resolve_paths(module, file_path, collection, remote_file_path = remote_handler.resolve_path(file_path) local_handler = lib.factory.get_provider( - 'local_drive', collection, module.get_active_site(collection)) + 'local_drive', project_name, module.get_active_site(project_name)) local_file_path = local_handler.resolve_path(file_path) return local_file_path, remote_file_path -def site_is_working(module, project_name, site_name): +def _site_is_working(module, project_name, site_name, site_config): """ Confirm that 'site_name' is configured correctly for 'project_name'. @@ -180,54 +179,17 @@ def site_is_working(module, project_name, site_name): module (SyncServerModule) project_name(string): site_name(string): + site_config (dict): configuration for site from Settings Returns (bool) """ - if _get_configured_sites(module, project_name).get(site_name): - return True - return False + provider = module.get_provider_for_site(site=site_name) + handler = lib.factory.get_provider(provider, + project_name, + site_name, + presets=site_config) - -def _get_configured_sites(module, project_name): - """ - Loops through settings and looks for configured sites and checks - its handlers for particular 'project_name'. 
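
resolve_paths above leaves the '{root}' placeholder to each handler, so the same stored path maps to different physical locations per site. The snippet below is illustration only: the root dicts are hypothetical, not the real Settings schema, and the handlers actually resolve through Anatomy.fill_root rather than plain str.format.

local_roots = {"work": "C:/work"}
remote_roots = {"work": "/mnt/studio/work"}

template = "{root[work]}/projectA/asset/v001/file.exr"

local_file_path = template.format(root=local_roots)
remote_file_path = template.format(root=remote_roots)
# 'C:/work/projectA/...' locally vs '/mnt/studio/work/projectA/...' remotely
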
- - Args: - project_setting(dict): dictionary from Settings - only_project_name(string, optional): only interested in - particular project - Returns: - (dict of dict) - {'ProjectA': {'studio':True, 'gdrive':False}} - """ - settings = module.get_sync_project_setting(project_name) - return _get_configured_sites_from_setting(module, project_name, settings) - - -def _get_configured_sites_from_setting(module, project_name, project_setting): - if not project_setting.get("enabled"): - return {} - - initiated_handlers = {} - configured_sites = {} - all_sites = module._get_default_site_configs() - all_sites.update(project_setting.get("sites")) - for site_name, config in all_sites.items(): - provider = module.get_provider_for_site(site=site_name) - handler = initiated_handlers.get((provider, site_name)) - if not handler: - handler = lib.factory.get_provider(provider, - project_name, - site_name, - presets=config) - initiated_handlers[(provider, site_name)] = \ - handler - - if handler.is_active(): - configured_sites[site_name] = True - - return configured_sites + return handler.is_active() class SyncServerThread(threading.Thread): @@ -236,6 +198,8 @@ class SyncServerThread(threading.Thread): Stopped when tray is closed. """ def __init__(self, module): + self.log = Logger.get_logger(self.__class__.__name__) + super(SyncServerThread, self).__init__() self.module = module self.loop = None @@ -247,17 +211,17 @@ class SyncServerThread(threading.Thread): self.is_running = True try: - log.info("Starting Sync Server") + self.log.info("Starting Sync Server") self.loop = asyncio.new_event_loop() # create new loop for thread asyncio.set_event_loop(self.loop) self.loop.set_default_executor(self.executor) asyncio.ensure_future(self.check_shutdown(), loop=self.loop) asyncio.ensure_future(self.sync_loop(), loop=self.loop) - log.info("Sync Server Started") + self.log.info("Sync Server Started") self.loop.run_forever() except Exception: - log.warning( + self.log.warning( "Sync Server service has failed", exc_info=True ) finally: @@ -269,8 +233,8 @@ class SyncServerThread(threading.Thread): - gets list of collections in DB - gets list of active remote providers (has configuration, credentials) - - for each collection it looks for representations that should - be synced + - for each project_name it looks for representations that + should be synced - synchronize found collections - update representations - fills error messages for exceptions - waits X seconds and repeat @@ -280,20 +244,20 @@ class SyncServerThread(threading.Thread): while self.is_running and not self.module.is_paused(): try: import time - start_time = None + start_time = time.time() self.module.set_sync_project_settings() # clean cache - for collection, preset in self.module.sync_project_settings.\ - items(): - if collection not in self.module.get_enabled_projects(): - continue + project_name = None + enabled_projects = self.module.get_enabled_projects() + for project_name in enabled_projects: + preset = self.module.sync_project_settings[project_name] - start_time = time.time() - local_site, remote_site = self._working_sites(collection) + local_site, remote_site = self._working_sites(project_name, + preset) if not all([local_site, remote_site]): continue sync_repres = self.module.get_sync_representations( - collection, + project_name, local_site, remote_site ) @@ -311,7 +275,7 @@ class SyncServerThread(threading.Thread): remote_provider = \ self.module.get_provider_for_site(site=remote_site) handler = lib.factory.get_provider(remote_provider, - 
collection, + project_name, remote_site, presets=site_preset) limit = lib.factory.get_provider_batch_limit( @@ -320,9 +284,6 @@ class SyncServerThread(threading.Thread): # building folder tree structure in memory # call only if needed, eg. DO_UPLOAD or DO_DOWNLOAD for sync in sync_repres: - if self.module.\ - is_representation_paused(sync['_id']): - continue if limit <= 0: continue files = sync.get("files") or [] @@ -342,7 +303,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( upload(self.module, - collection, + project_name, file, sync, remote_provider, @@ -354,7 +315,7 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, remote_site, - collection + project_name )) processed_file_path.add(file_path) if status == SyncStatus.DO_DOWNLOAD: @@ -362,7 +323,7 @@ class SyncServerThread(threading.Thread): limit -= 1 task = asyncio.create_task( download(self.module, - collection, + project_name, file, sync, remote_provider, @@ -374,23 +335,24 @@ class SyncServerThread(threading.Thread): files_processed_info.append((file, sync, local_site, - collection + project_name )) processed_file_path.add(file_path) - log.debug("Sync tasks count {}". - format(len(task_files_to_process))) + self.log.debug("Sync tasks count {}".format( + len(task_files_to_process) + )) files_created = await asyncio.gather( *task_files_to_process, return_exceptions=True) for file_id, info in zip(files_created, files_processed_info): - file, representation, site, collection = info + file, representation, site, project_name = info error = None if isinstance(file_id, BaseException): error = str(file_id) file_id = None - self.module.update_db(collection, + self.module.update_db(project_name, file_id, file, representation, @@ -398,28 +360,31 @@ class SyncServerThread(threading.Thread): error) duration = time.time() - start_time - log.debug("One loop took {:.2f}s".format(duration)) + self.log.debug("One loop took {:.2f}s".format(duration)) - delay = self.module.get_loop_delay(collection) - log.debug("Waiting for {} seconds to new loop".format(delay)) + delay = self.module.get_loop_delay(project_name) + self.log.debug( + "Waiting for {} seconds to new loop".format(delay) + ) self.timer = asyncio.create_task(self.run_timer(delay)) await asyncio.gather(self.timer) except ConnectionResetError: - log.warning("ConnectionResetError in sync loop, " - "trying next loop", - exc_info=True) + self.log.warning( + "ConnectionResetError in sync loop, trying next loop", + exc_info=True) except CancelledError: # just stopping server pass except ResumableError: - log.warning("ResumableError in sync loop, " - "trying next loop", - exc_info=True) + self.log.warning( + "ResumableError in sync loop, trying next loop", + exc_info=True) except Exception: self.stop() - log.warning("Unhandled except. in sync loop, stopping server", - exc_info=True) + self.log.warning( + "Unhandled except. 
in sync loop, stopping server", + exc_info=True) def stop(self): """Sets is_running flag to false, 'check_shutdown' shuts server down""" @@ -432,16 +397,17 @@ class SyncServerThread(threading.Thread): while self.is_running: if self.module.long_running_tasks: task = self.module.long_running_tasks.pop() - log.info("starting long running") + self.log.info("starting long running") await self.loop.run_in_executor(None, task["func"]) - log.info("finished long running") + self.log.info("finished long running") self.module.projects_processed.remove(task["project_name"]) await asyncio.sleep(0.5) tasks = [task for task in asyncio.all_tasks() if task is not asyncio.current_task()] list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks results = await asyncio.gather(*tasks, return_exceptions=True) - log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + self.log.debug( + f'Finished awaiting cancelled tasks, results: {results}...') await self.loop.shutdown_asyncgens() # to really make sure everything else has time to stop self.executor.shutdown(wait=True) @@ -454,29 +420,35 @@ class SyncServerThread(threading.Thread): def reset_timer(self): """Called when waiting for next loop should be skipped""" - log.debug("Resetting timer") + self.log.debug("Resetting timer") if self.timer: self.timer.cancel() self.timer = None - def _working_sites(self, collection): - if self.module.is_project_paused(collection): - log.debug("Both sites same, skipping") + def _working_sites(self, project_name, sync_config): + if self.module.is_project_paused(project_name): + self.log.debug("Both sites same, skipping") return None, None - local_site = self.module.get_active_site(collection) - remote_site = self.module.get_remote_site(collection) + local_site = self.module.get_active_site(project_name) + remote_site = self.module.get_remote_site(project_name) if local_site == remote_site: - log.debug("{}-{} sites same, skipping".format(local_site, - remote_site)) + self.log.debug("{}-{} sites same, skipping".format( + local_site, remote_site)) return None, None - configured_sites = _get_configured_sites(self.module, collection) - if not all([local_site in configured_sites, - remote_site in configured_sites]): - log.debug("Some of the sites {} - {} is not ".format(local_site, - remote_site) + - "working properly") + local_site_config = sync_config.get('sites')[local_site] + remote_site_config = sync_config.get('sites')[remote_site] + if not all([_site_is_working(self.module, project_name, local_site, + local_site_config), + _site_is_working(self.module, project_name, remote_site, + remote_site_config)]): + self.log.debug( + "Some of the sites {} - {} is not working properly".format( + local_site, remote_site + ) + ) + return None, None return local_site, remote_site diff --git a/openpype/modules/sync_server/sync_server_module.py b/openpype/modules/sync_server/sync_server_module.py index caf58503f1..ba0abe7d3b 100644 --- a/openpype/modules/sync_server/sync_server_module.py +++ b/openpype/modules/sync_server/sync_server_module.py @@ -1,32 +1,39 @@ import os -from bson.objectid import ObjectId +import sys +import time from datetime import datetime import threading import platform import copy -from collections import deque +import signal +from collections import deque, defaultdict -from avalon.api import AvalonMongoDB +import click +from bson.objectid import ObjectId -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayModule -from openpype.api import ( - Anatomy, +from 
openpype.client import ( + get_projects, + get_representations, + get_representation_by_id, +) +from openpype.modules import OpenPypeModule, ITrayModule +from openpype.settings import ( get_project_settings, get_system_settings, - get_local_site_id) -from openpype.lib import PypeLogger +) +from openpype.lib import Logger, get_local_site_id +from openpype.pipeline import AvalonMongoDB, Anatomy from openpype.settings.lib import ( get_default_anatomy_settings, - get_anatomy_settings) + get_anatomy_settings +) from .providers.local_drive import LocalDriveHandler from .providers import lib -from .utils import time_function, SyncStatus +from .utils import time_function, SyncStatus, SiteAlreadyPresentError - -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class SyncServerModule(OpenPypeModule, ITrayModule): @@ -117,7 +124,6 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.action_show_widget = None self._paused = False self._paused_projects = set() - self._paused_representations = set() self._anatomies = {} self._connection = None @@ -128,44 +134,55 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.projects_processed = set() """ Start of Public API """ - def add_site(self, collection, representation_id, site_name=None, - force=False): + def add_site(self, project_name, representation_id, site_name=None, + force=False, priority=None, reset_timer=False): """ - Adds new site to representation to be synced. + Adds new site to representation to be synced. - 'collection' must have synchronization enabled (globally or - project only) + 'project_name' must have synchronization enabled (globally or + project only) - Used as a API endpoint from outside applications (Loader etc) + Used as an API endpoint from outside applications (Loader etc). - Args: - collection (string): project name (must match DB) - representation_id (string): MongoDB _id value - site_name (string): name of configured and active site - force (bool): reset site if exists + Use 'force' to reset existing site. - Returns: - throws ValueError if any issue + Args: + project_name (string): project name (must match DB) + representation_id (string): MongoDB _id value + site_name (string): name of configured and active site + force (bool): reset site if exists + priority (int): set priority + reset_timer (bool): if delay timer should be reset, eg. 
user mark + some representation to be synced manually + + Throws: + SiteAlreadyPresentError - if adding already existing site and + not 'force' + ValueError - other errors (repre not found, misconfiguration) """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") if not site_name: site_name = self.DEFAULT_SITE - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, - site_name=site_name, force=force) + site_name=site_name, + force=force, + priority=priority) - # public facing API - def remove_site(self, collection, representation_id, site_name, + if reset_timer: + self.reset_timer() + + def remove_site(self, project_name, representation_id, site_name, remove_local_files=False): """ Removes 'site_name' for particular 'representation_id' on - 'collection' + 'project_name' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site remove_local_files (bool): remove only files for 'local_id' @@ -174,19 +191,164 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: throws ValueError if any issue """ - if not self.get_sync_project_setting(collection): + if not self.get_sync_project_setting(project_name): raise ValueError("Project not configured") - self.reset_site_on_representation(collection, + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, remove=True) if remove_local_files: - self._remove_local_file(collection, representation_id, site_name) + self._remove_local_file(project_name, representation_id, site_name) - def clear_project(self, collection, site_name): + def compute_resource_sync_sites(self, project_name): + """Get available resource sync sites state for publish process. + + Returns dict with prepared state of sync sites for 'project_name'. + It checks if Site Sync is enabled, handles alternative sites. + Publish process stores this dictionary as a part of representation + document in DB. + + Example: + [ + { + 'name': '42abbc09-d62a-44a4-815c-a12cd679d2d7', + 'created_dt': datetime.datetime(2022, 3, 30, 12, 16, 9, 778637) + }, + {'name': 'studio'}, + {'name': 'SFTP'} + ] -- representation is published locally, artist or Settings have set + remote site as 'studio'. 'SFTP' is alternate site to 'studio'. Eg. + whenever file is on 'studio', it is also on 'SFTP'. 
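
To make the docstring example above concrete, this is the document shape compute_resource_sync_sites produces (site names are hypothetical): the publishing site is marked created, while remote and alternate sites are bare skeletons the sync loop will fill in. create_metadata below matches the helper defined in the method itself.

from datetime import datetime

def create_metadata(name, created=True):
    metadata = {"name": name}
    if created:
        metadata["created_dt"] = datetime.now()
    return metadata

attached_sites = {
    "local_42abbc09": create_metadata("local_42abbc09"),  # published here
    "studio": create_metadata("studio", created=False),   # remote site
    "SFTP": create_metadata("SFTP", created=False),       # alternate of 'studio'
}
sites = list(attached_sites.values())  # stored on the representation's files
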
""" - Clear 'collection' of 'site_name' and its local files + + def create_metadata(name, created=True): + """Create sync site metadata for site with `name`""" + metadata = {"name": name} + if created: + metadata["created_dt"] = datetime.now() + return metadata + + if ( + not self.sync_system_settings["enabled"] or + not self.sync_project_settings[project_name]["enabled"]): + return [create_metadata(self.DEFAULT_SITE)] + + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) + + # Attached sites metadata by site name + # That is the local site, remote site, the always accesible sites + # and their alternate sites (alias of sites with different protocol) + attached_sites = dict() + attached_sites[local_site] = create_metadata(local_site) + + if remote_site and remote_site not in attached_sites: + attached_sites[remote_site] = create_metadata(remote_site, + created=False) + + attached_sites = self._add_alternative_sites(attached_sites) + # add skeleton for sites where it should be always synced to + # usually it would be a backup site which is handled by separate + # background process + for site in self._get_always_accessible_sites(project_name): + if site not in attached_sites: + attached_sites[site] = create_metadata(site, created=False) + + return list(attached_sites.values()) + + def _get_always_accessible_sites(self, project_name): + """Sites that synced to as a part of background process. + + Artist machine doesn't handle those, explicit Tray with that site name + as a local id must be running. + Example is dropbox site serving as a backup solution + """ + always_accessible_sites = ( + self.get_sync_project_setting(project_name)["config"]. + get("always_accessible_on", []) + ) + return [site.strip() for site in always_accessible_sites] + + def _add_alternative_sites(self, attached_sites): + """Add skeleton document for alternative sites + + Each new configured site in System Setting could serve as a alternative + site, it's a kind of alias. It means that files on 'a site' are + physically accessible also on 'a alternative' site. + Example is sftp site serving studio files via sftp protocol, physically + file is only in studio, sftp server has this location mounted. + """ + additional_sites = self.sync_system_settings.get("sites", {}) + + alt_site_pairs = self._get_alt_site_pairs(additional_sites) + + for site_name in additional_sites.keys(): + # Get alternate sites (stripped names) for this site name + alt_sites = alt_site_pairs.get(site_name) + alt_sites = [site.strip() for site in alt_sites] + alt_sites = set(alt_sites) + + # If no alternative sites we don't need to add + if not alt_sites: + continue + + # Take a copy of data of the first alternate site that is already + # defined as an attached site to match the same state. + match_meta = next((attached_sites[site] for site in alt_sites + if site in attached_sites), None) + if not match_meta: + continue + + alt_site_meta = copy.deepcopy(match_meta) + alt_site_meta["name"] = site_name + + # Note: We change mutable `attached_site` dict in-place + attached_sites[site_name] = alt_site_meta + + return attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. 
+ + If `site` has alternative site, it means that alt_site has 'site' as + alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(set) + for site_name, site_info in conf_sites.items(): + alt_sites = set(site_info.get("alternative_sites", [])) + alt_site_pairs[site_name].update(alt_sites) + + for alt_site in alt_sites: + alt_site_pairs[alt_site].add(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against wrong config + # {"SFTP": {"alternative_site": "SFTP"} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.add(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs + + def clear_project(self, project_name, site_name): + """ + Clear 'project_name' of 'site_name' and its local files Works only on real local sites, not on 'studio' """ @@ -195,170 +357,180 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "files.sites.name": site_name } + # TODO currently not possible to replace with get_representations representations = list( - self.connection.database[collection].find(query)) + self.connection.database[project_name].find(query)) if not representations: self.log.debug("No repre found") return for repre in representations: - self.remove_site(collection, repre.get("_id"), site_name, True) + self.remove_site(project_name, repre.get("_id"), site_name, True) - def create_validate_project_task(self, collection, site_name): + def create_validate_project_task(self, project_name, site_name): """Adds metadata about project files validation on a queue. - This process will loop through all representation and check if - their files actually exist on an active site. + This process will loop through all representation and check if + their files actually exist on an active site. - This might be useful for edge cases when artists is switching - between sites, remote site is actually physically mounted and - active site has same file urls etc. + It also checks if site is set in DB, but file is physically not + present - Task will run on a asyncio loop, shouldn't be blocking. + This might be useful for edge cases when artists is switching + between sites, remote site is actually physically mounted and + active site has same file urls etc. + + Task will run on a asyncio loop, shouldn't be blocking. """ task = { "type": "validate", - "project_name": collection, - "func": lambda: self.validate_project(collection, site_name) + "project_name": project_name, + "func": lambda: self.validate_project(project_name, site_name, + reset_missing=True) } - self.projects_processed.add(collection) + self.projects_processed.add(project_name) self.long_running_tasks.append(task) - def validate_project(self, collection, site_name, remove_missing=False): - """ - Validate 'collection' of 'site_name' and its local files + def validate_project(self, project_name, site_name, reset_missing=False): + """Validate 'project_name' of 'site_name' and its local files - If file present and not marked with a 'site_name' in DB, DB is - updated with site name and file modified date. + If file present and not marked with a 'site_name' in DB, DB is + updated with site name and file modified date. 
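
The validation task above is not run inline; it is queued as a plain record that the server thread's check_shutdown() pops and executes in the loop's executor, so a long scan never blocks syncing. A simplified, synchronous sketch of that hand-off:

from collections import deque

long_running_tasks = deque()

# producer side (what create_validate_project_task does):
long_running_tasks.append({
    "type": "validate",
    "project_name": "projectA",
    "func": lambda: print("validating projectA files on site"),
})

# consumer side (check_shutdown runs this inside the asyncio loop):
while long_running_tasks:
    task = long_running_tasks.pop()
    task["func"]()
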
- Args: - module (SyncServerModule) - collection (string): project name - site_name (string): active site name - remove_missing (bool): if True remove sites in DB if missing - physically + Args: + project_name (string): project name + site_name (string): active site name + reset_missing (bool): if True reset site in DB if missing + physically """ - self.log.debug("Validation of {} for {} started".format(collection, + self.log.debug("Validation of {} for {} started".format(project_name, site_name)) - query = { - "type": "representation" - } - - representations = list( - self.connection.database[collection].find(query)) + representations = list(get_representations(project_name)) if not representations: self.log.debug("No repre found") return sites_added = 0 - sites_removed = 0 + sites_reset = 0 for repre in representations: repre_id = repre["_id"] for repre_file in repre.get("files", []): try: - has_site = site_name in [site["name"] - for site in repre_file["sites"]] - except TypeError: + is_on_site = site_name in [site["name"] + for site in repre_file["sites"] + if (site.get("created_dt") and + not site.get("error"))] + except (TypeError, AttributeError): self.log.debug("Structure error in {}".format(repre_id)) continue - if has_site and not remove_missing: - continue - file_path = repre_file.get("path", "") - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file_path) - if local_file_path and os.path.exists(local_file_path): - self.log.debug("Adding site {} for {}".format(site_name, - repre_id)) - if not has_site: - query = { - "_id": repre_id - } + file_exists = (local_file_path and + os.path.exists(local_file_path)) + if not is_on_site: + if file_exists: + self.log.debug( + "Adding site {} for {}".format(site_name, + repre_id)) + created_dt = datetime.fromtimestamp( os.path.getmtime(local_file_path)) elem = {"name": site_name, "created_dt": created_dt} - self._add_site(collection, query, [repre], elem, + self._add_site(project_name, repre, elem, site_name=site_name, - file_id=repre_file["_id"]) + file_id=repre_file["_id"], + force=True) sites_added += 1 else: - if has_site and remove_missing: - self.log.debug("Removing site {} for {}". + if not file_exists and reset_missing: + self.log.debug("Resetting site {} for {}". format(site_name, repre_id)) - self.reset_provider_for_file(collection, - repre_id, - file_id=repre_file["_id"], - remove=True) - sites_removed += 1 + self.reset_site_on_representation( + project_name, repre_id, site_name=site_name, + file_id=repre_file["_id"]) + sites_reset += 1 if sites_added % 100 == 0: self.log.debug("Sites added {}".format(sites_added)) - self.log.debug("Validation of {} for {} ended".format(collection, + self.log.debug("Validation of {} for {} ended".format(project_name, site_name)) - self.log.info("Sites added {}, sites removed {}".format(sites_added, - sites_removed)) + self.log.info("Sites added {}, sites reset {}".format(sites_added, + reset_missing)) - def pause_representation(self, collection, representation_id, site_name): + def pause_representation(self, project_name, representation_id, site_name): """ Sets 'representation_id' as paused, eg. no syncing should be happening on it. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. 
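
Condensing the validation rules above into one predicate (a sketch with hypothetical inputs, not the module's code): a site entry only counts as present when it has 'created_dt' and no 'error', and that is crossed with whether the file physically exists on disk.

import os

def site_state(site_name, repre_file, local_file_path):
    is_on_site = any(
        site["name"] == site_name
        and site.get("created_dt")
        and not site.get("error")
        for site in repre_file["sites"]
    )
    file_exists = bool(local_file_path) and os.path.exists(local_file_path)
    if file_exists and not is_on_site:
        return "add_site"    # stamp created_dt from the file's mtime
    if not file_exists and is_on_site:
        return "reset_site"  # only when reset_missing is requested
    return "in_sync"
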
""" - log.info("Pausing SyncServer for {}".format(representation_id)) - self._paused_representations.add(representation_id) - self.reset_site_on_representation(collection, representation_id, + self.log.info("Pausing SyncServer for {}".format(representation_id)) + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=True) - def unpause_representation(self, collection, representation_id, site_name): + def unpause_representation(self, project_name, + representation_id, site_name): """ Sets 'representation_id' as unpaused. Does not fail or warn if repre wasn't paused. Args: - collection (string): project name + project_name (string): project name representation_id (string): MongoDB objectId value site_name (string): 'gdrive', 'studio' etc. """ - log.info("Unpausing SyncServer for {}".format(representation_id)) - try: - self._paused_representations.remove(representation_id) - except KeyError: - pass - # self.paused_representations is not persistent - self.reset_site_on_representation(collection, representation_id, + self.log.info("Unpausing SyncServer for {}".format(representation_id)) + self.reset_site_on_representation(project_name, representation_id, site_name=site_name, pause=False) - def is_representation_paused(self, representation_id, - check_parents=False, project_name=None): + def is_representation_paused(self, project_name, representation_id, + site_name, check_parents=False): """ - Returns if 'representation_id' is paused or not. + Returns if 'representation_id' is paused or not for site. Args: - representation_id (string): MongoDB objectId value + project_name (str): project to check if paused + representation_id (str): MongoDB objectId value + site (str): site to check representation is paused for check_parents (bool): check if parent project or server itself are not paused - project_name (string): project to check if paused - if 'check_parents', 'project_name' should be set too Returns: (bool) """ - condition = representation_id in self._paused_representations - if check_parents and project_name: - condition = condition or \ - self.is_project_paused(project_name) or \ - self.is_paused() - return condition + # Check parents are paused + if check_parents and ( + self.is_project_paused(project_name) + or self.is_paused() + ): + return True + + # Get representation + representation = get_representation_by_id(project_name, + representation_id, + fields=["files.sites"]) + if not representation: + return False + + # Check if representation is paused + for file_info in representation.get("files", []): + for site in file_info.get("sites", []): + if site["name"] != site_name: + continue + + return site.get("paused", False) + + return False def pause_project(self, project_name): """ @@ -366,9 +538,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): happening on all representation inside. Args: - project_name (string): collection name + project_name (string): project_name name """ - log.info("Pausing SyncServer for {}".format(project_name)) + self.log.info("Pausing SyncServer for {}".format(project_name)) self._paused_projects.add(project_name) def unpause_project(self, project_name): @@ -378,9 +550,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Does not fail or warn if project wasn't paused. 
Args: - project_name (string): collection name + project_name (string): """ - log.info("Unpausing SyncServer for {}".format(project_name)) + self.log.info("Unpausing SyncServer for {}".format(project_name)) try: self._paused_projects.remove(project_name) except KeyError: @@ -391,7 +563,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns if 'project_name' is paused or not. Args: - project_name (string): collection name + project_name (string): check_parents (bool): check if server itself is not paused Returns: @@ -408,14 +580,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): It won't check anything, not uploading/downloading... """ - log.info("Pausing SyncServer") + self.log.info("Pausing SyncServer") self._paused = True def unpause_server(self): """ Unpause server """ - log.info("Unpausing SyncServer") + self.log.info("Unpausing SyncServer") self._paused = False def is_paused(self): @@ -726,7 +898,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): # val = val[platform.system().lower()] # except KeyError: # st = "{}'s field value {} should be".format(key, val) # noqa: E501 - # log.error(st + " multiplatform dict") + # self.log.error(st + " multiplatform dict") # # item["namespace"] = item["namespace"].replace('{site}', # site_name) @@ -756,23 +928,94 @@ class SyncServerModule(OpenPypeModule, ITrayModule): In case of user's involvement (reset site), start that right away. """ - self.sync_server_thread.reset_timer() + + if not self.enabled: + return + + if self.sync_server_thread is None: + self._reset_timer_with_rest_api() + else: + self.sync_server_thread.reset_timer() + + def is_representation_on_site( + self, project_name, representation_id, site_name + ): + """Checks if 'representation_id' has all files avail. on 'site_name'""" + representation = get_representation_by_id(project_name, + representation_id, + fields=["_id", "files"]) + if not representation: + return False + + on_site = False + for file_info in representation.get("files", []): + for site in file_info.get("sites", []): + if site["name"] != site_name: + continue + + if (site.get("progress") or site.get("error") or + not site.get("created_dt")): + return False + on_site = True + + return on_site + + def _reset_timer_with_rest_api(self): + # POST to webserver sites to add to representations + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + self.log.warning("Couldn't find webserver url") + return + + rest_api_url = "{}/sync_server/reset_timer".format( + webserver_url + ) + + try: + import requests + except Exception: + self.log.warning( + "Couldn't add sites to representations " + "('requests' is not available)" + ) + return + + requests.post(rest_api_url) def get_enabled_projects(self): """Returns list of projects which have SyncServer enabled.""" enabled_projects = [] if self.enabled: - for project in self.connection.projects(projection={"name": 1}): + for project in get_projects(fields=["name"]): project_name = project["name"] - project_settings = self.get_sync_project_setting(project_name) - if project_settings and project_settings.get("enabled"): + if self.is_project_enabled(project_name): enabled_projects.append(project_name) return enabled_projects - def handle_alternate_site(self, collection, representation, processed_site, - file_id, synced_file_id): + def is_project_enabled(self, project_name, single=False): + """Checks if 'project_name' is enabled for syncing. 
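
is_representation_on_site above encodes the availability rule used throughout this module: a file counts as present on a site only when that site entry has 'created_dt' and carries neither 'progress' nor 'error', and every file of the representation must pass. Restated standalone as a sketch:

def is_fully_on_site(representation, site_name):
    on_site = False
    for file_info in representation.get("files", []):
        for site in file_info.get("sites", []):
            if site["name"] != site_name:
                continue
            if (site.get("progress") or site.get("error")
                    or not site.get("created_dt")):
                return False  # still syncing, failed, or never arrived
            on_site = True
    return on_site
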
+ 'get_sync_project_setting' is potentially expensive operation (pulls + settings for all projects if cached version is not available), using + project_settings for specific project should be faster. + Args: + project_name (str) + single (bool): use 'get_project_settings' method + """ + if self.enabled: + if single: + project_settings = get_project_settings(project_name) + project_settings = \ + self._parse_sync_settings_from_settings(project_settings) + else: + project_settings = self.get_sync_project_setting(project_name) + if project_settings and project_settings.get("enabled"): + return True + return False + + def handle_alternate_site(self, project_name, representation, + processed_site, file_id, synced_file_id): """ For special use cases where one site vendors another. @@ -785,7 +1028,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): same location >> file is accesible on 'sftp' site right away. Args: - collection (str): name of project + project_name (str): name of project representation (dict) processed_site (str): real site_name of published/uploaded file file_id (ObjectId): DB id of file handled @@ -809,26 +1052,112 @@ class SyncServerModule(OpenPypeModule, ITrayModule): alternate_sites = set(alternate_sites) for alt_site in alternate_sites: - query = { - "_id": representation["_id"] - } elem = {"name": alt_site, "created_dt": datetime.now(), "id": synced_file_id} self.log.debug("Adding alternate {} to {}".format( alt_site, representation["_id"])) - self._add_site(collection, query, - [representation], elem, + self._add_site(project_name, + representation, elem, alt_site, file_id=file_id, force=True) + def get_repre_info_for_versions(self, project_name, version_ids, + active_site, remote_site): + """Returns representation documents for versions and sites combi + + Args: + project_name (str) + version_ids (list): of version[_id] + active_site (string): 'local', 'studio' etc + remote_site (string): dtto + Returns: + + """ + self.connection.Session["AVALON_PROJECT"] = project_name + query = [ + {"$match": {"parent": {"$in": version_ids}, + "type": "representation", + "files.sites.name": {"$exists": 1}}}, + {"$unwind": "$files"}, + {'$addFields': { + 'order_local': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', active_site]} + } + } + }}, + {'$addFields': { + 'order_remote': { + '$filter': { + 'input': '$files.sites', 'as': 'p', + 'cond': {'$eq': ['$$p.name', remote_site]} + } + } + }}, + {'$addFields': { + 'progress_local': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_local.progress"}, + "$order_local.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_local.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$addFields': { + 'progress_remote': {"$arrayElemAt": [{ + '$cond': [ + {'$size': "$order_remote.progress"}, + "$order_remote.progress", + # if exists created_dt count is as available + {'$cond': [ + {'$size': "$order_remote.created_dt"}, + [1], + [0] + ]} + ]}, + 0 + ]} + }}, + {'$group': { # first group by repre + '_id': '$_id', + 'parent': {'$first': '$parent'}, + 'avail_ratio_local': { + '$first': { + '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] + } + }, + 'avail_ratio_remote': { + '$first': { + '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] + } + } + }}, + {'$group': { # second group by parent, eg version_id + '_id': '$parent', + 'repre_count': {'$sum': 1}, # total representations + # fully available representation for site + 'avail_repre_local': {'$sum': 
"$avail_ratio_local"}, + 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, + }}, + ] + # docs = list(self.connection.aggregate(query)) + return self.connection.aggregate(query) + """ End of Public API """ - def get_local_file_path(self, collection, site_name, file_path): + def get_local_file_path(self, project_name, site_name, file_path): """ Externalized for app """ - handler = LocalDriveHandler(collection, site_name) + handler = LocalDriveHandler(project_name, site_name) local_file_path = handler.resolve_path(file_path) return local_file_path @@ -848,6 +1177,11 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if self.enabled and sync_settings.get('enabled'): sites.append(self.LOCAL_SITE) + active_site = sync_settings["config"]["active_site"] + # for Tray running background process + if active_site not in sites and active_site == get_local_site_id(): + sites.append(active_site) + return sites def tray_init(self): @@ -859,21 +1193,13 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ self.server_init() - from .tray.app import SyncServerWindow - self.widget = SyncServerWindow(self) - def server_init(self): """Actual initialization of Sync Server.""" # import only in tray or Python3, because of Python2 hosts - from .sync_server import SyncServerThread - if not self.enabled: return - enabled_projects = self.get_enabled_projects() - if not enabled_projects: - self.enabled = False - return + from .sync_server import SyncServerThread self.lock = threading.Lock() @@ -893,10 +1219,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.server_start() def server_start(self): - if self.sync_project_settings and self.enabled: + if self.enabled: self.sync_server_thread.start() else: - log.info("No presets or active providers. " + + self.log.info("No presets or active providers. 
" + "Synchronization not possible.") def tray_exit(self): @@ -914,12 +1240,12 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if not self.is_running: return try: - log.info("Stopping sync server server") + self.log.info("Stopping sync server server") self.sync_server_thread.is_running = False self.sync_server_thread.stop() - log.info("Sync server stopped") + self.log.info("Sync server stopped") except Exception: - log.warning( + self.log.warning( "Error has happened during Killing sync server", exc_info=True ) @@ -928,7 +1254,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if not self.enabled: return - from Qt import QtWidgets + from qtpy import QtWidgets """Add menu or action to Tray(or parent)'s menu""" action = QtWidgets.QAction(self.label, parent_menu) action.triggered.connect(self.show_widget) @@ -990,10 +1316,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): def _prepare_sync_project_settings(self, exclude_locals): sync_project_settings = {} system_sites = self.get_all_site_configs() - project_docs = self.connection.projects( - projection={"name": 1}, - only_active=True - ) + project_docs = get_projects(fields=["name"]) for project_doc in project_docs: project_name = project_doc["name"] sites = copy.deepcopy(system_sites) # get all configured sites @@ -1007,7 +1330,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): sync_project_settings[project_name] = proj_settings if not sync_project_settings: - log.info("No enabled and configured projects for sync.") + self.log.info("No enabled and configured projects for sync.") return sync_project_settings def get_sync_project_setting(self, project_name, exclude_locals=False, @@ -1055,13 +1378,19 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ sync_sett = self.sync_system_settings project_enabled = True + project_settings = None if project_name: project_enabled = project_name in self.get_enabled_projects() + project_settings = self.get_sync_project_setting(project_name) sync_enabled = sync_sett["enabled"] and project_enabled system_sites = {} if sync_enabled: for site, detail in sync_sett.get("sites", {}).items(): + if project_settings: + site_settings = project_settings["sites"].get(site) + if site_settings: + detail.update(site_settings) system_sites[site] = detail system_sites.update(self._get_default_site_configs(sync_enabled, @@ -1083,14 +1412,22 @@ class SyncServerModule(OpenPypeModule, ITrayModule): exclude_locals=True) roots = {} for root, config in anatomy_sett["roots"].items(): - roots[root] = config[platform.system().lower()] + roots[root] = config studio_config = { + 'enabled': True, 'provider': 'local_drive', "root": roots } all_sites = {self.DEFAULT_SITE: studio_config} if sync_enabled: - all_sites[get_local_site_id()] = {'provider': 'local_drive'} + all_sites[get_local_site_id()] = {'enabled': True, + 'provider': 'local_drive', + "root": roots} + # duplicate values for normalized local name + all_sites["local"] = { + 'enabled': True, + 'provider': 'local_drive', + "root": roots} return all_sites def get_provider_for_site(self, project_name=None, site=None): @@ -1118,7 +1455,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return sites.get(site, 'N/A') @time_function - def get_sync_representations(self, collection, active_site, remote_site): + def get_sync_representations(self, project_name, active_site, remote_site): """ Get representations that should be synced, these could be recognised by presence of document in 'files.sites', where key is @@ -1129,8 +1466,7 @@ class 
SyncServerModule(OpenPypeModule, ITrayModule): better performance. Goal is to get as few representations as possible. Args: - collection (string): name of collection (in most cases matches - project name + project_name (string): active_site (string): identifier of current active site (could be 'local_0' when working from home, 'studio' when working in the studio (default) @@ -1139,10 +1475,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (list) of dictionaries """ - log.debug("Check representations for : {}".format(collection)) - self.connection.Session["AVALON_PROJECT"] = collection + self.log.debug("Check representations for : {}".format(project_name)) + self.connection.Session["AVALON_PROJECT"] = project_name # retry_cnt - number of attempts to sync specific file before giving up - retries_arr = self._get_retries_arr(collection) + retries_arr = self._get_retries_arr(project_name) match = { "type": "representation", "$or": [ @@ -1158,7 +1494,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "$elemMatch": { "name": {"$in": [remote_site]}, "created_dt": {"$exists": False}, - "tries": {"$in": retries_arr} + "tries": {"$in": retries_arr}, + "paused": {"$exists": False} } } }]}, @@ -1168,7 +1505,8 @@ class SyncServerModule(OpenPypeModule, ITrayModule): "$elemMatch": { "name": active_site, "created_dt": {"$exists": False}, - "tries": {"$in": retries_arr} + "tries": {"$in": retries_arr}, + "paused": {"$exists": False} } }}, { "files.sites": { @@ -1218,9 +1556,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): }}, {"$sort": {'priority': -1, '_id': 1}}, ] - log.debug("active_site:{} - remote_site:{}".format(active_site, - remote_site)) - log.debug("query: {}".format(aggr)) + self.log.debug("active_site:{} - remote_site:{}".format( + active_site, remote_site + )) + self.log.debug("query: {}".format(aggr)) representations = self.connection.aggregate(aggr) return representations @@ -1255,7 +1594,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if get_local_site_id() not in (local_site, remote_site): # don't do upload/download for studio sites - log.debug("No local site {} - {}".format(local_site, remote_site)) + self.log.debug( + "No local site {} - {}".format(local_site, remote_site) + ) return SyncStatus.DO_NOTHING _, remote_rec = self._get_site_rec(sites, remote_site) or {} @@ -1279,21 +1620,21 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return SyncStatus.DO_NOTHING - def update_db(self, collection, new_file_id, file, representation, + def update_db(self, project_name, new_file_id, file, representation, site, error=None, progress=None, priority=None): """ Update 'provider' portion of records in DB with success (file_id) or error (exception) Args: - collection (string): name of project - force to db connection as + project_name (string): name of project - force to db connection as each file might come from different collection - new_file_id (string): + new_file_id (string): only present if file synced successfully file (dictionary): info about processed file (pulled from DB) representation (dictionary): parent repr of file (from DB) site (string): label ('gdrive', 'S3') error (string): exception message - progress (float): 0-1 of progress of upload/download + progress (float): 0-0.99 of progress of upload/download priority (int): 0-100 set priority Returns: @@ -1329,7 +1670,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): if file_id: arr_filter.append({'f._id': ObjectId(file_id)}) - self.connection.database[collection].update_one( + 
self.connection.database[project_name].update_one( query, update, upsert=True, @@ -1346,11 +1687,16 @@ class SyncServerModule(OpenPypeModule, ITrayModule): error_str = '' source_file = file.get("path", "") - log.debug("File for {} - {source_file} process {status} {error_str}". - format(representation_id, - status=status, - source_file=source_file, - error_str=error_str)) + self.log.debug( + ( + "File for {} - {source_file} process {status} {error_str}" + ).format( + representation_id, + status=status, + source_file=source_file, + error_str=error_str + ) + ) def _get_file_info(self, files, _id): """ @@ -1392,9 +1738,10 @@ class SyncServerModule(OpenPypeModule, ITrayModule): return -1, None - def reset_site_on_representation(self, collection, representation_id, + def reset_site_on_representation(self, project_name, representation_id, side=None, file_id=None, site_name=None, - remove=False, pause=None, force=False): + remove=False, pause=None, force=False, + priority=None): """ Reset information about synchronization for particular 'file_id' and provider. @@ -1409,7 +1756,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Should be used when repre should be synced to new site. Args: - collection (string): name of project (eg. collection) in DB + project_name (string): name of project (eg. collection) in DB representation_id(string): _id of representation file_id (string): file _id in representation side (string): local or remote side @@ -1417,24 +1764,25 @@ class SyncServerModule(OpenPypeModule, ITrayModule): remove (bool): if True remove site altogether pause (bool or None): if True - pause, False - unpause force (bool): hard reset - currently only for add_site + priority (int): set priority - Returns: - throws ValueError + Raises: + SiteAlreadyPresentError - if adding already existing site and + not 'force' + ValueError - other errors (repre not found, misconfiguration) """ - query = { - "_id": ObjectId(representation_id) - } - - representation = list(self.connection.database[collection].find(query)) + representation = get_representation_by_id(project_name, + representation_id) if not representation: raise ValueError("Representation {} not found in {}". 
- format(representation_id, collection)) + format(representation_id, project_name)) + if side and site_name: raise ValueError("Misconfiguration, only one of side and " + "site_name arguments should be passed.") - local_site = self.get_active_site(collection) - remote_site = self.get_remote_site(collection) + local_site = self.get_active_site(project_name) + remote_site = self.get_remote_site(project_name) if side: if side == 'local': @@ -1444,38 +1792,48 @@ class SyncServerModule(OpenPypeModule, ITrayModule): elem = {"name": site_name} + # Add priority + if priority: + elem["priority"] = priority + if file_id: # reset site for particular file - self._reset_site_for_file(collection, query, + self._reset_site_for_file(project_name, representation_id, elem, file_id, site_name) elif side: # reset site for whole representation - self._reset_site(collection, query, elem, site_name) + self._reset_site(project_name, representation_id, elem, site_name) elif remove: # remove site for whole representation - self._remove_site(collection, query, representation, site_name) + self._remove_site(project_name, + representation, site_name) elif pause is not None: - self._pause_unpause_site(collection, query, + self._pause_unpause_site(project_name, representation, site_name, pause) else: # add new site to all files for representation - self._add_site(collection, query, representation, elem, site_name, - force) + self._add_site(project_name, representation, elem, site_name, + force=force) - def _update_site(self, collection, query, update, arr_filter): + def _update_site(self, project_name, representation_id, + update, arr_filter): """ Auxiliary method to call update_one function on DB Used for refactoring ugly reset_provider_for_file """ - self.connection.database[collection].update_one( + query = { + "_id": ObjectId(representation_id) + } + + self.connection.database[project_name].update_one( query, update, upsert=True, array_filters=arr_filter ) - def _reset_site_for_file(self, collection, query, + def _reset_site_for_file(self, project_name, representation_id, elem, file_id, site_name): """ Resets 'site_name' for 'file_id' on representation in 'query' on - 'collection' + 'project_name' """ update = { "$set": {"files.$[f].sites.$[s]": elem} @@ -1488,9 +1846,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _reset_site(self, collection, query, elem, site_name): + def _reset_site(self, project_name, representation_id, elem, site_name): """ Resets 'site_name' for all files of representation in 'query' """ @@ -1502,23 +1860,23 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, update, arr_filter) - def _remove_site(self, collection, query, representation, site_name): + def _remove_site(self, project_name, representation, site_name): """ Removes 'site_name' for 'representation' in 'query' Throws ValueError if 'site_name' not found on 'representation' """ found = False - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site.get("name") == site_name: found = True break if not found: msg = "Site {} not found".format(site_name) - log.info(msg) + self.log.info(msg) raise ValueError(msg) update = { @@ -1526,10 +1884,11 @@ 
class SyncServerModule(OpenPypeModule, ITrayModule): } arr_filter = [] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _pause_unpause_site(self, collection, query, - representation, site_name, pause): + def _pause_unpause_site(self, project_name, representation, + site_name, pause): """ Pauses/unpauses all files for 'representation' based on 'pause' @@ -1537,14 +1896,14 @@ class SyncServerModule(OpenPypeModule, ITrayModule): """ found = False site = None - for repre_file in representation.pop().get("files"): + for repre_file in representation.get("files"): for site in repre_file.get("sites"): if site["name"] == site_name: found = True break if not found: msg = "Site {} not found".format(site_name) - log.info(msg) + self.log.info(msg) raise ValueError(msg) if pause: @@ -1561,37 +1920,45 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'s.name': site_name} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation["_id"], + update, arr_filter) - def _add_site(self, collection, query, representation, elem, site_name, + def _add_site(self, project_name, representation, elem, site_name, force=False, file_id=None): """ - Adds 'site_name' to 'representation' on 'collection' + Adds 'site_name' to 'representation' on 'project_name' Args: - representation (list of 1 dict) + representation (dict) file_id (ObjectId) Use 'force' to remove existing or raises ValueError """ - reseted_existing = False - for repre_file in representation.pop().get("files"): + representation_id = representation["_id"] + reset_existing = False + files = representation.get("files", []) + if not files: + self.log.debug("No files for {}".format(representation_id)) + return + + for repre_file in files: if file_id and file_id != repre_file["_id"]: continue for site in repre_file.get("sites"): if site["name"] == site_name: - if force: - self._reset_site_for_file(collection, query, + if force or site.get("error"): + self._reset_site_for_file(project_name, + representation_id, elem, repre_file["_id"], site_name) - reseted_existing = True + reset_existing = True else: msg = "Site {} already present".format(site_name) - log.info(msg) - raise ValueError(msg) + self.log.info(msg) + raise SiteAlreadyPresentError(msg) - if reseted_existing: + if reset_existing: return if not file_id: @@ -1608,14 +1975,15 @@ class SyncServerModule(OpenPypeModule, ITrayModule): {'f._id': file_id} ] - self._update_site(collection, query, update, arr_filter) + self._update_site(project_name, representation_id, + update, arr_filter) - def _remove_local_file(self, collection, representation_id, site_name): + def _remove_local_file(self, project_name, representation_id, site_name): """ Removes all local files for 'site_name' of 'representation_id' Args: - collection (string): project name (must match DB) + project_name (string): project name (must match DB) representation_id (string): MongoDB _id value site_name (string): name of configured and active site @@ -1631,21 +1999,17 @@ class SyncServerModule(OpenPypeModule, ITrayModule): provider_name = self.get_provider_for_site(site=site_name) if provider_name == 'local_drive': - query = { - "_id": ObjectId(representation_id) - } - - representation = list( - self.connection.database[collection].find(query)) + representation = get_representation_by_id(project_name, + representation_id, + fields=["files"]) if not representation: self.log.debug("No repre {} 
found".format( representation_id)) return - representation = representation.pop() local_file_path = '' for file in representation.get("files"): - local_file_path = self.get_local_file_path(collection, + local_file_path = self.get_local_file_path(project_name, site_name, file.get("path", "") ) @@ -1677,6 +2041,9 @@ class SyncServerModule(OpenPypeModule, ITrayModule): Returns: (int): in seconds """ + if not project_name: + return 60 + ld = self.sync_project_settings[project_name]["config"]["loop_delay"] return int(ld) @@ -1688,16 +2055,19 @@ class SyncServerModule(OpenPypeModule, ITrayModule): self.widget = SyncServerWindow(self) no_errors = True except ValueError: - log.info("No system setting for sync. Not syncing.", exc_info=True) + self.log.info( + "No system setting for sync. Not syncing.", exc_info=True + ) except KeyError: - log.info(( + self.log.info(( "There are not set presets for SyncServer OR " "Credentials provided are invalid, " "no syncing possible"). format(str(self.sync_project_settings)), exc_info=True) except: - log.error("Uncaught exception durin start of SyncServer", - exc_info=True) + self.log.error( + "Uncaught exception durin start of SyncServer", + exc_info=True) self.enabled = no_errors self.widget.show() @@ -1755,7 +2125,7 @@ class SyncServerModule(OpenPypeModule, ITrayModule): (int) - number of failed attempts """ _, rec = self._get_site_rec(file.get("sites", []), provider) - return rec.get("tries", 0) + return self._get_tries_count_from_rec(rec) def _get_progress_dict(self, progress): """ @@ -1806,3 +2176,55 @@ class SyncServerModule(OpenPypeModule, ITrayModule): settings ('presets') """ return presets[project_name]['sites'][site_name]['root'] + + def cli(self, click_group): + click_group.add_command(cli_main) + + # Webserver module implementation + def webserver_initialization(self, server_manager): + """Add routes for syncs.""" + if self.tray_initialized: + from .rest_api import SyncServerModuleRestApi + self.rest_api_obj = SyncServerModuleRestApi( + self, server_manager + ) + + +@click.group(SyncServerModule.name, help="SyncServer module related commands.") +def cli_main(): + pass + + +@cli_main.command() +@click.option( + "-a", + "--active_site", + required=True, + help="Name of active stie") +def syncservice(active_site): + """Launch sync server under entered site. + + This should be ideally used by system service (such us systemd or upstart + on linux and window service). + """ + + from openpype.modules import ModulesManager + + os.environ["OPENPYPE_LOCAL_ID"] = active_site + + def signal_handler(sig, frame): + print("You pressed Ctrl+C. 
Process ended.") + sync_server_module.server_exit() + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + manager = ModulesManager() + sync_server_module = manager.modules_by_name["sync_server"] + + sync_server_module.server_init() + sync_server_module.server_start() + + while True: + time.sleep(1.0) diff --git a/openpype/modules/sync_server/tray/app.py b/openpype/modules/sync_server/tray/app.py index fc8558bdbc..c093835128 100644 --- a/openpype/modules/sync_server/tray/app.py +++ b/openpype/modules/sync_server/tray/app.py @@ -1,8 +1,7 @@ -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype.tools.settings import style -from openpype.lib import PypeLogger from openpype import resources from .widgets import ( @@ -10,8 +9,6 @@ from .widgets import ( SyncRepresentationSummaryWidget ) -log = PypeLogger().get_logger("SyncServer") - class SyncServerWindow(QtWidgets.QDialog): """ @@ -46,6 +43,14 @@ class SyncServerWindow(QtWidgets.QDialog): left_column_layout.addWidget(self.pause_btn) + checkbox = QtWidgets.QCheckBox("Show only enabled", self) + checkbox.setStyleSheet("QCheckBox{spacing: 5px;" + "padding:5px 5px 5px 5px;}") + checkbox.setChecked(True) + self.show_only_enabled_chk = checkbox + + left_column_layout.addWidget(self.show_only_enabled_chk) + repres = SyncRepresentationSummaryWidget( sync_server, project=self.projects.current_project, @@ -86,15 +91,27 @@ class SyncServerWindow(QtWidgets.QDialog): repres.message_generated.connect(self._update_message) self.projects.message_generated.connect(self._update_message) + self.show_only_enabled_chk.stateChanged.connect( + self._on_enabled_change + ) + self.representationWidget = repres + def showEvent(self, event): + self.representationWidget.set_project(self.projects.current_project) + self.projects.refresh() + self._set_running(True) + super().showEvent(event) + + def closeEvent(self, event): + self._set_running(False) + super().closeEvent(event) + def _on_project_change(self): if self.projects.current_project is None: return - self.representationWidget.table_view.model().set_project( - self.projects.current_project - ) + self.representationWidget.set_project(self.projects.current_project) project_name = self.projects.current_project if not self.sync_server.get_sync_project_setting(project_name): @@ -103,16 +120,12 @@ class SyncServerWindow(QtWidgets.QDialog): self.projects.refresh() return - def showEvent(self, event): - self.representationWidget.model.set_project( - self.projects.current_project) + def _on_enabled_change(self): + """Called when enabled projects only checkbox is toggled.""" + self.projects.show_only_enabled = \ + self.show_only_enabled_chk.isChecked() self.projects.refresh() - self._set_running(True) - super().showEvent(event) - - def closeEvent(self, event): - self._set_running(False) - super().closeEvent(event) + self.representationWidget.set_project(None) def _set_running(self, running): self.representationWidget.model.is_running = running diff --git a/openpype/modules/sync_server/tray/delegates.py b/openpype/modules/sync_server/tray/delegates.py index 5ab809a816..e14b2e2f60 100644 --- a/openpype/modules/sync_server/tray/delegates.py +++ b/openpype/modules/sync_server/tray/delegates.py @@ -1,8 +1,7 @@ import os -from Qt import QtCore, QtWidgets, QtGui +from qtpy import QtCore, QtWidgets, QtGui -from openpype.lib import PypeLogger -from . 
import lib +from openpype.lib import Logger from openpype.tools.utils.constants import ( LOCAL_PROVIDER_ROLE, @@ -16,7 +15,7 @@ from openpype.tools.utils.constants import ( EDIT_ICON_ROLE ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class PriorityDelegate(QtWidgets.QStyledItemDelegate): diff --git a/openpype/modules/sync_server/tray/lib.py b/openpype/modules/sync_server/tray/lib.py index 87344be634..ff93815639 100644 --- a/openpype/modules/sync_server/tray/lib.py +++ b/openpype/modules/sync_server/tray/lib.py @@ -2,11 +2,6 @@ import attr import abc import six -from openpype.lib import PypeLogger - - -log = PypeLogger().get_logger("SyncServer") - STATUS = { 0: 'In Progress', 1: 'Queued', diff --git a/openpype/modules/sync_server/tray/models.py b/openpype/modules/sync_server/tray/models.py index 7241cc3472..b52f350907 100644 --- a/openpype/modules/sync_server/tray/models.py +++ b/openpype/modules/sync_server/tray/models.py @@ -1,15 +1,15 @@ import os import attr from bson.objectid import ObjectId +import datetime -from Qt import QtCore -from Qt.QtCore import Qt +from qtpy import QtCore import qtawesome from openpype.tools.utils.delegates import pretty_timestamp -from openpype.lib import PypeLogger -from openpype.api import get_local_site_id +from openpype.lib import Logger, get_local_site_id +from openpype.client import get_representation_by_id from . import lib @@ -31,7 +31,7 @@ from openpype.tools.utils.constants import ( ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class _SyncRepresentationModel(QtCore.QAbstractTableModel): @@ -52,7 +52,8 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): All queries should go through this (because of collection). """ - return self.sync_server.connection.database[self.project] + if self.project: + return self.sync_server.connection.database[self.project] @property def project(self): @@ -77,16 +78,16 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): def columnCount(self, _index=None): return len(self._header) - def headerData(self, section, orientation, role=Qt.DisplayRole): + def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole): if section >= len(self.COLUMN_LABELS): return - if role == Qt.DisplayRole: - if orientation == Qt.Horizontal: + if role == QtCore.Qt.DisplayRole: + if orientation == QtCore.Qt.Horizontal: return self.COLUMN_LABELS[section][1] if role == HEADER_NAME_ROLE: - if orientation == Qt.Horizontal: + if orientation == QtCore.Qt.Horizontal: return self.COLUMN_LABELS[section][0] # return name def data(self, index, role): @@ -121,7 +122,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): return item.status == lib.STATUS[2] and \ item.remote_progress < 1 - if role in (Qt.DisplayRole, Qt.EditRole): + if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): # because of ImageDelegate if header_value in ['remote_site', 'local_site']: return "" @@ -144,12 +145,15 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): if role == STATUS_ROLE: return item.status - if role == Qt.UserRole: + if role == QtCore.Qt.UserRole: return item._id @property def can_edit(self): """Returns true if some site is user local site, eg. 
could edit""" + if not self.project: + return False + return get_local_site_id() in (self.active_site, self.remote_site) def get_column(self, index): @@ -190,7 +194,7 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): actually queried (scrolled a couple of times to list more than single page of records) """ - if self.is_editing or not self.is_running: + if self.is_editing or not self.is_running or not self.project: return self.refresh_started.emit() self.beginResetModel() @@ -232,6 +236,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): more records in DB than loaded. """ log.debug("fetchMore") + if not self.dbcon: + return + items_to_fetch = min(self._total_records - self._rec_loaded, self.PAGE_SIZE) self.query = self.get_query(self._rec_loaded) @@ -286,9 +293,10 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): # replace('False', 'false').\ # replace('True', 'true').replace('None', 'null')) - representations = self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - self.refresh(representations) + if self.dbcon: + representations = self.dbcon.aggregate(pipeline=self.query, + allowDiskUse=True) + self.refresh(representations) def set_word_filter(self, word_filter): """ @@ -378,9 +386,9 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): project (str): name of project """ self._project = project - self.sync_server.set_sync_project_settings() # project might have been deactivated in the meantime if not self.sync_server.get_sync_project_setting(project): + self._data = {} return self.active_site = self.sync_server.get_active_site(self.project) @@ -400,11 +408,28 @@ class _SyncRepresentationModel(QtCore.QAbstractTableModel): """ for i in range(self.rowCount(None)): index = self.index(i, 0) - value = self.data(index, Qt.UserRole) + value = self.data(index, QtCore.Qt.UserRole) if value == id: return index return None + def _convert_date(self, date_value, current_date): + """Converts 'date_value' to string. + + Value of date_value might contain date in the future, used for nicely + sort queued items next to last downloaded. + """ + try: + converted_date = None + # ignore date in the future - for sorting only + if date_value and date_value < current_date: + converted_date = date_value.strftime("%Y%m%dT%H%M%SZ") + except (AttributeError, TypeError): + # ignore unparseable values + pass + + return converted_date + class SyncRepresentationSummaryModel(_SyncRepresentationModel): """ @@ -414,7 +439,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): full text filtering. Allows pagination, most of heavy lifting is being done on DB side. - Single model matches to single collection. When project is changed, + Single model matches to single project. When project is changed, model is reset and refreshed. 
Args: @@ -509,25 +534,23 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): self._word_filter = None - if not self._project or self._project == lib.DUMMY_PROJECT: - return - self.sync_server = sync_server # TODO think about admin mode + self.sort_criteria = self.DEFAULT_SORT + + self.timer = QtCore.QTimer() + if not self._project or self._project == lib.DUMMY_PROJECT: + self.active_site = sync_server.DEFAULT_SITE + self.remote_site = sync_server.DEFAULT_SITE + return + # this is for regular user, always only single local and single remote self.active_site = self.sync_server.get_active_site(self.project) self.remote_site = self.sync_server.get_remote_site(self.project) - self.sort_criteria = self.DEFAULT_SORT - self.query = self.get_query() self.default_query = list(self.get_query()) - representations = self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - self.refresh(representations) - - self.timer = QtCore.QTimer() self.timer.timeout.connect(self.tick) self.timer.start(self.REFRESH_SEC) @@ -554,7 +577,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): remote_provider = lib.translate_provider_for_icon(self.sync_server, self.project, remote_site) - + current_date = datetime.datetime.now() for repre in result.get("paginatedResults"): files = repre.get("files", []) if isinstance(files, dict): # aggregate returns dictionary @@ -564,14 +587,10 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if not files: continue - local_updated = remote_updated = None - if repre.get('updated_dt_local'): - local_updated = \ - repre.get('updated_dt_local').strftime("%Y%m%dT%H%M%SZ") - - if repre.get('updated_dt_remote'): - remote_updated = \ - repre.get('updated_dt_remote').strftime("%Y%m%dT%H%M%SZ") + local_updated = self._convert_date(repre.get('updated_dt_local'), + current_date) + remote_updated = self._convert_date(repre.get('updated_dt_remote'), + current_date) avg_progress_remote = lib.convert_progress( repre.get('avg_progress_remote', '0')) @@ -639,6 +658,8 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE + # replace null with value in the future for better sorting + dummy_max_date = datetime.datetime(2099, 1, 1) aggr = [ {"$match": self.get_match_part()}, {'$unwind': '$files'}, @@ -681,7 +702,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): {'$cond': [ {'$size': "$order_remote.last_failed_dt"}, "$order_remote.last_failed_dt", - [] + [dummy_max_date] ]} ]}}, 'updated_dt_local': {'$first': { @@ -690,7 +711,7 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): {'$cond': [ {'$size': "$order_local.last_failed_dt"}, "$order_local.last_failed_dt", - [] + [dummy_max_date] ]} ]}}, 'files_size': {'$ifNull': ["$files.size", 0]}, @@ -895,13 +916,12 @@ class SyncRepresentationSummaryModel(_SyncRepresentationModel): if not self.can_edit: return - repre_id = self.data(index, Qt.UserRole) + repre_id = self.data(index, QtCore.Qt.UserRole) - representation = list(self.dbcon.find({"type": "representation", - "_id": repre_id})) + representation = get_representation_by_id(self.project, repre_id) if representation: self.sync_server.update_db(self.project, None, None, - representation.pop(), + representation, get_local_site_id(), priority=value) self.is_editing = False @@ -1003,9 +1023,6 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self.sort_criteria = self.DEFAULT_SORT self.query = self.get_query() - representations = 
self.dbcon.aggregate(pipeline=self.query, - allowDiskUse=True) - self.refresh(representations) self.timer = QtCore.QTimer() self.timer.timeout.connect(self.tick) @@ -1036,6 +1053,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): self.project, remote_site) + current_date = datetime.datetime.now() for repre in result.get("paginatedResults"): # log.info("!!! repre:: {}".format(repre)) files = repre.get("files", []) @@ -1043,16 +1061,12 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): files = [files] for file in files: - local_updated = remote_updated = None - if repre.get('updated_dt_local'): - local_updated = \ - repre.get('updated_dt_local').strftime( - "%Y%m%dT%H%M%SZ") - - if repre.get('updated_dt_remote'): - remote_updated = \ - repre.get('updated_dt_remote').strftime( - "%Y%m%dT%H%M%SZ") + local_updated = self._convert_date( + repre.get('updated_dt_local'), + current_date) + remote_updated = self._convert_date( + repre.get('updated_dt_remote'), + current_date) remote_progress = lib.convert_progress( repre.get('progress_remote', '0')) @@ -1101,6 +1115,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if limit == 0: limit = SyncRepresentationSummaryModel.PAGE_SIZE + dummy_max_date = datetime.datetime(2099, 1, 1) aggr = [ {"$match": self.get_match_part()}, {"$unwind": "$files"}, @@ -1144,7 +1159,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): '$cond': [ {'$size': "$order_remote.last_failed_dt"}, "$order_remote.last_failed_dt", - [] + [dummy_max_date] ] } ] @@ -1157,7 +1172,7 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): '$cond': [ {'$size': "$order_local.last_failed_dt"}, "$order_local.last_failed_dt", - [] + [dummy_max_date] ] } ] @@ -1337,14 +1352,13 @@ class SyncRepresentationDetailModel(_SyncRepresentationModel): if not self.can_edit: return - file_id = self.data(index, Qt.UserRole) + file_id = self.data(index, QtCore.Qt.UserRole) updated_file = None - # conversion from cursor to list - representations = list(self.dbcon.find({"type": "representation", - "_id": self._id})) + representation = get_representation_by_id(self.project, self._id) + if not representation: + return - representation = representations.pop() for repre_file in representation["files"]: if repre_file["_id"] == file_id: updated_file = repre_file diff --git a/openpype/modules/sync_server/tray/widgets.py b/openpype/modules/sync_server/tray/widgets.py index 6aae9562cf..75a6d20d3c 100644 --- a/openpype/modules/sync_server/tray/widgets.py +++ b/openpype/modules/sync_server/tray/widgets.py @@ -3,14 +3,12 @@ import subprocess import sys from functools import partial -from Qt import QtWidgets, QtCore, QtGui -from Qt.QtCore import Qt +from qtpy import QtWidgets, QtCore, QtGui import qtawesome from openpype.tools.settings import style -from openpype.api import get_local_site_id -from openpype.lib import PypeLogger +from openpype.lib import Logger, get_local_site_id from openpype.tools.utils.delegates import pretty_timestamp @@ -36,7 +34,7 @@ from openpype.tools.utils.constants import ( TRIES_ROLE ) -log = PypeLogger().get_logger("SyncServer") +log = Logger.get_logger("SyncServer") class SyncProjectListWidget(QtWidgets.QWidget): @@ -47,6 +45,7 @@ class SyncProjectListWidget(QtWidgets.QWidget): message_generated = QtCore.Signal(str) refresh_msec = 10000 + show_only_enabled = True def __init__(self, sync_server, parent): super(SyncProjectListWidget, self).__init__(parent) @@ -122,11 +121,15 @@ class SyncProjectListWidget(QtWidgets.QWidget): 
self._model_reset = False selected_item = None - for project_name in self.sync_server.sync_project_settings.\ - keys(): + sync_settings = self.sync_server.sync_project_settings + for project_name in sync_settings.keys(): if self.sync_server.is_paused() or \ self.sync_server.is_project_paused(project_name): icon = self._get_icon("paused") + elif not sync_settings[project_name]["enabled"]: + if self.show_only_enabled: + continue + icon = self._get_icon("disabled") else: icon = self._get_icon("synced") @@ -139,12 +142,12 @@ class SyncProjectListWidget(QtWidgets.QWidget): if self.current_project == project_name: selected_item = item + if model.item(0) is None: + return + if selected_item: selected_index = model.indexFromItem(selected_item) - if len(self.sync_server.sync_project_settings.keys()) == 0: - model.appendRow(QtGui.QStandardItem(lib.DUMMY_PROJECT)) - if not self.current_project: self.current_project = model.item(0).data(QtCore.Qt.DisplayRole) @@ -153,8 +156,10 @@ class SyncProjectListWidget(QtWidgets.QWidget): if selected_index and \ selected_index.isValid() and \ not self._selection_changed: - mode = QtCore.QItemSelectionModel.Select | \ - QtCore.QItemSelectionModel.Rows + mode = ( + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) self.project_list.selectionModel().select(selected_index, mode) if self.current_project: @@ -248,12 +253,15 @@ class _SyncRepresentationWidget(QtWidgets.QWidget): active_changed = QtCore.Signal() # active index changed message_generated = QtCore.Signal(str) + def set_project(self, project): + self.model.set_project(project) + def _selection_changed(self, _new_selected, _all_selected): idxs = self.selection_model.selectedRows() self._selected_ids = set() for index in idxs: - self._selected_ids.add(self.model.data(index, Qt.UserRole)) + self._selected_ids.add(self.model.data(index, QtCore.Qt.UserRole)) def _set_selection(self): """ @@ -265,8 +273,10 @@ class _SyncRepresentationWidget(QtWidgets.QWidget): for selected_id in self._selected_ids: index = self.model.get_index(selected_id) if index and index.isValid(): - mode = QtCore.QItemSelectionModel.Select | \ - QtCore.QItemSelectionModel.Rows + mode = ( + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) self.selection_model.select(index, mode) existing_ids.add(selected_id) @@ -284,7 +294,7 @@ class _SyncRepresentationWidget(QtWidgets.QWidget): self.table_view.openPersistentEditor(index) return - _id = self.model.data(index, Qt.UserRole) + _id = self.model.data(index, QtCore.Qt.UserRole) detail_window = SyncServerDetailWindow( self.sync_server, _id, self.model.project, parent=self) detail_window.exec() @@ -581,7 +591,6 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): super(SyncRepresentationSummaryWidget, self).__init__(parent) self.sync_server = sync_server - self._selected_ids = set() # keep last selected _id txt_filter = QtWidgets.QLineEdit() @@ -609,7 +618,7 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): table_view.setSelectionBehavior( QtWidgets.QAbstractItemView.SelectRows) table_view.horizontalHeader().setSortIndicator( - -1, Qt.AscendingOrder) + -1, QtCore.Qt.AscendingOrder) table_view.setAlternatingRowColors(True) table_view.verticalHeader().hide() table_view.viewport().setAttribute(QtCore.Qt.WA_Hover, True) @@ -625,7 +634,6 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): column = table_view.model().get_header_index("priority") priority_delegate = delegates.PriorityDelegate(self) 
table_view.setItemDelegateForColumn(column, priority_delegate) - layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) @@ -633,21 +641,16 @@ class SyncRepresentationSummaryWidget(_SyncRepresentationWidget): self.table_view = table_view self.model = model - horizontal_header = HorizontalHeader(self) - table_view.setHorizontalHeader(horizontal_header) table_view.setSortingEnabled(True) - for column_name, width in self.default_widths: idx = model.get_header_index(column_name) table_view.setColumnWidth(idx, width) - table_view.doubleClicked.connect(self._double_clicked) self.txt_filter.textChanged.connect(lambda: model.set_word_filter( self.txt_filter.text())) table_view.customContextMenuRequested.connect(self._on_context_menu) - model.refresh_started.connect(self._save_scrollbar) model.refresh_finished.connect(self._set_scrollbar) model.modelReset.connect(self._set_selection) @@ -773,7 +776,8 @@ class SyncRepresentationDetailWidget(_SyncRepresentationWidget): QtWidgets.QAbstractItemView.ExtendedSelection) table_view.setSelectionBehavior( QtWidgets.QTableView.SelectRows) - table_view.horizontalHeader().setSortIndicator(-1, Qt.AscendingOrder) + table_view.horizontalHeader().setSortIndicator( + -1, QtCore.Qt.AscendingOrder) table_view.horizontalHeader().setSortIndicatorShown(True) table_view.setAlternatingRowColors(True) table_view.verticalHeader().hide() @@ -963,7 +967,6 @@ class HorizontalHeader(QtWidgets.QHeaderView): super(HorizontalHeader, self).__init__(QtCore.Qt.Horizontal, parent) self._parent = parent self.checked_values = {} - self.setModel(self._parent.model) self.setSectionsClickable(True) diff --git a/openpype/modules/sync_server/utils.py b/openpype/modules/sync_server/utils.py index 85e4e03f77..4caa01e9d7 100644 --- a/openpype/modules/sync_server/utils.py +++ b/openpype/modules/sync_server/utils.py @@ -1,6 +1,8 @@ import time -from openpype.api import Logger -log = Logger().get_logger("SyncServer") + +from openpype.lib import Logger + +log = Logger.get_logger("SyncServer") class ResumableError(Exception): @@ -8,6 +10,11 @@ class ResumableError(Exception): pass +class SiteAlreadyPresentError(Exception): + """Representation has already site skeleton present.""" + pass + + class SyncStatus: DO_NOTHING = 0 DO_UPLOAD = 1 diff --git a/openpype/modules/timers_manager/idle_threads.py b/openpype/modules/timers_manager/idle_threads.py index 9ec27e659b..eb11bbf117 100644 --- a/openpype/modules/timers_manager/idle_threads.py +++ b/openpype/modules/timers_manager/idle_threads.py @@ -1,8 +1,8 @@ import time -from Qt import QtCore +from qtpy import QtCore from pynput import mouse, keyboard -from openpype.lib import PypeLogger +from openpype.lib import Logger class IdleItem: @@ -31,7 +31,7 @@ class IdleManager(QtCore.QThread): def __init__(self): super(IdleManager, self).__init__() - self.log = PypeLogger.get_logger(self.__class__.__name__) + self.log = Logger.get_logger(self.__class__.__name__) self.signal_reset_timer.connect(self._reset_time) self.idle_item = IdleItem() diff --git a/openpype/modules/timers_manager/plugins/publish/start_timer.py b/openpype/modules/timers_manager/plugins/publish/start_timer.py new file mode 100644 index 0000000000..6408327ca1 --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/start_timer.py @@ -0,0 +1,39 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + +import pyblish.api + +from openpype.pipeline import legacy_io + + +class 
StartTimer(pyblish.api.ContextPlugin): + label = "Start Timer" + order = pyblish.api.IntegratorOrder + 1 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + project_name = legacy_io.active_project() + asset_name = legacy_io.Session.get("AVALON_ASSET") + task_name = legacy_io.Session.get("AVALON_TASK") + if not project_name or not asset_name or not task_name: + self.log.info(( + "Current context does not contain all" + " required information to start a timer." + )) + return + timers_manager.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/plugins/publish/stop_timer.py b/openpype/modules/timers_manager/plugins/publish/stop_timer.py new file mode 100644 index 0000000000..a8674ff2ca --- /dev/null +++ b/openpype/modules/timers_manager/plugins/publish/stop_timer.py @@ -0,0 +1,27 @@ +""" +Requires: + context -> system_settings + context -> openPypeModules +""" + + +import pyblish.api + + +class StopTimer(pyblish.api.ContextPlugin): + label = "Stop Timer" + order = pyblish.api.ExtractorOrder - 0.49 + hosts = ["*"] + + def process(self, context): + timers_manager = context.data["openPypeModules"]["timers_manager"] + if not timers_manager.enabled: + self.log.debug("TimersManager is disabled") + return + + modules_settings = context.data["system_settings"]["modules"] + if not modules_settings["timers_manager"]["disregard_publishing"]: + self.log.debug("Publish is not affecting running timers.") + return + + timers_manager.stop_timer_with_webserver(self.log) diff --git a/openpype/modules/timers_manager/rest_api.py b/openpype/modules/timers_manager/rest_api.py index f16cb316c3..979db9075b 100644 --- a/openpype/modules/timers_manager/rest_api.py +++ b/openpype/modules/timers_manager/rest_api.py @@ -1,9 +1,7 @@ import json from aiohttp.web_response import Response -from openpype.api import Logger - -log = Logger().get_logger("Event processor") +from openpype.lib import Logger class TimersManagerModuleRestApi: @@ -12,6 +10,7 @@ class TimersManagerModuleRestApi: happens in Workfile app. 
""" def __init__(self, user_module, server_manager): + self._log = None self.module = user_module self.server_manager = server_manager @@ -19,6 +18,12 @@ class TimersManagerModuleRestApi: self.register() + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + def register(self): self.server_manager.add_route( "POST", @@ -47,7 +52,7 @@ class TimersManagerModuleRestApi: "Payload must contain fields 'project_name," " 'asset_name' and 'task_name'" ) - log.error(msg) + self.log.error(msg) return Response(status=400, message=msg) self.module.stop_timers() @@ -73,7 +78,7 @@ class TimersManagerModuleRestApi: "Payload must contain fields 'project_name, 'asset_name'," " 'task_name'" ) - log.warning(message) + self.log.warning(message) return Response(text=message, status=404) time = self.module.get_task_time(project_name, asset_name, task_name) diff --git a/openpype/modules/timers_manager/timers_manager.py b/openpype/modules/timers_manager/timers_manager.py index 47d020104b..0ba68285a4 100644 --- a/openpype/modules/timers_manager/timers_manager.py +++ b/openpype/modules/timers_manager/timers_manager.py @@ -1,15 +1,19 @@ import os import platform -from avalon.api import AvalonMongoDB -from openpype.modules import OpenPypeModule -from openpype_interfaces import ( +from openpype.client import get_asset_by_name +from openpype.modules import ( + OpenPypeModule, ITrayService, - ILaunchHookPaths + IPluginPaths ) +from openpype.lib.events import register_event_callback + from .exceptions import InvalidContextError +TIMER_MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) + class ExampleTimersManagerConnector: """Timers manager can handle timers of multiple modules/addons. @@ -31,6 +35,7 @@ class ExampleTimersManagerConnector: } ``` """ + # Not needed at all def __init__(self, module): # Store timer manager module to be able call it's methods when needed @@ -70,7 +75,11 @@ class ExampleTimersManagerConnector: self._timers_manager_module.timer_stopped(self._module.id) -class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): +class TimersManager( + OpenPypeModule, + ITrayService, + IPluginPaths +): """ Handles about Timers. Should be able to start/stop all timers at once. 
@@ -174,11 +183,18 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): ) def get_launch_hook_paths(self): - """Implementation of `ILaunchHookPaths`.""" - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "launch_hooks" - ) + """Implementation for applications launch hooks.""" + + return [ + os.path.join(TIMER_MODULE_DIR, "launch_hooks") + ] + + def get_plugin_paths(self): + """Implementation of `IPluginPaths`.""" + + return { + "publish": [os.path.join(TIMER_MODULE_DIR, "plugins", "publish")] + } @staticmethod def get_timer_data_for_context( @@ -195,22 +211,13 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): " Project: \"{}\" Asset: \"{}\" Task: \"{}\"" ).format(str(project_name), str(asset_name), str(task_name))) - dbconn = AvalonMongoDB() - dbconn.install() - dbconn.Session["AVALON_PROJECT"] = project_name - - asset_doc = dbconn.find_one( - { - "type": "asset", - "name": asset_name - }, - { - "data.tasks": True, - "data.parents": True - } + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["_id", "name", "data.tasks", "data.parents"] ) + if not asset_doc: - dbconn.uninstall() raise InvalidContextError(( "Asset \"{}\" not found in project \"{}\"" ).format(asset_name, project_name)) @@ -218,7 +225,6 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): asset_data = asset_doc.get("data") or {} asset_tasks = asset_data.get("tasks") or {} if task_name not in asset_tasks: - dbconn.uninstall() raise InvalidContextError(( "Task \"{}\" not found on asset \"{}\" in project \"{}\"" ).format(task_name, asset_name, project_name)) @@ -236,9 +242,10 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): hierarchy_items = asset_data.get("parents") or [] hierarchy_items.append(asset_name) - dbconn.uninstall() return { "project_name": project_name, + "asset_id": str(asset_doc["_id"]), + "asset_name": asset_doc["name"], "task_name": task_name, "task_type": task_type, "hierarchy": hierarchy_items @@ -395,6 +402,7 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): logger (logging.Logger): Logger object. Using 'print' if not passed. """ + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") if not webserver_url: msg = "Couldn't find webserver url" @@ -421,3 +429,50 @@ class TimersManager(OpenPypeModule, ITrayService, ILaunchHookPaths): } return requests.post(rest_api_url, json=data) + + @staticmethod + def stop_timer_with_webserver(logger=None): + """Prepared method for calling stop timers on REST api. + + Args: + logger (logging.Logger): Logger used for logging messages. 
+ """ + + webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") + if not webserver_url: + msg = "Couldn't find webserver url" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) + try: + import requests + except Exception: + msg = "Couldn't start timer ('requests' is not available)" + if logger is not None: + logger.warning(msg) + else: + print(msg) + return + + return requests.post(rest_api_url) + + def on_host_install(self, host, host_name, project_name): + self.log.debug("Installing task changed callback") + register_event_callback("taskChanged", self._on_host_task_change) + + def _on_host_task_change(self, event): + project_name = event["project_name"] + asset_name = event["asset_name"] + task_name = event["task_name"] + self.log.debug(( + "Sending message that timer should change to" + " Project: {} Asset: {} Task: {}" + ).format(project_name, asset_name, task_name)) + + self.start_timer_with_webserver( + project_name, asset_name, task_name, self.log + ) diff --git a/openpype/modules/timers_manager/widget_user_idle.py b/openpype/modules/timers_manager/widget_user_idle.py index 1ecea74440..9df328e6b2 100644 --- a/openpype/modules/timers_manager/widget_user_idle.py +++ b/openpype/modules/timers_manager/widget_user_idle.py @@ -1,4 +1,4 @@ -from Qt import QtCore, QtGui, QtWidgets +from qtpy import QtCore, QtGui, QtWidgets from openpype import resources, style diff --git a/openpype/modules/traypublish_action.py b/openpype/modules/traypublish_action.py deleted file mode 100644 index 39163b8eb8..0000000000 --- a/openpype/modules/traypublish_action.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -from openpype.lib import get_openpype_execute_args -from openpype.lib.execute import run_detached_process -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayAction - - -class TrayPublishAction(OpenPypeModule, ITrayAction): - label = "New Publish (beta)" - name = "traypublish_tool" - - def initialize(self, modules_settings): - import openpype - self.enabled = True - self.publish_paths = [ - os.path.join( - openpype.PACKAGE_DIR, - "hosts", - "traypublisher", - "plugins", - "publish" - ) - ] - self._experimental_tools = None - - def tray_init(self): - from openpype.tools.experimental_tools import ExperimentalTools - - self._experimental_tools = ExperimentalTools() - - def tray_menu(self, *args, **kwargs): - super(TrayPublishAction, self).tray_menu(*args, **kwargs) - traypublisher = self._experimental_tools.get("traypublisher") - visible = False - if traypublisher and traypublisher.enabled: - visible = True - self._action_item.setVisible(visible) - - def on_action_trigger(self): - self.run_traypublisher() - - def connect_with_modules(self, enabled_modules): - """Collect publish paths from other modules.""" - publish_paths = self.manager.collect_plugin_paths()["publish"] - self.publish_paths.extend(publish_paths) - - def run_traypublisher(self): - args = get_openpype_execute_args("traypublisher") - run_detached_process(args) diff --git a/openpype/modules/webserver/cors_middleware.py b/openpype/modules/webserver/cors_middleware.py new file mode 100644 index 0000000000..f1cd7b04b3 --- /dev/null +++ b/openpype/modules/webserver/cors_middleware.py @@ -0,0 +1,283 @@ +r""" +=============== +CORS Middleware +=============== +.. versionadded:: 0.2.0 +Dealing with CORS headers for aiohttp applications. 
+**IMPORTANT:** There is a `aiohttp-cors +<https://github.com/aio-libs/aiohttp-cors>`_ library, which handles CORS +headers by attaching additional handlers to aiohttp application for +OPTIONS (preflight) requests. At the same time this CORS middleware mimics the +logic of `django-cors-headers +<https://github.com/adamchainz/django-cors-headers>`_, +where all handling is done in the middleware without any additional handlers. This +approach allows the aiohttp application to respond with CORS headers for OPTIONS or +wildcard handlers, which is not possible with ``aiohttp-cors`` due to +https://github.com/aio-libs/aiohttp-cors/issues/241 issue. +For detailed information about CORS (Cross Origin Resource Sharing) please +visit: +- `Wikipedia <https://en.wikipedia.org/wiki/Cross-origin_resource_sharing>`_ +- Or `MDN <https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS>`_ +Configuration +============= +**IMPORTANT:** By default, the CORS middleware does not allow any origins to access +content from your aiohttp application. This means you need to carefully check +possible options and provide custom values for your needs. +Usage +===== +.. code-block:: python + import re + from aiohttp import web + from aiohttp_middlewares import cors_middleware + from aiohttp_middlewares.cors import DEFAULT_ALLOW_HEADERS + # Insecure configuration to allow all CORS requests + app = web.Application( + middlewares=[cors_middleware(allow_all=True)] + ) + # Allow CORS requests from URL http://localhost:3000 + app = web.Application( + middlewares=[ + cors_middleware(origins=["http://localhost:3000"]) + ] + ) + # Allow CORS requests from all localhost urls + app = web.Application( + middlewares=[ + cors_middleware( + origins=[re.compile(r"^https?\:\/\/localhost")] + ) + ] + ) + # Allow CORS requests from https://frontend.myapp.com as well + # as allow credentials + CORS_ALLOW_ORIGINS = ["https://frontend.myapp.com"] + app = web.Application( + middlewares=[ + cors_middleware( + origins=CORS_ALLOW_ORIGINS, + allow_credentials=True, + ) + ] + ) + # Allow CORS requests only for API urls + app = web.Application( + middlewares=[ + cors_middleware( + origins=CORS_ALLOW_ORIGINS, + urls=[re.compile(r"^\/api")], + ) + ] + ) + # Allow CORS requests for POST & PATCH methods, and for all + # default headers and `X-Client-UID` + app = web.Application( + middlewares=[ + cors_middleware( + origins=CORS_ALLOW_ORIGINS, + allow_methods=("POST", "PATCH"), + allow_headers=DEFAULT_ALLOW_HEADERS + + ("X-Client-UID",), + ) + ] + ) +""" + +import logging +import re +from typing import Pattern, Tuple + +from aiohttp import web + +from aiohttp_middlewares.annotations import ( + Handler, + Middleware, + StrCollection, + UrlCollection, +) +from aiohttp_middlewares.utils import match_path + + +ACCESS_CONTROL = "Access-Control" +ACCESS_CONTROL_ALLOW = f"{ACCESS_CONTROL}-Allow" +ACCESS_CONTROL_ALLOW_CREDENTIALS = f"{ACCESS_CONTROL_ALLOW}-Credentials" +ACCESS_CONTROL_ALLOW_HEADERS = f"{ACCESS_CONTROL_ALLOW}-Headers" +ACCESS_CONTROL_ALLOW_METHODS = f"{ACCESS_CONTROL_ALLOW}-Methods" +ACCESS_CONTROL_ALLOW_ORIGIN = f"{ACCESS_CONTROL_ALLOW}-Origin" +ACCESS_CONTROL_EXPOSE_HEADERS = f"{ACCESS_CONTROL}-Expose-Headers" +ACCESS_CONTROL_MAX_AGE = f"{ACCESS_CONTROL}-Max-Age" +ACCESS_CONTROL_REQUEST_METHOD = f"{ACCESS_CONTROL}-Request-Method" + +DEFAULT_ALLOW_HEADERS = ( + "accept", + "accept-encoding", + "authorization", + "content-type", + "dnt", + "origin", + "user-agent", + "x-csrftoken", + "x-requested-with", +) +DEFAULT_ALLOW_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT") +DEFAULT_URLS: Tuple[Pattern[str]] = (re.compile(r".*"),) + +logger = logging.getLogger(__name__) + + +def cors_middleware( + *, + allow_all: bool = False, + origins: UrlCollection = None, + 
urls: UrlCollection = None, + expose_headers: StrCollection = None, + allow_headers: StrCollection = DEFAULT_ALLOW_HEADERS, + allow_methods: StrCollection = DEFAULT_ALLOW_METHODS, + allow_credentials: bool = False, + max_age: int = None, +) -> Middleware: + """Middleware to provide CORS headers for aiohttp applications. + :param allow_all: + When enabled, allow any Origin to access content from your aiohttp web + application. **Please be careful with enabling this option as it may + result in security issues for your application.** By default: ``False`` + :param origins: + Allow content access for given list of origins. Support supplying + strings for exact origin match or regex instances. By default: ``None`` + :param urls: + Allow content access for given list of URLs in aiohttp application. + By default: *apply CORS headers for all URLs* + :param expose_headers: + List of headers to be exposed with every CORS request. By default: + ``None`` + :param allow_headers: + List of allowed headers. By default: + .. code-block:: python + ( + "accept", + "accept-encoding", + "authorization", + "content-type", + "dnt", + "origin", + "user-agent", + "x-csrftoken", + "x-requested-with", + ) + :param allow_methods: + List of allowed methods. By default: + .. code-block:: python + ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT") + :param allow_credentials: + When enabled apply allow credentials header in response, which results + in sharing cookies on shared resources. **Please be careful with + allowing credentials for CORS requests.** By default: ``False`` + :param max_age: Access control max age in seconds. By default: ``None`` + """ + check_urls: UrlCollection = DEFAULT_URLS if urls is None else urls + + @web.middleware + async def middleware( + request: web.Request, handler: Handler + ) -> web.StreamResponse: + # Initial vars + request_method = request.method + request_path = request.rel_url.path + + # Is this an OPTIONS request + is_options_request = request_method == "OPTIONS" + + # Is this a preflight request + is_preflight_request = ( + is_options_request + and ACCESS_CONTROL_REQUEST_METHOD in request.headers + ) + + # Log extra data + log_extra = { + "is_preflight_request": is_preflight_request, + "method": request_method.lower(), + "path": request_path, + } + + # Check whether CORS should be enabled for given URL or not. By default + # CORS enabled for all URLs + if not match_items(check_urls, request_path): + logger.debug( + "Request should not be processed via CORS middleware", + extra=log_extra, + ) + return await handler(request) + + # If this is a preflight request - generate empty response + if is_preflight_request: + response = web.StreamResponse() + # Otherwise - call actual handler + else: + response = await handler(request) + + # Now check origin header + origin = request.headers.get("Origin") + # Empty origin - do nothing + if not origin: + logger.debug( + "Request does not have Origin header. 
CORS headers not " + "available for given requests", + extra=log_extra, + ) + return response + + # Set allow credentials header if necessary + if allow_credentials: + response.headers[ACCESS_CONTROL_ALLOW_CREDENTIALS] = "true" + + # Check whether current origin satisfies CORS policy + if not allow_all and not (origins and match_items(origins, origin)): + logger.debug( + "CORS headers not allowed for given Origin", extra=log_extra + ) + return response + + # Now start supplying CORS headers + # First one is Access-Control-Allow-Origin + if allow_all and not allow_credentials: + cors_origin = "*" + else: + cors_origin = origin + response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = cors_origin + + # Then Access-Control-Expose-Headers + if expose_headers: + response.headers[ACCESS_CONTROL_EXPOSE_HEADERS] = ", ".join( + expose_headers + ) + + # Now, if this is an options request, respond with extra Allow headers + if is_options_request: + response.headers[ACCESS_CONTROL_ALLOW_HEADERS] = ", ".join( + allow_headers + ) + response.headers[ACCESS_CONTROL_ALLOW_METHODS] = ", ".join( + allow_methods + ) + if max_age is not None: + response.headers[ACCESS_CONTROL_MAX_AGE] = str(max_age) + + # If this is preflight request - do not allow other middlewares to + # process this request + if is_preflight_request: + logger.debug( + "Provide CORS headers with empty response for preflight " + "request", + extra=log_extra, + ) + raise web.HTTPOk(text="", headers=response.headers) + + # Otherwise return normal response + logger.debug("Provide CORS headers for request", extra=log_extra) + return response + + return middleware + + +def match_items(items: UrlCollection, value: str) -> bool: + """Go through all items and try to match item with given value.""" + return any(match_path(item, value) for item in items) diff --git a/openpype/modules/webserver/host_console_listener.py b/openpype/modules/webserver/host_console_listener.py index 6138f9f097..e5c11af9c2 100644 --- a/openpype/modules/webserver/host_console_listener.py +++ b/openpype/modules/webserver/host_console_listener.py @@ -3,9 +3,9 @@ from aiohttp import web import json import logging from concurrent.futures import CancelledError -from Qt import QtWidgets +from qtpy import QtWidgets -from openpype_interfaces import ITrayService +from openpype.modules import ITrayService log = logging.getLogger(__name__) diff --git a/openpype/modules/webserver/server.py b/openpype/modules/webserver/server.py index 83a29e074e..120925a362 100644 --- a/openpype/modules/webserver/server.py +++ b/openpype/modules/webserver/server.py @@ -1,16 +1,19 @@ +import re import threading import asyncio from aiohttp import web -from openpype.lib import PypeLogger - -log = PypeLogger.get_logger("WebServer") +from openpype.lib import Logger +from .cors_middleware import cors_middleware class WebServerManager: """Manger that care about web server thread.""" + def __init__(self, port=None, host=None): + self._log = None + self.port = port or 8079 self.host = host or "localhost" @@ -18,12 +21,24 @@ class WebServerManager: self.handlers = {} self.on_stop_callbacks = [] - self.app = web.Application() + self.app = web.Application( + middlewares=[ + cors_middleware( + origins=[re.compile(r"^https?\:\/\/localhost")] + ) + ] + ) # add route with multiple methods for single "external app" self.webserver_thread = WebServerThread(self) + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @property def url(self): return 
"http://{}:{}".format(self.host, self.port) @@ -42,12 +57,12 @@ class WebServerManager: if not self.is_running: return try: - log.debug("Stopping Web server") + self.log.debug("Stopping Web server") self.webserver_thread.is_running = False self.webserver_thread.stop() except Exception: - log.warning( + self.log.warning( "Error has happened during Killing Web server", exc_info=True ) @@ -65,7 +80,10 @@ class WebServerManager: class WebServerThread(threading.Thread): """ Listener for requests in thread.""" + def __init__(self, manager): + self._log = None + super(WebServerThread, self).__init__() self.is_running = False @@ -75,6 +93,12 @@ class WebServerThread(threading.Thread): self.site = None self.tasks = [] + @property + def log(self): + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + @property def port(self): return self.manager.port @@ -87,13 +111,13 @@ class WebServerThread(threading.Thread): self.is_running = True try: - log.info("Starting WebServer server") + self.log.info("Starting WebServer server") self.loop = asyncio.new_event_loop() # create new loop for thread asyncio.set_event_loop(self.loop) self.loop.run_until_complete(self.start_server()) - log.debug( + self.log.debug( "Running Web server on URL: \"localhost:{}\"".format(self.port) ) @@ -101,7 +125,7 @@ class WebServerThread(threading.Thread): self.loop.run_forever() except Exception: - log.warning( + self.log.warning( "Web Server service has failed", exc_info=True ) finally: @@ -109,7 +133,7 @@ class WebServerThread(threading.Thread): self.is_running = False self.manager.thread_stopped() - log.info("Web server stopped") + self.log.info("Web server stopped") async def start_server(self): """ Starts runner and TCPsite """ @@ -129,17 +153,17 @@ class WebServerThread(threading.Thread): while self.is_running: while self.tasks: task = self.tasks.pop(0) - log.debug("waiting for task {}".format(task)) + self.log.debug("waiting for task {}".format(task)) await task - log.debug("returned value {}".format(task.result)) + self.log.debug("returned value {}".format(task.result)) await asyncio.sleep(0.5) - log.debug("Starting shutdown") + self.log.debug("Starting shutdown") await self.site.stop() - log.debug("Site stopped") + self.log.debug("Site stopped") await self.runner.cleanup() - log.debug("Runner stopped") + self.log.debug("Runner stopped") tasks = [ task for task in asyncio.all_tasks() @@ -147,7 +171,9 @@ class WebServerThread(threading.Thread): ] list(map(lambda task: task.cancel(), tasks)) # cancel all the tasks results = await asyncio.gather(*tasks, return_exceptions=True) - log.debug(f'Finished awaiting cancelled tasks, results: {results}...') + self.log.debug( + f'Finished awaiting cancelled tasks, results: {results}...' 
+ ) await self.loop.shutdown_asyncgens() # to really make sure everything else has time to stop await asyncio.sleep(0.07) diff --git a/openpype/modules/webserver/webserver_module.py b/openpype/modules/webserver/webserver_module.py index 686bd27bfd..354ab1e4f9 100644 --- a/openpype/modules/webserver/webserver_module.py +++ b/openpype/modules/webserver/webserver_module.py @@ -24,8 +24,7 @@ import os import socket from openpype import resources -from openpype.modules import OpenPypeModule -from openpype_interfaces import ITrayService +from openpype.modules import OpenPypeModule, ITrayService class WebServerModule(OpenPypeModule, ITrayService): @@ -53,9 +52,12 @@ class WebServerModule(OpenPypeModule, ITrayService): try: module.webserver_initialization(self.server_manager) except Exception: - self.log.warning(( - "Failed to connect module \"{}\" to webserver." - ).format(module.name)) + self.log.warning( + ( + "Failed to connect module \"{}\" to webserver." + ).format(module.name), + exc_info=True + ) def tray_init(self): self.create_server_manager() diff --git a/openpype/pipeline/__init__.py b/openpype/pipeline/__init__.py index 511e4c7b94..7a2ef59a5a 100644 --- a/openpype/pipeline/__init__.py +++ b/openpype/pipeline/__init__.py @@ -3,16 +3,27 @@ from .constants import ( HOST_WORKFILE_EXTENSIONS, ) +from .mongodb import ( + AvalonMongoDB, +) +from .anatomy import Anatomy + from .create import ( BaseCreator, Creator, AutoCreator, CreatedInstance, - CreatorError, LegacyCreator, legacy_create, + + discover_creator_plugins, + discover_legacy_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, ) from .load import ( @@ -34,6 +45,7 @@ from .load import ( loaders_from_representation, get_representation_path, + get_representation_context, get_repres_contexts, ) @@ -61,18 +73,46 @@ from .actions import ( deregister_inventory_action_path, ) +from .context_tools import ( + install_openpype_plugins, + install_host, + uninstall_host, + is_installed, + + register_root, + registered_root, + + register_host, + registered_host, + deregister_host, + get_process_id, + + get_current_context, + get_current_host_name, + get_current_project_name, + get_current_asset_name, + get_current_task_name, +) +install = install_host +uninstall = uninstall_host + __all__ = ( "AVALON_CONTAINER_ID", "HOST_WORKFILE_EXTENSIONS", - "attribute_definitions", + # --- MongoDB --- + "AvalonMongoDB", + + # --- Anatomy --- + "Anatomy", # --- Create --- "BaseCreator", "Creator", "AutoCreator", "CreatedInstance", + "CreatorError", "CreatorError", @@ -80,6 +120,13 @@ __all__ = ( "LegacyCreator", "legacy_create", + "discover_creator_plugins", + "discover_legacy_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", + # --- Load --- "HeroVersionType", "IncompatibleLoaderError", @@ -99,6 +146,7 @@ __all__ = ( "loaders_from_representation", "get_representation_path", + "get_representation_context", "get_repres_contexts", # --- Publish --- @@ -121,4 +169,28 @@ __all__ = ( "register_inventory_action_path", "deregister_inventory_action", "deregister_inventory_action_path", + + # --- Process context --- + "install_openpype_plugins", + "install_host", + "uninstall_host", + "is_installed", + + "register_root", + "registered_root", + + "register_host", + "registered_host", + "deregister_host", + "get_process_id", + + "get_current_context", + "get_current_host_name", + 
"get_current_project_name", + "get_current_asset_name", + "get_current_task_name", + + # Backwards compatible function names + "install", + "uninstall", ) diff --git a/openpype/pipeline/actions.py b/openpype/pipeline/actions.py index 141e277db3..b488fe3e1f 100644 --- a/openpype/pipeline/actions.py +++ b/openpype/pipeline/actions.py @@ -1,4 +1,11 @@ import logging +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) class LauncherAction(object): @@ -90,28 +97,20 @@ class InventoryAction(object): # Launcher action def discover_launcher_actions(): - import avalon.api - - return avalon.api.discover(LauncherAction) + return discover(LauncherAction) def register_launcher_action(plugin): - import avalon.api - - return avalon.api.register_plugin(LauncherAction, plugin) + return register_plugin(LauncherAction, plugin) def register_launcher_action_path(path): - import avalon.api - - return avalon.api.register_plugin_path(LauncherAction, path) + return register_plugin_path(LauncherAction, path) # Inventory action def discover_inventory_actions(): - import avalon.api - - actions = avalon.api.discover(InventoryAction) + actions = discover(InventoryAction) filtered_actions = [] for action in actions: if action is not InventoryAction: @@ -121,24 +120,16 @@ def discover_inventory_actions(): def register_inventory_action(plugin): - import avalon.api - - return avalon.api.register_plugin(InventoryAction, plugin) + return register_plugin(InventoryAction, plugin) def deregister_inventory_action(plugin): - import avalon.api - - avalon.api.deregister_plugin(InventoryAction, plugin) + deregister_plugin(InventoryAction, plugin) def register_inventory_action_path(path): - import avalon.api - - return avalon.api.register_plugin_path(InventoryAction, path) + return register_plugin_path(InventoryAction, path) def deregister_inventory_action_path(path): - import avalon.api - - return avalon.api.deregister_plugin_path(InventoryAction, path) + return deregister_plugin_path(InventoryAction, path) diff --git a/openpype/lib/anatomy.py b/openpype/pipeline/anatomy.py similarity index 84% rename from openpype/lib/anatomy.py rename to openpype/pipeline/anatomy.py index 3fbc05ee88..49d86d69d6 100644 --- a/openpype/lib/anatomy.py +++ b/openpype/pipeline/anatomy.py @@ -5,24 +5,28 @@ import platform import collections import numbers +import six +import time + from openpype.settings.lib import ( - get_default_anatomy_settings, - get_anatomy_settings + get_project_settings, + get_local_settings, ) -from .path_templates import ( +from openpype.settings.constants import ( + DEFAULT_PROJECT_KEY +) + +from openpype.client import get_project +from openpype.lib.path_templates import ( TemplateUnsolved, TemplateResult, TemplatesDict, FormatObject, ) -from .log import PypeLogger +from openpype.lib.log import Logger +from openpype.lib import get_local_site_id -log = PypeLogger().get_logger(__name__) - -try: - StringType = basestring -except NameError: - StringType = str +log = Logger.get_logger(__name__) class ProjectNotSet(Exception): @@ -45,34 +49,29 @@ class RootCombinationError(Exception): super(RootCombinationError, self).__init__(msg) -class Anatomy: +class BaseAnatomy(object): """Anatomy module helps to keep project settings. Wraps key project specifications, AnatomyTemplates and Roots. - - Args: - project_name (str): Project name to look on overrides. 
""" - root_key_regex = re.compile(r"{(root?[^}]+)}") root_name_regex = re.compile(r"root\[([^]]+)\]") - def __init__(self, project_name=None, site_name=None): - if not project_name: - project_name = os.environ.get("AVALON_PROJECT") - - if not project_name: - raise ProjectNotSet(( - "Implementation bug: Project name is not set. Anatomy requires" - " to load data for specific project." - )) - + def __init__(self, project_doc, local_settings, site_name): + project_name = project_doc["name"] self.project_name = project_name + self.project_code = project_doc["data"]["code"] + + if (site_name and + site_name not in ["studio", "local", get_local_site_id()]): + raise RuntimeError("Anatomy could be created only for default " + "local sites not for {}".format(site_name)) + + self._site_name = site_name self._data = self._prepare_anatomy_data( - get_anatomy_settings(project_name, site_name) + project_doc, local_settings, site_name ) - self._site_name = site_name self._templates_obj = AnatomyTemplates(self) self._roots_obj = Roots(self) @@ -93,22 +92,14 @@ class Anatomy: def items(self): return copy.deepcopy(self._data).items() - @staticmethod - def default_data(): - """Default project anatomy data. - - Always return fresh loaded data. May be used as data for new project. - - Not used inside Anatomy itself. - """ - return get_default_anatomy_settings(clear_metadata=False) - - @staticmethod - def _prepare_anatomy_data(anatomy_data): + def _prepare_anatomy_data(self, project_doc, local_settings, site_name): """Prepare anatomy data for further processing. Method added to replace `{task}` with `{task[name]}` in templates. """ + project_name = project_doc["name"] + anatomy_data = self._project_doc_to_anatomy_data(project_doc) + templates_data = anatomy_data.get("templates") if templates_data: # Replace `{task}` with `{task[name]}` in templates @@ -119,23 +110,13 @@ class Anatomy: if not isinstance(item, dict): continue - for key in tuple(item.keys()): - value = item[key] - if isinstance(value, dict): - value_queue.append(value) + self._apply_local_settings_on_anatomy_data(anatomy_data, + local_settings, + project_name, + site_name) - elif isinstance(value, StringType): - item[key] = value.replace("{task}", "{task[name]}") return anatomy_data - def reset(self): - """Reset values of cached data in templates and roots objects.""" - self._data = self._prepare_anatomy_data( - get_anatomy_settings(self.project_name, self._site_name) - ) - self.templates_obj.reset() - self.roots_obj.reset() - @property def templates(self): """Wrap property `templates` of Anatomy's AnatomyTemplates instance.""" @@ -354,6 +335,161 @@ class Anatomy: data = self.root_environmets_fill_data(template) return rootless_path.format(**data) + def _project_doc_to_anatomy_data(self, project_doc): + """Convert project document to anatomy data. + + Probably should fill missing keys and values. + """ + + output = copy.deepcopy(project_doc["config"]) + output["attributes"] = copy.deepcopy(project_doc["data"]) + + return output + + def _apply_local_settings_on_anatomy_data( + self, anatomy_data, local_settings, project_name, site_name + ): + """Apply local settings on anatomy data. + + ATM local settings can modify project roots. Project name is required + as local settings have data stored data by project's name. + + Local settings override root values in this order: + 1.) Check if local settings contain overrides for default project and + apply it's values on roots if there are any. + 2.) 
+class Anatomy(BaseAnatomy): + _project_cache = {} + _site_cache = {} + + def __init__(self, project_name=None, site_name=None): + if not project_name: + project_name = os.environ.get("AVALON_PROJECT") + + if not project_name: + raise ProjectNotSet(( + "Implementation bug: Project name is not set. Anatomy requires" + " to load data for specific project." + )) + + project_doc = self.get_project_doc_from_cache(project_name) + local_settings = get_local_settings() + if not site_name: + site_name = self.get_site_name_from_cache( + project_name, local_settings + ) + + super(Anatomy, self).__init__( + project_doc, + local_settings, + site_name + ) + + @classmethod + def get_project_doc_from_cache(cls, project_name): + project_cache = cls._project_cache.get(project_name) + if project_cache is not None: + if time.time() - project_cache["start"] > 10: + cls._project_cache.pop(project_name) + project_cache = None + + if project_cache is None: + project_cache = { + "project_doc": get_project(project_name), + "start": time.time() + } + cls._project_cache[project_name] = project_cache + + return copy.deepcopy( + cls._project_cache[project_name]["project_doc"] + ) + + @classmethod + def get_site_name_from_cache(cls, project_name, local_settings): + site_cache = cls._site_cache.get(project_name) + if site_cache is not None: + if time.time() - site_cache["start"] > 10: + cls._site_cache.pop(project_name) + site_cache = None + + if site_cache: + return site_cache["site_name"] + + local_project_settings = local_settings.get("projects") + if not local_project_settings: + return + + project_locals = local_project_settings.get(project_name) or {} + default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {} + active_site = ( + project_locals.get("active_site") + or default_locals.get("active_site") + ) + if not active_site: + project_settings = get_project_settings(project_name) + active_site = ( + project_settings + ["global"] + ["sync_server"] + ["config"] + ["active_site"] + ) + + cls._site_cache[project_name] = { + "site_name": active_site, + "start": time.time() + } + return active_site + class 
AnatomyTemplateUnsolved(TemplateUnsolved): """Exception for unsolved template when strict is set to True.""" @@ -385,6 +521,30 @@ class AnatomyTemplateResult(TemplateResult): self.invalid_types ) + def copy(self): + tmp = TemplateResult( + str(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + + def normalized(self): + """Convert to normalized path.""" + + tmp = TemplateResult( + os.path.normpath(self), + self.template, + self.solved, + self.used_values, + self.missing_keys, + self.invalid_types + ) + return self.__class__(tmp, self.rootless) + class AnatomyTemplates(TemplatesDict): inner_key_pattern = re.compile(r"(\{@.*?[^{}0]*\})") @@ -462,7 +622,7 @@ class AnatomyTemplates(TemplatesDict): v_queue.append(value) elif ( - isinstance(value, StringType) + isinstance(value, six.string_types) and "{task}" in value ): item[key] = value.replace("{task}", "{task[name]}") @@ -522,8 +682,10 @@ class AnatomyTemplates(TemplatesDict): " invalid inner key `{1}`." ).format(key, anatomy_sub_key)) - valid = isinstance(replace_value, (numbers.Number, StringType)) - if not valid: + if not ( + isinstance(replace_value, numbers.Number) + or isinstance(replace_value, six.string_types) + ): raise ValueError(( "Anatomy templates can't be filled." " Anatomy key `{0}` has" @@ -550,7 +712,7 @@ class AnatomyTemplates(TemplatesDict): for key in tuple(keys_to_solve): value = key_values[key] - if isinstance(value, StringType): + if isinstance(value, six.string_types): matches = cls.inner_key_pattern.findall(value) if not matches: keys_to_solve.remove(key) @@ -951,14 +1113,18 @@ class RootItem(FormatObject): result = False output = str(path) - root_paths = list(self.cleaned_data.values()) mod_path = self.clean_path(path) - for root_path in root_paths: + for root_os, root_path in self.cleaned_data.items(): # Skip empty paths if not root_path: continue - if mod_path.startswith(root_path): + _mod_path = mod_path # reset to original cleaned value + if root_os == "windows": + root_path = root_path.lower() + _mod_path = _mod_path.lower() + + if _mod_path.startswith(root_path): result = True replacement = "{" + self.full_key() + "}" output = replacement + mod_path[len(root_path):] @@ -1245,7 +1411,7 @@ class Roots: parent_keys = [] is_last = False for value in data.values(): - if isinstance(value, StringType): + if isinstance(value, six.string_types): is_last = True break diff --git a/openpype/pipeline/colorspace.py b/openpype/pipeline/colorspace.py new file mode 100644 index 0000000000..cb37b2c4ae --- /dev/null +++ b/openpype/pipeline/colorspace.py @@ -0,0 +1,470 @@ +from copy import deepcopy +import re +import os +import sys +import json +import platform +import contextlib +import tempfile +from openpype import PACKAGE_DIR +from openpype.settings import get_project_settings +from openpype.lib import ( + StringTemplate, + run_openpype_process, + Logger +) +from openpype.pipeline import Anatomy + +log = Logger.get_logger(__name__) + + +@contextlib.contextmanager +def _make_temp_json_file(): + """Wrapping function for json temp file + """ + try: + # Store dumped json to temporary file + temporary_json_file = tempfile.NamedTemporaryFile( + mode="w", suffix=".json", delete=False + ) + temporary_json_file.close() + temporary_json_filepath = temporary_json_file.name.replace( + "\\", "/" + ) + + yield temporary_json_filepath + + except IOError as _error: + raise IOError( + "Unable to create temp json file: {}".format( + _error + ) + 
) + + finally: + # Remove the temporary json + os.remove(temporary_json_filepath) + + +def get_ocio_config_script_path(): + """Get path to ocio wrapper script + + Returns: + str: path string + """ + return os.path.normpath( + os.path.join( + PACKAGE_DIR, + "scripts", + "ocio_wrapper.py" + ) + ) + + +def get_imageio_colorspace_from_filepath( + path, host_name, project_name, + config_data=None, file_rules=None, + project_settings=None, + validate=True +): + """Get colorspace name from filepath + + ImageIO Settings file rules are tested for matching rule. + + Args: + path (str): path string, file rule pattern is tested on it + host_name (str): host name + project_name (str): project name + config_data (dict, optional): config path and template in dict. + Defaults to None. + file_rules (dict, optional): file rule data from settings. + Defaults to None. + project_settings (dict, optional): project settings. Defaults to None. + validate (bool, optional): should resulting colorspace be validated + with config file? Defaults to True. + + Returns: + str: name of colorspace + """ + if not any([config_data, file_rules]): + project_settings = project_settings or get_project_settings( + project_name + ) + config_data = get_imageio_config( + project_name, host_name, project_settings) + file_rules = get_imageio_file_rules( + project_name, host_name, project_settings) + + # match file rule from path + colorspace_name = None + for _frule_name, file_rule in file_rules.items(): + pattern = file_rule["pattern"] + extension = file_rule["ext"] + ext_match = re.match( + r".*(?=.{})".format(extension), path + ) + file_match = re.search( + pattern, path + ) + + if ext_match and file_match: + colorspace_name = file_rule["colorspace"] + + if not colorspace_name: + log.info("No imageio file rule matched input path: '{}'".format( + path + )) + return None + + # validate matching colorspace with config + if validate and config_data: + validate_imageio_colorspace_in_config( + config_data["path"], colorspace_name) + + return colorspace_name + + +def parse_colorspace_from_filepath( + path, host_name, project_name, + config_data=None, + project_settings=None +): + """Parse colorspace name from filepath + + An input path can have colorspace name used as part of name + or as folder name. + + Args: + path (str): path string + host_name (str): host name + project_name (str): project name + config_data (dict, optional): config path and template in dict. + Defaults to None. + project_settings (dict, optional): project settings. Defaults to None. 
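As a side note on the file-rule matching used by `get_imageio_colorspace_from_filepath` above, a small self-contained sketch; the rule values are hypothetical, only the two-regex test mirrors the patch:

```python
import re

# Hypothetical rule in the shape the settings provide: both the pattern
# and the extension must match before the colorspace is taken.
file_rule = {"pattern": "beauty", "ext": "exr", "colorspace": "ACES - ACEScg"}
path = "/renders/sh010_beauty.1001.exr"

ext_match = re.match(r".*(?=.{})".format(file_rule["ext"]), path)
file_match = re.search(file_rule["pattern"], path)
if ext_match and file_match:
    print(file_rule["colorspace"])  # -> ACES - ACEScg
```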
+ + Returns: + str: name of colorspace + """ + if not config_data: + project_settings = project_settings or get_project_settings( + project_name + ) + config_data = get_imageio_config( + project_name, host_name, project_settings) + + config_path = config_data["path"] + + # match file rule from path + colorspace_name = None + colorspaces = get_ocio_config_colorspaces(config_path) + for colorspace_key in colorspaces: + # check underscored variant of colorspace name + # since we are reformating it in integrate.py + if colorspace_key.replace(" ", "_") in path: + colorspace_name = colorspace_key + break + if colorspace_key in path: + colorspace_name = colorspace_key + break + + if not colorspace_name: + log.info("No matching colorspace in config '{}' for path: '{}'".format( + config_path, path + )) + return None + + return colorspace_name + + +def validate_imageio_colorspace_in_config(config_path, colorspace_name): + """Validator making sure colorspace name is used in config.ocio + + Args: + config_path (str): path leading to config.ocio file + colorspace_name (str): tested colorspace name + + Raises: + KeyError: missing colorspace name + + Returns: + bool: True if exists + """ + colorspaces = get_ocio_config_colorspaces(config_path) + if colorspace_name not in colorspaces: + raise KeyError( + "Missing colorspace '{}' in config file '{}'".format( + colorspace_name, config_path) + ) + return True + + +def get_ocio_config_colorspaces(config_path): + """Get all colorspace data + + Wrapper function for aggregating all names and its families. + Families can be used for building menu and submenus in gui. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + if sys.version_info[0] == 2: + return get_colorspace_data_subprocess(config_path) + + from ..scripts.ocio_wrapper import _get_colorspace_data + return _get_colorspace_data(config_path) + + +def get_colorspace_data_subprocess(config_path): + """Get colorspace data via subprocess + + Wrapper for Python 2 hosts. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: colorspace and family in couple + """ + with _make_temp_json_file() as tmp_json_path: + # Prepare subprocess arguments + args = [ + "run", get_ocio_config_script_path(), + "config", "get_colorspace", + "--in_path", config_path, + "--out_path", tmp_json_path + + ] + log.info("Executing: {}".format(" ".join(args))) + + process_kwargs = { + "logger": log, + "env": {} + } + + run_openpype_process(*args, **process_kwargs) + + # return all colorspaces + return_json_data = open(tmp_json_path).read() + return json.loads(return_json_data) + + +def get_ocio_config_views(config_path): + """Get all viewer data + + Wrapper function for aggregating all display and related viewers. + Key can be used for building gui menu with submenus. + + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + """ + if sys.version_info[0] == 2: + return get_views_data_subprocess(config_path) + + from ..scripts.ocio_wrapper import _get_views_data + return _get_views_data(config_path) + + +def get_views_data_subprocess(config_path): + """Get viewers data via subprocess + + Wrapper for Python 2 hosts. 
+ + Args: + config_path (str): path leading to config.ocio file + + Returns: + dict: `display/viewer` and viewer data + """ + with _make_temp_json_file() as tmp_json_path: + # Prepare subprocess arguments + args = [ + "run", get_ocio_config_script_path(), + "config", "get_views", + "--in_path", config_path, + "--out_path", tmp_json_path + + ] + log.info("Executing: {}".format(" ".join(args))) + + process_kwargs = { + "logger": log, + "env": {} + } + + run_openpype_process(*args, **process_kwargs) + + # return all colorspaces + return_json_data = open(tmp_json_path).read() + return json.loads(return_json_data) + + +def get_imageio_config( + project_name, host_name, + project_settings=None, + anatomy_data=None, + anatomy=None +): + """Returns config data from settings + + Config path is formatted in `path` key + and original settings input is saved into `template` key. + + Args: + project_name (str): project name + host_name (str): host name + project_settings (dict, optional): project settings. + Defaults to None. + anatomy_data (dict, optional): anatomy formatting data. + Defaults to None. + anatomy (lib.Anatomy, optional): Anatomy object. + Defaults to None. + + Returns: + dict or bool: config path data or None + """ + project_settings = project_settings or get_project_settings(project_name) + anatomy = anatomy or Anatomy(project_name) + + if not anatomy_data: + from openpype.pipeline.context_tools import ( + get_template_data_from_session) + anatomy_data = get_template_data_from_session() + + # add project roots to anatomy data + anatomy_data["root"] = anatomy.roots + anatomy_data["platform"] = platform.system().lower() + + # get colorspace settings + imageio_global, imageio_host = _get_imageio_settings( + project_settings, host_name) + + config_host = imageio_host.get("ocio_config", {}) + + if config_host.get("enabled"): + config_data = _get_config_data( + config_host["filepath"], anatomy_data + ) + else: + config_data = None + + if not config_data: + # get config path from either global or host_name + config_global = imageio_global["ocio_config"] + config_data = _get_config_data( + config_global["filepath"], anatomy_data + ) + + if not config_data: + raise FileExistsError( + "No OCIO config found in settings. It is " + "either missing or there is typo in path inputs" + ) + + return config_data + + +def _get_config_data(path_list, anatomy_data): + """Return first existing path in path list. + + If template is used in path inputs, + then it is formated by anatomy data + and environment variables + + Args: + path_list (list[str]): list of abs paths + anatomy_data (dict): formating data + + Returns: + dict: config data + """ + formatting_data = deepcopy(anatomy_data) + + # format the path for potential env vars + formatting_data.update(dict(**os.environ)) + + # first try host config paths + for path_ in path_list: + formated_path = _format_path(path_, formatting_data) + + if not os.path.exists(formated_path): + continue + + return { + "path": os.path.normpath(formated_path), + "template": path_ + } + + +def _format_path(tempate_path, formatting_data): + """Single template path formating. 
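A short sketch of the formatting flow `_get_config_data` uses above: anatomy data merged with environment variables, then fed through `StringTemplate`. The template string is illustrative and assumes an `OPENPYPE_ROOT` variable is set:

```python
import os
from copy import deepcopy

from openpype.lib import StringTemplate

# Illustrative anatomy data; in the patch this comes from the session.
formatting_data = deepcopy({"project": {"name": "demo"}})
# Environment variables become formatting keys too, as in _get_config_data.
formatting_data.update(dict(**os.environ))

template = "{OPENPYPE_ROOT}/configs/{project[name]}/config.ocio"
path = StringTemplate(template).format(formatting_data)
print(os.path.abspath(path))
```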
+ + Args: + tempate_path (str): template string + formatting_data (dict): data to be used for + template formating + + Returns: + str: absolute formated path + """ + # format path for anatomy keys + formatted_path = StringTemplate(tempate_path).format( + formatting_data) + + return os.path.abspath(formatted_path) + + +def get_imageio_file_rules(project_name, host_name, project_settings=None): + """Get ImageIO File rules from project settings + + Args: + project_name (str): project name + host_name (str): host name + project_settings (dict, optional): project settings. + Defaults to None. + + Returns: + dict: file rules data + """ + project_settings = project_settings or get_project_settings(project_name) + + imageio_global, imageio_host = _get_imageio_settings( + project_settings, host_name) + + # get file rules from global and host_name + frules_global = imageio_global["file_rules"] + # host is optional, some might not have any settings + frules_host = imageio_host.get("file_rules", {}) + + # compile file rules dictionary + file_rules = {} + if frules_global["enabled"]: + file_rules.update(frules_global["rules"]) + if frules_host and frules_host["enabled"]: + file_rules.update(frules_host["rules"]) + + return file_rules + + +def _get_imageio_settings(project_settings, host_name): + """Get ImageIO settings for global and host + + Args: + project_settings (dict): project settings. + Defaults to None. + host_name (str): host name + + Returns: + tuple[dict, dict]: image io settings for global and host + """ + # get image io from global and host_name + imageio_global = project_settings["global"]["imageio"] + # host is optional, some might not have any settings + imageio_host = project_settings.get(host_name, {}).get("imageio", {}) + + return imageio_global, imageio_host diff --git a/openpype/pipeline/context_tools.py b/openpype/pipeline/context_tools.py new file mode 100644 index 0000000000..6610fd7da7 --- /dev/null +++ b/openpype/pipeline/context_tools.py @@ -0,0 +1,625 @@ +"""Core pipeline functionality""" + +import os +import json +import types +import logging +import platform +import uuid + +import pyblish.api +from pyblish.lib import MessageHandler + +import openpype +from openpype.host import HostBase +from openpype.client import ( + get_project, + get_asset_by_id, + get_asset_by_name, + version_is_latest, +) +from openpype.lib.events import emit_event +from openpype.modules import load_modules, ModulesManager +from openpype.settings import get_project_settings + +from .publish.lib import filter_pyblish_plugins +from .anatomy import Anatomy +from .template_data import get_template_data_with_names +from .workfile import ( + get_workfile_template_key, + get_custom_workfile_template_by_string_context, +) +from . import ( + legacy_io, + register_loader_plugin_path, + register_inventory_action_path, + register_creator_plugin_path, + deregister_loader_plugin_path, +) + + +_is_installed = False +_process_id = None +_registered_root = {"_": ""} +_registered_host = {"_": None} +# Keep modules manager (and it's modules) in memory +# - that gives option to register modules' callbacks +_modules_manager = None + +log = logging.getLogger(__name__) + +PACKAGE_DIR = os.path.dirname(os.path.abspath(openpype.__file__)) +PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") + +# Global plugin paths +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "load") + + +def _get_modules_manager(): + """Get or create modules manager for host installation. 
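The lazy module-level singleton idiom used here, shown in isolation; the names are generic stand-ins, not OpenPype API:

```python
# Create on first access, reuse afterwards, so anything registered on the
# manager stays alive for the lifetime of the process.
_manager = None


def get_manager():
    global _manager
    if _manager is None:
        _manager = object()  # stand-in for ModulesManager()
    return _manager


assert get_manager() is get_manager()
```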
+ + This is not meant for public usage. Reason is to keep modules + in memory of process to be able trigger their event callbacks if they + need any. + + Returns: + ModulesManager: Manager wrapping discovered modules. + """ + + global _modules_manager + if _modules_manager is None: + _modules_manager = ModulesManager() + return _modules_manager + + +def register_root(path): + """Register currently active root""" + log.info("Registering root: %s" % path) + _registered_root["_"] = path + + +def registered_root(): + """Return currently registered root""" + root = _registered_root["_"] + if root: + return root + + root = legacy_io.Session.get("AVALON_PROJECTS") + if root: + return os.path.normpath(root) + return "" + + +def install_host(host): + """Install `host` into the running Python session. + + Args: + host (module): A Python module containing the Avalon + avalon host-interface. + """ + global _is_installed + + _is_installed = True + + legacy_io.install() + modules_manager = _get_modules_manager() + + missing = list() + for key in ("AVALON_PROJECT", "AVALON_ASSET"): + if key not in legacy_io.Session: + missing.append(key) + + assert not missing, ( + "%s missing from environment, %s" % ( + ", ".join(missing), + json.dumps(legacy_io.Session, indent=4, sort_keys=True) + )) + + project_name = legacy_io.Session["AVALON_PROJECT"] + log.info("Activating %s.." % project_name) + + # Optional host install function + if hasattr(host, "install"): + host.install() + + register_host(host) + + def modified_emit(obj, record): + """Method replacing `emit` in Pyblish's MessageHandler.""" + record.msg = record.getMessage() + obj.records.append(record) + + MessageHandler.emit = modified_emit + + if os.environ.get("OPENPYPE_REMOTE_PUBLISH"): + # target "farm" == rendering on farm, expects OPENPYPE_PUBLISH_DATA + # target "remote" == remote execution, installs host + print("Registering pyblish target: remote") + pyblish.api.register_target("remote") + else: + pyblish.api.register_target("local") + + project_name = os.environ.get("AVALON_PROJECT") + host_name = os.environ.get("AVALON_APP") + + # Give option to handle host installation + for module in modules_manager.get_enabled_modules(): + module.on_host_install(host, host_name, project_name) + + install_openpype_plugins(project_name, host_name) + + +def install_openpype_plugins(project_name=None, host_name=None): + # Make sure modules are loaded + load_modules() + + log.info("Registering global plug-ins..") + pyblish.api.register_plugin_path(PUBLISH_PATH) + pyblish.api.register_discovery_filter(filter_pyblish_plugins) + register_loader_plugin_path(LOAD_PATH) + + if host_name is None: + host_name = os.environ.get("AVALON_APP") + + modules_manager = _get_modules_manager() + publish_plugin_dirs = modules_manager.collect_publish_plugin_paths( + host_name) + for path in publish_plugin_dirs: + pyblish.api.register_plugin_path(path) + + create_plugin_paths = modules_manager.collect_create_plugin_paths( + host_name) + for path in create_plugin_paths: + register_creator_plugin_path(path) + + load_plugin_paths = modules_manager.collect_load_plugin_paths( + host_name) + for path in load_plugin_paths: + register_loader_plugin_path(path) + + if project_name is None: + project_name = os.environ.get("AVALON_PROJECT") + + # Register studio specific plugins + if project_name: + anatomy = Anatomy(project_name) + anatomy.set_root_environments() + register_root(anatomy.roots) + + project_settings = get_project_settings(project_name) + platform_name = platform.system().lower() + 
project_plugins = ( + project_settings + .get("global", {}) + .get("project_plugins", {}) + .get(platform_name) + ) or [] + for path in project_plugins: + try: + path = str(path.format(**os.environ)) + except KeyError: + pass + + if not path or not os.path.exists(path): + continue + + pyblish.api.register_plugin_path(path) + register_loader_plugin_path(path) + register_creator_plugin_path(path) + register_inventory_action_path(path) + + +def uninstall_host(): + """Undo all of what `install()` did""" + host = registered_host() + + try: + host.uninstall() + except AttributeError: + pass + + log.info("Deregistering global plug-ins..") + pyblish.api.deregister_plugin_path(PUBLISH_PATH) + pyblish.api.deregister_discovery_filter(filter_pyblish_plugins) + deregister_loader_plugin_path(LOAD_PATH) + log.info("Global plug-ins unregistred") + + deregister_host() + + legacy_io.uninstall() + + log.info("Successfully uninstalled Avalon!") + + +def is_installed(): + """Return state of installation + + Returns: + True if installed, False otherwise + + """ + + return _is_installed + + +def register_host(host): + """Register a new host for the current process + + Arguments: + host (ModuleType): A module implementing the + Host API interface. See the Host API + documentation for details on what is + required, or browse the source code. + + """ + + _registered_host["_"] = host + + +def registered_host(): + """Return currently registered host""" + return _registered_host["_"] + + +def deregister_host(): + _registered_host["_"] = None + + +def debug_host(): + """A debug host, useful to debugging features that depend on a host""" + + host = types.ModuleType("debugHost") + + def ls(): + containers = [ + { + "representation": "ee-ft-a-uuid1", + "schema": "openpype:container-1.0", + "name": "Bruce01", + "objectName": "Bruce01_node", + "namespace": "_bruce01_", + "version": 3, + }, + { + "representation": "aa-bc-s-uuid2", + "schema": "openpype:container-1.0", + "name": "Bruce02", + "objectName": "Bruce01_node", + "namespace": "_bruce02_", + "version": 2, + } + ] + + for container in containers: + yield container + + host.__dict__.update({ + "ls": ls, + "open_file": lambda fname: None, + "save_file": lambda fname: None, + "current_file": lambda: os.path.expanduser("~/temp.txt"), + "has_unsaved_changes": lambda: False, + "work_root": lambda: os.path.expanduser("~/temp"), + "file_extensions": lambda: ["txt"], + }) + + return host + + +def get_current_host_name(): + """Current host name. + + Function is based on currently registered host integration or environment + variant 'AVALON_APP'. + + Returns: + Union[str, None]: Name of host integration in current process or None. 
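The context getters below share one fallback pattern: prefer the installed `HostBase` implementation, fall back to the legacy environment variables. A stripped-down sketch with a fake host object:

```python
import os


class FakeHost:
    # Stand-in for a HostBase implementation.
    name = "maya"


def current_host_name(host):
    # Prefer the host integration, fall back to the legacy env variable.
    if host is not None and hasattr(host, "name"):
        return host.name
    return os.environ.get("AVALON_APP")


print(current_host_name(FakeHost()))  # -> maya
print(current_host_name(None))        # -> AVALON_APP value or None
```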
+ """ + + host = registered_host() + if isinstance(host, HostBase): + return host.name + return os.environ.get("AVALON_APP") + + +def get_global_context(): + return { + "project_name": os.environ.get("AVALON_PROJECT"), + "asset_name": os.environ.get("AVALON_ASSET"), + "task_name": os.environ.get("AVALON_TASK"), + } + + +def get_current_context(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_context() + return get_global_context() + + +def get_current_project_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_project_name() + return get_global_context()["project_name"] + + +def get_current_asset_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_asset_name() + return get_global_context()["asset_name"] + + +def get_current_task_name(): + host = registered_host() + if isinstance(host, HostBase): + return host.get_current_task_name() + return get_global_context()["task_name"] + + +def get_current_project(fields=None): + """Helper function to get project document based on global Session. + + This function should be called only in process where host is installed. + + Returns: + dict: Project document. + None: Project is not set. + """ + + project_name = get_current_project_name() + return get_project(project_name, fields=fields) + + +def get_current_project_asset(asset_name=None, asset_id=None, fields=None): + """Helper function to get asset document based on global Session. + + This function should be called only in process where host is installed. + + Asset is found out based on passed asset name or id (not both). Asset name + is not used for filtering if asset id is passed. When both asset name and + id are missing then asset name from current process is used. + + Args: + asset_name (str): Name of asset used for filter. + asset_id (Union[str, ObjectId]): Asset document id. If entered then + is used as only filter. + fields (Union[List[str], None]): Limit returned data of asset documents + to specific keys. + + Returns: + dict: Asset document. + None: Asset is not set or not exist. + """ + + project_name = get_current_project_name() + if asset_id: + return get_asset_by_id(project_name, asset_id, fields=fields) + + if not asset_name: + asset_name = get_current_asset_name() + # Skip if is not set even on context + if not asset_name: + return None + return get_asset_by_name(project_name, asset_name, fields=fields) + + +def is_representation_from_latest(representation): + """Return whether the representation is from latest version + + Args: + representation (dict): The representation document from the database. + + Returns: + bool: Whether the representation is of latest version. + """ + + project_name = get_current_project_name() + return version_is_latest(project_name, representation["parent"]) + + +def get_template_data_from_session(session=None, system_settings=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + system_settings (Union[Dict[str, Any], Any]): Prepared system settings. + Optional are auto received if not passed. + + Returns: + Dict[str, Any]: All available data from session. 
+ """ + + if session is None: + session = legacy_io.Session + + project_name = session["AVALON_PROJECT"] + asset_name = session["AVALON_ASSET"] + task_name = session["AVALON_TASK"] + host_name = session["AVALON_APP"] + + return get_template_data_with_names( + project_name, asset_name, task_name, host_name, system_settings + ) + + +def get_workdir_from_session(session=None, template_key=None): + """Template data for template fill from session keys. + + Args: + session (Union[Dict[str, str], None]): The Session to use. If not + provided use the currently active global Session. + template_key (str): Prepared template key from which workdir is + calculated. + + Returns: + str: Workdir path. + """ + + if session is None: + session = legacy_io.Session + project_name = session["AVALON_PROJECT"] + host_name = session["AVALON_APP"] + anatomy = Anatomy(project_name) + template_data = get_template_data_from_session(session) + anatomy_filled = anatomy.format(template_data) + + if not template_key: + task_type = template_data["task"]["type"] + template_key = get_workfile_template_key( + task_type, + host_name, + project_name=project_name + ) + path = anatomy_filled[template_key]["folder"] + if path: + path = os.path.normpath(path) + return path + + +def get_custom_workfile_template_from_session( + session=None, project_settings=None +): + """Filter and fill workfile template profiles by current context. + + Current context is defined by `legacy_io.Session`. That's why this + function should be used only inside host where context is set and stable. + + Args: + session (Union[None, Dict[str, str]]): Session from which are taken + data. + project_settings(Dict[str, Any]): Template profiles from settings. + + Returns: + str: Path to template or None if none of profiles match current + context. (Existence of formatted path is not validated.) + """ + + if session is None: + session = legacy_io.Session + + return get_custom_workfile_template_by_string_context( + session["AVALON_PROJECT"], + session["AVALON_ASSET"], + session["AVALON_TASK"], + session["AVALON_APP"], + project_settings=project_settings + ) + + +def compute_session_changes( + session, asset_doc, task_name, template_key=None +): + """Compute the changes for a session object on task under asset. + + Function does not change the session object, only returns changes. + + Args: + session (Dict[str, str]): The initial session to compute changes to. + This is required for computing the full Work Directory, as that + also depends on the values that haven't changed. + asset_doc (Dict[str, Any]): Asset document to switch to. + task_name (str): Name of task to switch to. + template_key (Union[str, None]): Prepare workfile template key in + anatomy templates. + + Returns: + Dict[str, str]: Changes in the Session dictionary. 
+ """ + + changes = {} + + # Get asset document and asset + if not asset_doc: + task_name = None + asset_name = None + else: + asset_name = asset_doc["name"] + + # Detect any changes compared session + mapping = { + "AVALON_ASSET": asset_name, + "AVALON_TASK": task_name, + } + changes = { + key: value + for key, value in mapping.items() + if value != session.get(key) + } + if not changes: + return changes + + # Compute work directory (with the temporary changed session so far) + changed_session = session.copy() + changed_session.update(changes) + + workdir = None + if asset_doc: + workdir = get_workdir_from_session( + changed_session, template_key + ) + + changes["AVALON_WORKDIR"] = workdir + + return changes + + +def change_current_context(asset_doc, task_name, template_key=None): + """Update active Session to a new task work area. + + This updates the live Session to a different task under asset. + + Args: + asset_doc (Dict[str, Any]): The asset document to set. + task_name (str): The task to set under asset. + template_key (Union[str, None]): Prepared template key to be used for + workfile template in Anatomy. + + Returns: + Dict[str, str]: The changed key, values in the current Session. + """ + + changes = compute_session_changes( + legacy_io.Session, + asset_doc, + task_name, + template_key=template_key + ) + + # Update the Session and environments. Pop from environments all keys with + # value set to None. + for key, value in changes.items(): + legacy_io.Session[key] = value + if value is None: + os.environ.pop(key, None) + else: + os.environ[key] = value + + data = changes.copy() + # Convert env keys to human readable keys + data["project_name"] = legacy_io.Session["AVALON_PROJECT"] + data["asset_name"] = legacy_io.Session["AVALON_ASSET"] + data["task_name"] = legacy_io.Session["AVALON_TASK"] + + # Emit session change + emit_event("taskChanged", data) + + return changes + + +def get_process_id(): + """Fake process id created on demand using uuid. + + Can be used to create process specific folders in temp directory. + + Returns: + str: Process id. 
+ """ + + global _process_id + if _process_id is None: + _process_id = str(uuid.uuid4()) + return _process_id diff --git a/openpype/pipeline/create/__init__.py b/openpype/pipeline/create/__init__.py index 9571f56b8f..c89fb04c42 100644 --- a/openpype/pipeline/create/__init__.py +++ b/openpype/pipeline/create/__init__.py @@ -1,12 +1,33 @@ from .constants import ( - SUBSET_NAME_ALLOWED_SYMBOLS + SUBSET_NAME_ALLOWED_SYMBOLS, + DEFAULT_SUBSET_TEMPLATE, + PRE_CREATE_THUMBNAIL_KEY, ) + +from .subset_name import ( + TaskNotSetError, + get_subset_name_template, + get_subset_name, +) + from .creator_plugins import ( CreatorError, BaseCreator, Creator, - AutoCreator + AutoCreator, + HiddenCreator, + + discover_legacy_creator_plugins, + get_legacy_creator_by_name, + + discover_creator_plugins, + register_creator_plugin, + deregister_creator_plugin, + register_creator_plugin_path, + deregister_creator_plugin_path, + + cache_and_get_instances, ) from .context import ( @@ -22,12 +43,28 @@ from .legacy_create import ( __all__ = ( "SUBSET_NAME_ALLOWED_SYMBOLS", + "DEFAULT_SUBSET_TEMPLATE", + "PRE_CREATE_THUMBNAIL_KEY", + + "TaskNotSetError", + "get_subset_name_template", + "get_subset_name", "CreatorError", "BaseCreator", "Creator", "AutoCreator", + "HiddenCreator", + + "discover_legacy_creator_plugins", + "get_legacy_creator_by_name", + + "discover_creator_plugins", + "register_creator_plugin", + "deregister_creator_plugin", + "register_creator_plugin_path", + "deregister_creator_plugin_path", "CreatedInstance", "CreateContext", diff --git a/openpype/pipeline/create/constants.py b/openpype/pipeline/create/constants.py index bfbbccfd12..375cfc4a12 100644 --- a/openpype/pipeline/create/constants.py +++ b/openpype/pipeline/create/constants.py @@ -1,6 +1,10 @@ SUBSET_NAME_ALLOWED_SYMBOLS = "a-zA-Z0-9_." 
+DEFAULT_SUBSET_TEMPLATE = "{family}{Variant}" +PRE_CREATE_THUMBNAIL_KEY = "thumbnail_source" __all__ = ( "SUBSET_NAME_ALLOWED_SYMBOLS", + "DEFAULT_SUBSET_TEMPLATE", + "PRE_CREATE_THUMBNAIL_KEY", ) diff --git a/openpype/pipeline/create/context.py b/openpype/pipeline/create/context.py index eeb08a6294..7672c49eb3 100644 --- a/openpype/pipeline/create/context.py +++ b/openpype/pipeline/create/context.py @@ -1,25 +1,51 @@ import os +import sys import copy import logging +import traceback import collections import inspect from uuid import uuid4 from contextlib import contextmanager -from .creator_plugins import ( - BaseCreator, - Creator, - AutoCreator -) +import pyblish.logic +import pyblish.api -from openpype.api import ( +from openpype.client import get_assets, get_asset_by_name +from openpype.settings import ( get_system_settings, get_project_settings ) +from openpype.lib.attribute_definitions import ( + UnknownDef, + serialize_attr_defs, + deserialize_attr_defs, + get_default_values, +) +from openpype.host import IPublishHost +from openpype.pipeline import legacy_io +from openpype.pipeline.plugin_discover import DiscoverResult + +from .creator_plugins import ( + Creator, + AutoCreator, + discover_creator_plugins, + discover_convertor_plugins, + CreatorError, +) + +# Changes of instances and context are send as tuple of 2 information +UpdateData = collections.namedtuple("UpdateData", ["instance", "changes"]) + + +class UnavailableSharedData(Exception): + """Shared data are not available at the moment when are accessed.""" + pass class ImmutableKeyError(TypeError): """Accessed key is immutable so does not allow changes or removements.""" + def __init__(self, key, msg=None): self.immutable_key = key if not msg: @@ -31,6 +57,7 @@ class ImmutableKeyError(TypeError): class HostMissRequiredMethod(Exception): """Host does not have implemented required functions for creation.""" + def __init__(self, host, missing_methods): self.missing_methods = missing_methods self.host = host @@ -51,12 +78,432 @@ class HostMissRequiredMethod(Exception): super(HostMissRequiredMethod, self).__init__(msg) +class ConvertorsOperationFailed(Exception): + def __init__(self, msg, failed_info): + super(ConvertorsOperationFailed, self).__init__(msg) + self.failed_info = failed_info + + +class ConvertorsFindFailed(ConvertorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to find incompatible subsets" + super(ConvertorsFindFailed, self).__init__( + msg, failed_info + ) + + +class ConvertorsConversionFailed(ConvertorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to convert incompatible subsets" + super(ConvertorsConversionFailed, self).__init__( + msg, failed_info + ) + + +def prepare_failed_convertor_operation_info(identifier, exc_info): + exc_type, exc_value, exc_traceback = exc_info + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + + return { + "convertor_identifier": identifier, + "message": str(exc_value), + "traceback": formatted_traceback + } + + +class CreatorsOperationFailed(Exception): + """Raised when a creator process crashes in 'CreateContext'. + + The exception contains information about the creator and error. The data + are prepared using 'prepare_failed_creator_operation_info' and can be + serialized using json. + + Usage is for UI purposes which may not have access to exceptions directly + and would not have ability to catch exceptions 'per creator'. + + Args: + msg (str): General error message. 
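A sketch of the consuming side this docstring describes: run creator callbacks, collect per-creator failures in the json-friendly shape, and raise once at the end. It assumes the names defined in this module are importable:

```python
import sys

from openpype.pipeline.create.context import (
    CreatorsOperationFailed,
    prepare_failed_creator_operation_info,
)

# Hypothetical creator callbacks keyed by identifier and label.
creators = [("io.openpype.demo", "Demo creator", lambda: 1 / 0)]

failed_info = []
for identifier, label, callback in creators:
    try:
        callback()
    except Exception:
        failed_info.append(prepare_failed_creator_operation_info(
            identifier, label, sys.exc_info()
        ))

if failed_info:
    raise CreatorsOperationFailed("Failed to create instances", failed_info)
```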
+ failed_info (list[dict[str, Any]]): List of failed creators with + exception message and optionally formatted traceback. + """ + + def __init__(self, msg, failed_info): + super(CreatorsOperationFailed, self).__init__(msg) + self.failed_info = failed_info + + +class CreatorsCollectionFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to collect instances" + super(CreatorsCollectionFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsSaveFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed update instance changes" + super(CreatorsSaveFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsRemoveFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to remove instances" + super(CreatorsRemoveFailed, self).__init__( + msg, failed_info + ) + + +class CreatorsCreateFailed(CreatorsOperationFailed): + def __init__(self, failed_info): + msg = "Failed to create instances" + super(CreatorsCreateFailed, self).__init__( + msg, failed_info + ) + + +def prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback=True +): + formatted_traceback = None + exc_type, exc_value, exc_traceback = exc_info + if add_traceback: + formatted_traceback = "".join(traceback.format_exception( + exc_type, exc_value, exc_traceback + )) + + return { + "creator_identifier": identifier, + "creator_label": label, + "message": str(exc_value), + "traceback": formatted_traceback + } + + +_EMPTY_VALUE = object() + + +class TrackChangesItem(object): + """Helper object to track changes in data. + + Has access to full old and new data and will create deep copy of them, + so it is not needed to create copy before passed in. + + Can work as a dictionary if old or new value is a dictionary. In + that case received object is another object of 'TrackChangesItem'. + + Goal is to be able to get old or new value as was or only changed values + or get information about removed/changed keys, and all of that on + any "dictionary level". + + ``` + # Example of possible usages + >>> old_value = { + ... "key_1": "value_1", + ... "key_2": { + ... "key_sub_1": 1, + ... "key_sub_2": { + ... "enabled": True + ... } + ... }, + ... "key_3": "value_2" + ... } + >>> new_value = { + ... "key_1": "value_1", + ... "key_2": { + ... "key_sub_2": { + ... "enabled": False + ... }, + ... "key_sub_3": 3 + ... }, + ... "key_3": "value_3" + ... } + + >>> changes = TrackChangesItem(old_value, new_value) + >>> changes.changed + True + + >>> changes["key_2"]["key_sub_1"].new_value is None + True + + >>> list(sorted(changes.changed_keys)) + ['key_2', 'key_3'] + + >>> changes["key_2"]["key_sub_2"]["enabled"].changed + True + + >>> changes["key_2"].removed_keys + {'key_sub_1'} + + >>> list(sorted(changes["key_2"].available_keys)) + ['key_sub_1', 'key_sub_2', 'key_sub_3'] + + >>> changes.new_value == new_value + True + + # Get only changed values + only_changed_new_values = { + key: changes[key].new_value + for key in changes.changed_keys + } + ``` + + Args: + old_value (Any): Old value. + new_value (Any): New value. 
+ """ + + def __init__(self, old_value, new_value): + self._changed = old_value != new_value + # Resolve if value is '_EMPTY_VALUE' after comparison of the values + if old_value is _EMPTY_VALUE: + old_value = None + if new_value is _EMPTY_VALUE: + new_value = None + self._old_value = copy.deepcopy(old_value) + self._new_value = copy.deepcopy(new_value) + + self._old_is_dict = isinstance(old_value, dict) + self._new_is_dict = isinstance(new_value, dict) + + self._old_keys = None + self._new_keys = None + self._available_keys = None + self._removed_keys = None + + self._changed_keys = None + + self._sub_items = None + + def __getitem__(self, key): + """Getter looks into subitems if object is dictionary.""" + + if self._sub_items is None: + self._prepare_sub_items() + return self._sub_items[key] + + def __bool__(self): + """Boolean of object is if old and new value are the same.""" + + return self._changed + + def get(self, key, default=None): + """Try to get sub item.""" + + if self._sub_items is None: + self._prepare_sub_items() + return self._sub_items.get(key, default) + + @property + def old_value(self): + """Get copy of old value. + + Returns: + Any: Whatever old value was. + """ + + return copy.deepcopy(self._old_value) + + @property + def new_value(self): + """Get copy of new value. + + Returns: + Any: Whatever new value was. + """ + + return copy.deepcopy(self._new_value) + + @property + def changed(self): + """Value changed. + + Returns: + bool: If data changed. + """ + + return self._changed + + @property + def is_dict(self): + """Object can be used as dictionary. + + Returns: + bool: When can be used that way. + """ + + return self._old_is_dict or self._new_is_dict + + @property + def changes(self): + """Get changes in raw data. + + This method should be used only if 'is_dict' value is 'True'. + + Returns: + Dict[str, Tuple[Any, Any]]: Changes are by key in tuple + (, ). If 'is_dict' is 'False' then + output is always empty dictionary. + """ + + output = {} + if not self.is_dict: + return output + + old_value = self.old_value + new_value = self.new_value + for key in self.changed_keys: + _old = None + _new = None + if self._old_is_dict: + _old = old_value.get(key) + if self._new_is_dict: + _new = new_value.get(key) + output[key] = (_old, _new) + return output + + # Methods/properties that can be used when 'is_dict' is 'True' + @property + def old_keys(self): + """Keys from old value. + + Empty set is returned if old value is not a dict. + + Returns: + Set[str]: Keys from old value. + """ + + if self._old_keys is None: + self._prepare_keys() + return set(self._old_keys) + + @property + def new_keys(self): + """Keys from new value. + + Empty set is returned if old value is not a dict. + + Returns: + Set[str]: Keys from new value. + """ + + if self._new_keys is None: + self._prepare_keys() + return set(self._new_keys) + + @property + def changed_keys(self): + """Keys that has changed from old to new value. + + Empty set is returned if both old and new value are not a dict. + + Returns: + Set[str]: Keys of changed keys. + """ + + if self._changed_keys is None: + self._prepare_sub_items() + return set(self._changed_keys) + + @property + def available_keys(self): + """All keys that are available in old and new value. + + Empty set is returned if both old and new value are not a dict. + Output is Union of 'old_keys' and 'new_keys'. + + Returns: + Set[str]: All keys from old and new value. 
+ """ + + if self._available_keys is None: + self._prepare_keys() + return set(self._available_keys) + + @property + def removed_keys(self): + """Key that are not available in new value but were in old value. + + Returns: + Set[str]: All removed keys. + """ + + if self._removed_keys is None: + self._prepare_sub_items() + return set(self._removed_keys) + + def _prepare_keys(self): + old_keys = set() + new_keys = set() + if self._old_is_dict and self._new_is_dict: + old_keys = set(self._old_value.keys()) + new_keys = set(self._new_value.keys()) + + elif self._old_is_dict: + old_keys = set(self._old_value.keys()) + + elif self._new_is_dict: + new_keys = set(self._new_value.keys()) + + self._old_keys = old_keys + self._new_keys = new_keys + self._available_keys = old_keys | new_keys + self._removed_keys = old_keys - new_keys + + def _prepare_sub_items(self): + sub_items = {} + changed_keys = set() + + old_keys = self.old_keys + new_keys = self.new_keys + new_value = self.new_value + old_value = self.old_value + if self._old_is_dict and self._new_is_dict: + for key in self.available_keys: + item = TrackChangesItem( + old_value.get(key), new_value.get(key) + ) + sub_items[key] = item + if item.changed or key not in old_keys or key not in new_keys: + changed_keys.add(key) + + elif self._old_is_dict: + old_keys = set(old_value.keys()) + available_keys = set(old_keys) + changed_keys = set(available_keys) + for key in available_keys: + # NOTE Use '_EMPTY_VALUE' because old value could be 'None' + # which would result in "unchanged" item + sub_items[key] = TrackChangesItem( + old_value.get(key), _EMPTY_VALUE + ) + + elif self._new_is_dict: + new_keys = set(new_value.keys()) + available_keys = set(new_keys) + changed_keys = set(available_keys) + for key in available_keys: + # NOTE Use '_EMPTY_VALUE' because new value could be 'None' + # which would result in "unchanged" item + sub_items[key] = TrackChangesItem( + _EMPTY_VALUE, new_value.get(key) + ) + + self._sub_items = sub_items + self._changed_keys = changed_keys + + class InstanceMember: """Representation of instance member. TODO: Implement and use! """ + def __init__(self, instance, name): self.instance = instance @@ -72,7 +519,7 @@ class InstanceMember: }) -class AttributeValues: +class AttributeValues(object): """Container which keep values of Attribute definitions. Goal is to have one object which hold values of attribute definitions for @@ -81,13 +528,12 @@ class AttributeValues: Has dictionary like methods. Not all of them are allowed all the time. Args: - attr_defs(AbtractAttrDef): Defintions of value type and properties. + attr_defs(AbstractAttrDef): Defintions of value type and properties. values(dict): Values after possible conversion. origin_data(dict): Values loaded from host before conversion. """ - def __init__(self, attr_defs, values, origin_data=None): - from openpype.lib.attribute_definitions import UnknownDef + def __init__(self, attr_defs, values, origin_data=None): if origin_data is None: origin_data = copy.deepcopy(values) self._origin_data = origin_data @@ -153,32 +599,49 @@ class AttributeValues: return self._data.pop(key, default) def reset_values(self): - self._data = [] + self._data = {} + + def mark_as_stored(self): + self._origin_data = copy.deepcopy(self._data) @property def attr_defs(self): - """Pointer to attribute definitions.""" - return self._attr_defs + """Pointer to attribute definitions. + + Returns: + List[AbstractAttrDef]: Attribute definitions. 
+ """ + + return list(self._attr_defs) + + @property + def origin_data(self): + return copy.deepcopy(self._origin_data) def data_to_store(self): - """Create new dictionary with data to store.""" + """Create new dictionary with data to store. + + Returns: + Dict[str, Any]: Attribute values that should be stored. + """ + output = {} for key in self._data: output[key] = self[key] + + for key, attr_def in self._attr_defs_by_key.items(): + if key not in output: + output[key] = attr_def.default return output - @staticmethod - def calculate_changes(new_data, old_data): - """Calculate changes of 2 dictionary objects.""" - changes = {} - for key, new_value in new_data.items(): - old_value = old_data.get(key) - if old_value != new_value: - changes[key] = (old_value, new_value) - return changes + def get_serialized_attr_defs(self): + """Serialize attribute definitions to json serializable types. - def changes(self): - return self.calculate_changes(self._data, self._origin_data) + Returns: + List[Dict[str, Any]]: Serialized attribute definitions. + """ + + return serialize_attr_defs(self._attr_defs) class CreatorAttributeValues(AttributeValues): @@ -187,6 +650,7 @@ class CreatorAttributeValues(AttributeValues): Args: instance (CreatedInstance): Instance for which are values hold. """ + def __init__(self, instance, *args, **kwargs): self.instance = instance super(CreatorAttributeValues, self).__init__(*args, **kwargs) @@ -202,6 +666,7 @@ class PublishAttributeValues(AttributeValues): publish_attributes(PublishAttributes): Wrapper for multiple publish attributes is used as parent object. """ + def __init__(self, publish_attributes, *args, **kwargs): self.publish_attributes = publish_attributes super(PublishAttributeValues, self).__init__(*args, **kwargs) @@ -215,14 +680,16 @@ class PublishAttributes: """Wrapper for publish plugin attribute definitions. Cares about handling attribute definitions of multiple publish plugins. + Keep information about attribute definitions and their values. Args: parent(CreatedInstance, CreateContext): Parent for which will be data stored and from which are data loaded. origin_data(dict): Loaded data by plugin class name. - attr_plugins(list): List of publish plugins that may have defined - attribute definitions. + attr_plugins(Union[List[pyblish.api.Plugin], None]): List of publish + plugins that may have defined attribute definitions. """ + def __init__(self, parent, origin_data, attr_plugins=None): self.parent = parent self._origin_data = copy.deepcopy(origin_data) @@ -261,6 +728,7 @@ class PublishAttributes: key(str): Plugin name. default: Default value if plugin was not found. 
""" + if key not in self._data: return default @@ -278,33 +746,28 @@ class PublishAttributes: def plugin_names_order(self): """Plugin names order by their 'order' attribute.""" + for name in self._plugin_names_order: yield name + def mark_as_stored(self): + self._origin_data = copy.deepcopy(self._data) + def data_to_store(self): """Convert attribute values to "data to store".""" + output = {} for key, attr_value in self._data.items(): output[key] = attr_value.data_to_store() return output - def changes(self): - """Return changes per each key.""" - changes = {} - for key, attr_val in self._data.items(): - attr_changes = attr_val.changes() - if attr_changes: - if key not in changes: - changes[key] = {} - changes[key].update(attr_val) - - for key, value in self._origin_data.items(): - if key not in self._data: - changes[key] = (value, None) - return changes + @property + def origin_data(self): + return copy.deepcopy(self._origin_data) def set_publish_plugins(self, attr_plugins): """Set publish plugins attribute definitions.""" + self._plugin_names_order = [] self._missing_plugins = [] self.attr_plugins = attr_plugins or [] @@ -338,6 +801,42 @@ class PublishAttributes: self, [], value, value ) + def serialize_attributes(self): + return { + "attr_defs": { + plugin_name: attrs_value.get_serialized_attr_defs() + for plugin_name, attrs_value in self._data.items() + }, + "plugin_names_order": self._plugin_names_order, + "missing_plugins": self._missing_plugins + } + + def deserialize_attributes(self, data): + self._plugin_names_order = data["plugin_names_order"] + self._missing_plugins = data["missing_plugins"] + + attr_defs = deserialize_attr_defs(data["attr_defs"]) + + origin_data = self._origin_data + data = self._data + self._data = {} + + added_keys = set() + for plugin_name, attr_defs_data in attr_defs.items(): + attr_defs = deserialize_attr_defs(attr_defs_data) + value = data.get(plugin_name) or {} + orig_value = copy.deepcopy(origin_data.get(plugin_name) or {}) + self._data[plugin_name] = PublishAttributeValues( + self, attr_defs, value, orig_value + ) + + for key, value in data.items(): + if key not in added_keys: + self._missing_plugins.append(key) + self._data[key] = PublishAttributeValues( + self, [], value, value + ) + class CreatedInstance: """Instance entity with data that will be stored to workfile. @@ -346,16 +845,24 @@ class CreatedInstance: about instance like "asset" and "task" and all data used for filling subset name as creators may have custom data for subset name filling. + Notes: + Object have 2 possible initialization. One using 'creator' object which + is recommended for api usage. Second by passing information about + creator. + Args: - family(str): Name of family that will be created. - subset_name(str): Name of subset that will be created. - data(dict): Data used for filling subset name or override data from - already existing instance. - creator(BaseCreator): Creator responsible for instance. - host(ModuleType): Host implementation loaded with - `avalon.api.registered_host`. - new(bool): Is instance new. + family (str): Name of family that will be created. + subset_name (str): Name of subset that will be created. + data (Dict[str, Any]): Data used for filling subset name or override + data from already existing instance. + creator (Union[BaseCreator, None]): Creator responsible for instance. + creator_identifier (str): Identifier of creator plugin. + creator_label (str): Creator plugin label. + group_label (str): Default group label from creator plugin. 
+ creator_attr_defs (List[AbstractAttrDef]): Attribute definitions from + creator. """ + # Keys that can't be changed or removed from data after loading using # creator. # - 'creator_attributes' and 'publish_attributes' can change values of @@ -370,13 +877,32 @@ class CreatedInstance: ) def __init__( - self, family, subset_name, data, creator, new=True + self, + family, + subset_name, + data, + creator=None, + creator_identifier=None, + creator_label=None, + group_label=None, + creator_attr_defs=None, ): - self.creator = creator + if creator is not None: + creator_identifier = creator.identifier + group_label = creator.get_group_label() + creator_label = creator.label + creator_attr_defs = creator.get_instance_attr_defs() + + self._creator_label = creator_label + self._group_label = group_label or creator_identifier # Instance members may have actions on them + # TODO implement members logic self._members = [] + # Data that can be used for lifetime of object + self._transient_data = {} + # Create a copy of passed data to avoid changing them on the fly data = copy.deepcopy(data or {}) # Store original value of passed data @@ -398,7 +924,7 @@ class CreatedInstance: self._data["family"] = family self._data["subset"] = subset_name self._data["active"] = data.get("active", True) - self._data["creator_identifier"] = creator.identifier + self._data["creator_identifier"] = creator_identifier # Pop from source data all keys that are defined in `_data` before # this moment and through their values away @@ -408,13 +934,16 @@ class CreatedInstance: if key in data: data.pop(key) + self._data["variant"] = self._data.get("variant") or "" # Stored creator specific attribute values # {key: value} creator_values = copy.deepcopy(orig_creator_attributes) - creator_attr_defs = creator.get_instance_attr_defs() self._data["creator_attributes"] = CreatorAttributeValues( - self, creator_attr_defs, creator_values, orig_creator_attributes + self, + list(creator_attr_defs), + creator_values, + orig_creator_attributes ) # Stored publish specific attribute values @@ -487,62 +1016,40 @@ class CreatedInstance: def subset_name(self): return self._data["subset"] + @property + def label(self): + label = self._data.get("label") + if not label: + label = self.subset_name + return label + + @property + def group_label(self): + label = self._data.get("group") + if label: + return label + return self._group_label + + @property + def origin_data(self): + return copy.deepcopy(self._orig_data) + @property def creator_identifier(self): - return self.creator.identifier + return self._data["creator_identifier"] @property def creator_label(self): - return self.creator.label or self.creator_identifier - - @property - def create_context(self): - return self.creator.create_context - - @property - def host(self): - return self.create_context.host - - @property - def has_set_asset(self): - """Asset name is set in data.""" - return "asset" in self._data - - @property - def has_set_task(self): - """Task name is set in data.""" - return "task" in self._data - - @property - def has_valid_context(self): - """Context data are valid for publishing.""" - return self.has_valid_asset and self.has_valid_task - - @property - def has_valid_asset(self): - """Asset set in context exists in project.""" - if not self.has_set_asset: - return False - return self._asset_is_valid - - @property - def has_valid_task(self): - """Task set in context exists in project.""" - if not self.has_set_task: - return False - return self._task_is_valid - - def 
set_asset_invalid(self, invalid): - # TODO replace with `set_asset_name` - self._asset_is_valid = not invalid - - def set_task_invalid(self, invalid): - # TODO replace with `set_task_name` - self._task_is_valid = not invalid + return self._creator_label or self.creator_identifier @property def id(self): - """Instance identifier.""" + """Instance identifier. + + Returns: + str: UUID of instance. + """ + return self._data["instance_id"] @property @@ -550,34 +1057,57 @@ class CreatedInstance: """Legacy access to data. Access to data is needed to modify values. + + Returns: + CreatedInstance: Object can be used as dictionary but with + validations of immutable keys. """ + return self + @property + def transient_data(self): + """Data stored for lifetime of instance object. + + These data are not stored to scene and will be lost on object + deletion. + + Can be used to store objects. In some host implementations is not + possible to reference to object in scene with some unique identifier + (e.g. node in Fusion.). In that case it is handy to store the object + here. Should be used that way only if instance data are stored on the + node itself. + + Returns: + Dict[str, Any]: Dictionary object where you can store data related + to instance for lifetime of instance object. + """ + + return self._transient_data + def changes(self): """Calculate and return changes.""" - changes = {} - new_keys = set() - for key, new_value in self._data.items(): - new_keys.add(key) + + return TrackChangesItem(self._orig_data, self.data_to_store()) + + def mark_as_stored(self): + """Should be called when instance data are stored. + + Origin data are replaced by current data so changes are cleared. + """ + + orig_keys = set(self._orig_data.keys()) + for key, value in self._data.items(): + orig_keys.discard(key) if key in ("creator_attributes", "publish_attributes"): continue + self._orig_data[key] = copy.deepcopy(value) - old_value = self._orig_data.get(key) - if old_value != new_value: - changes[key] = (old_value, new_value) + for key in orig_keys: + self._orig_data.pop(key) - creator_attr_changes = self.creator_attributes.changes() - if creator_attr_changes: - changes["creator_attributes"] = creator_attr_changes - - publish_attr_changes = self.publish_attributes.changes() - if publish_attr_changes: - changes["publish_attributes"] = publish_attr_changes - - for key, old_value in self._orig_data.items(): - if key not in new_keys: - changes[key] = (old_value, None) - return changes + self.creator_attributes.mark_as_stored() + self.publish_attributes.mark_as_stored() @property def creator_attributes(self): @@ -585,6 +1115,12 @@ class CreatedInstance: @property def creator_attribute_defs(self): + """Attribute defintions defined by creator plugin. + + Returns: + List[AbstractAttrDef]: Attribute defitions. + """ + return self.creator_attributes.attr_defs @property @@ -592,6 +1128,18 @@ class CreatedInstance: return self._data["publish_attributes"] def data_to_store(self): + """Collect data that contain json parsable types. + + It is possible to recreate the instance using these data. + + Todos: + We probably don't need OrderedDict. When data are loaded they + are not ordered anymore. + + Returns: + OrderedDict: Ordered dictionary with instance data. 
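+
+        Example:
+            A minimal sketch; 'creator' stands for the plugin which owns
+            the instance::
+
+                data = instance.data_to_store()
+                # Stored data can be used to recreate the instance
+                recreated = CreatedInstance.from_existing(data, creator)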
+ """ + output = collections.OrderedDict() for key, value in self._data.items(): if key in ("creator_attributes", "publish_attributes"): @@ -605,7 +1153,15 @@ class CreatedInstance: @classmethod def from_existing(cls, instance_data, creator): - """Convert instance data from workfile to CreatedInstance.""" + """Convert instance data from workfile to CreatedInstance. + + Args: + instance_data (Dict[str, Any]): Data in a structure ready for + 'CreatedInstance' object. + creator (Creator): Creator plugin which is creating the instance + of for which the instance belong. + """ + instance_data = copy.deepcopy(instance_data) family = instance_data.get("family", None) @@ -614,18 +1170,161 @@ class CreatedInstance: subset_name = instance_data.get("subset", None) return cls( - family, subset_name, instance_data, creator, new=False + family, subset_name, instance_data, creator ) def set_publish_plugins(self, attr_plugins): + """Set publish plugins with attribute definitions. + + This method should be called only from 'CreateContext'. + + Args: + attr_plugins (List[pyblish.api.Plugin]): Pyblish plugins which + inherit from 'OpenPypePyblishPluginMixin' and may contain + attribute definitions. + """ + self.publish_attributes.set_publish_plugins(attr_plugins) def add_members(self, members): """Currently unused method.""" + for member in members: if member not in self._members: self._members.append(member) + def serialize_for_remote(self): + """Serialize object into data to be possible recreated object. + + Returns: + Dict[str, Any]: Serialized data. + """ + + creator_attr_defs = self.creator_attributes.get_serialized_attr_defs() + publish_attributes = self.publish_attributes.serialize_attributes() + return { + "data": self.data_to_store(), + "orig_data": copy.deepcopy(self._orig_data), + "creator_attr_defs": creator_attr_defs, + "publish_attributes": publish_attributes, + "creator_label": self._creator_label, + "group_label": self._group_label, + } + + @classmethod + def deserialize_on_remote(cls, serialized_data): + """Convert instance data to CreatedInstance. + + This is fake instance in remote process e.g. in UI process. The creator + is not a full creator and should not be used for calling methods when + instance is created from this method (matters on implementation). + + Args: + serialized_data (Dict[str, Any]): Serialized data for remote + recreating. Should contain 'data' and 'orig_data'. 
+ """ + + instance_data = copy.deepcopy(serialized_data["data"]) + creator_identifier = instance_data["creator_identifier"] + + family = instance_data["family"] + subset_name = instance_data.get("subset", None) + + creator_label = serialized_data["creator_label"] + group_label = serialized_data["group_label"] + creator_attr_defs = deserialize_attr_defs( + serialized_data["creator_attr_defs"] + ) + publish_attributes = serialized_data["publish_attributes"] + + obj = cls( + family, + subset_name, + instance_data, + creator_identifier=creator_identifier, + creator_label=creator_label, + group_label=group_label, + creator_attributes=creator_attr_defs + ) + obj._orig_data = serialized_data["orig_data"] + obj.publish_attributes.deserialize_attributes(publish_attributes) + + return obj + + # Context validation related methods/properties + @property + def has_set_asset(self): + """Asset name is set in data.""" + + return "asset" in self._data + + @property + def has_set_task(self): + """Task name is set in data.""" + + return "task" in self._data + + @property + def has_valid_context(self): + """Context data are valid for publishing.""" + + return self.has_valid_asset and self.has_valid_task + + @property + def has_valid_asset(self): + """Asset set in context exists in project.""" + + if not self.has_set_asset: + return False + return self._asset_is_valid + + @property + def has_valid_task(self): + """Task set in context exists in project.""" + + if not self.has_set_task: + return False + return self._task_is_valid + + def set_asset_invalid(self, invalid): + # TODO replace with `set_asset_name` + self._asset_is_valid = not invalid + + def set_task_invalid(self, invalid): + # TODO replace with `set_task_name` + self._task_is_valid = not invalid + + +class ConvertorItem(object): + """Item representing convertor plugin. + + Args: + identifier (str): Identifier of convertor. + label (str): Label which will be shown in UI. + """ + + def __init__(self, identifier, label): + self._id = str(uuid4()) + self.identifier = identifier + self.label = label + + @property + def id(self): + return self._id + + def to_data(self): + return { + "id": self.id, + "identifier": self.identifier, + "label": self.label + } + + @classmethod + def from_data(cls, data): + obj = cls(data["identifier"], data["label"]) + obj._id = data["id"] + return obj + class CreateContext: """Context of instance creation. @@ -633,36 +1332,22 @@ class CreateContext: Context itself also can store data related to whole creation (workfile). - those are mainly for Context publish plugins + Todos: + Don't use 'AvalonMongoDB'. It's used only to keep track about current + context which should be handled by host. + Args: host(ModuleType): Host implementation which handles implementation and global metadata. - dbcon(AvalonMongoDB): Connection to mongo with context (at least - project). headless(bool): Context is created out of UI (Current not used). reset(bool): Reset context on initialization. discover_publish_plugins(bool): Discover publish plugins during reset phase. """ - # Methods required in host implementaion to be able create instances - # or change context data. 
- required_methods = ( - "get_context_data", - "update_context_data" - ) def __init__( - self, host, dbcon=None, headless=False, reset=True, - discover_publish_plugins=True + self, host, headless=False, reset=True, discover_publish_plugins=True ): - # Create conncetion if is not passed - if dbcon is None: - import avalon.api - - session = avalon.api.session_data_from_environment(True) - dbcon = avalon.api.AvalonMongoDB(session) - dbcon.install() - - self.dbcon = dbcon self.host = host # Prepare attribute for logger (Created on demand in `log` property) @@ -686,6 +1371,10 @@ class CreateContext: " Missing methods: {}" ).format(joined_methods)) + self._current_project_name = None + self._current_asset_name = None + self._current_task_name = None + self._host_is_valid = host_is_valid # Currently unused variable self.headless = headless @@ -693,6 +1382,8 @@ class CreateContext: # Instances by their ID self._instances_by_id = {} + self.creator_discover_result = None + self.convertor_discover_result = None # Discovered creators self.creators = {} # Prepare categories of creators @@ -700,7 +1391,11 @@ class CreateContext: # Manual creators self.manual_creators = {} + self.convertors_plugins = {} + self.convertor_items_by_id = {} + self.publish_discover_result = None + self.publish_plugins_mismatch_targets = [] self.publish_plugins = [] self.plugins_with_defs = [] self._attr_plugins_by_family = {} @@ -712,6 +1407,11 @@ class CreateContext: self._bulk_counter = 0 self._bulk_instances_to_process = [] + # Shared data across creators during collection phase + self._collection_shared_data = None + + self.thumbnail_paths_by_instance_id = {} + # Trigger reset if was enabled if reset: self.reset(discover_publish_plugins) @@ -720,11 +1420,62 @@ class CreateContext: def instances(self): return self._instances_by_id.values() + @property + def instances_by_id(self): + return self._instances_by_id + @property def publish_attributes(self): """Access to global publish attributes.""" return self._publish_attributes + def get_sorted_creators(self, identifiers=None): + """Sorted creators by 'order' attribute. + + Args: + identifiers (Iterable[str]): Filter creators by identifiers. All + creators are returned if 'None' is passed. + + Returns: + List[BaseCreator]: Sorted creator plugins by 'order' value. + """ + + if identifiers is not None: + identifiers = set(identifiers) + creators = [ + creator + for identifier, creator in self.creators.items() + if identifier in identifiers + ] + else: + creators = self.creators.values() + + return sorted( + creators, key=lambda creator: creator.order + ) + + @property + def sorted_creators(self): + """Sorted creators by 'order' attribute. + + Returns: + List[BaseCreator]: Sorted creator plugins by 'order' value. + """ + + return self.get_sorted_creators() + + @property + def sorted_autocreators(self): + """Sorted auto-creators by 'order' attribute. + + Returns: + List[AutoCreator]: Sorted plugins by 'order' value. + """ + + return sorted( + self.autocreators.values(), key=lambda creator: creator.order + ) + @classmethod def get_host_misssing_methods(cls, host): """Collect missing methods from host. @@ -732,10 +1483,10 @@ class CreateContext: Args: host(ModuleType): Host implementaion. 
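+
+        Returns:
+            Set[str]: Names of methods that are missing on the host.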
""" - missing = set() - for attr_name in cls.required_methods: - if not hasattr(host, attr_name): - missing.add(attr_name) + + missing = set( + IPublishHost.get_missing_publish_methods(host) + ) return missing @property @@ -743,6 +1494,23 @@ class CreateContext: """Is host valid for creation.""" return self._host_is_valid + @property + def host_name(self): + if hasattr(self.host, "name"): + return self.host.name + return os.environ["AVALON_APP"] + + def get_current_project_name(self): + return self._current_project_name + + def get_current_asset_name(self): + return self._current_asset_name + + def get_current_task_name(self): + return self._current_task_name + + project_name = property(get_current_project_name) + @property def log(self): """Dynamic access to logger.""" @@ -755,24 +1523,73 @@ class CreateContext: All changes will be lost if were not saved explicitely. """ - self.reset_avalon_context() + + self.reset_preparation() + + self.reset_current_context() self.reset_plugins(discover_publish_plugins) self.reset_context_data() with self.bulk_instances_collection(): self.reset_instances() + self.find_convertor_items() self.execute_autocreators() - def reset_avalon_context(self): - """Give ability to reset avalon context. + self.reset_finalization() + + def refresh_thumbnails(self): + """Cleanup thumbnail paths. + + Remove all thumbnail filepaths that are empty or lead to files which + does not exists or of instances that are not available anymore. + """ + + invalid = set() + for instance_id, path in self.thumbnail_paths_by_instance_id.items(): + instance_available = True + if instance_id is not None: + instance_available = instance_id in self._instances_by_id + + if ( + not instance_available + or not path + or not os.path.exists(path) + ): + invalid.add(instance_id) + + for instance_id in invalid: + self.thumbnail_paths_by_instance_id.pop(instance_id) + + def reset_preparation(self): + """Prepare attributes that must be prepared/cleaned before reset.""" + + # Give ability to store shared data for collection phase + self._collection_shared_data = {} + + def reset_finalization(self): + """Cleanup of attributes after reset.""" + + # Stop access to collection shared data + self._collection_shared_data = None + self.refresh_thumbnails() + + def reset_current_context(self): + """Refresh current context. Reset is based on optional host implementation of `get_current_context` - function or using `avalon.api.Session`. + function or using `legacy_io.Session`. Some hosts have ability to change context file without using workfiles - tool but that change is not propagated to + tool but that change is not propagated to 'legacy_io.Session' + nor 'os.environ'. + + Todos: + UI: Current context should be also checked on save - compare + initial values vs. current values. + Related to UI checks: Current workfile can be also considered + as current context information as that's where the metadata + are stored. We should store the workfile (if is available) too. 
""" - import avalon.api project_name = asset_name = task_name = None if hasattr(self.host, "get_current_context"): @@ -783,18 +1600,15 @@ class CreateContext: task_name = host_context.get("task_name") if not project_name: - project_name = avalon.api.Session.get("AVALON_PROJECT") + project_name = legacy_io.Session.get("AVALON_PROJECT") if not asset_name: - asset_name = avalon.api.Session.get("AVALON_ASSET") + asset_name = legacy_io.Session.get("AVALON_ASSET") if not task_name: - task_name = avalon.api.Session.get("AVALON_TASK") + task_name = legacy_io.Session.get("AVALON_TASK") - if project_name: - self.dbcon.Session["AVALON_PROJECT"] = project_name - if asset_name: - self.dbcon.Session["AVALON_ASSET"] = asset_name - if task_name: - self.dbcon.Session["AVALON_TASK"] = task_name + self._current_project_name = project_name + self._current_asset_name = asset_name + self._current_task_name = task_name def reset_plugins(self, discover_publish_plugins=True): """Reload plugins. @@ -802,48 +1616,62 @@ class CreateContext: Reloads creators from preregistered paths and can load publish plugins if it's enabled on context. """ - import avalon.api - import pyblish.logic + self._reset_publish_plugins(discover_publish_plugins) + self._reset_creator_plugins() + self._reset_convertor_plugins() + + def _reset_publish_plugins(self, discover_publish_plugins): from openpype.pipeline import OpenPypePyblishPluginMixin from openpype.pipeline.publish import ( - publish_plugins_discover, - DiscoverResult + publish_plugins_discover ) # Reset publish plugins self._attr_plugins_by_family = {} - discover_result = DiscoverResult() + discover_result = DiscoverResult(pyblish.api.Plugin) plugins_with_defs = [] plugins_by_targets = [] + plugins_mismatch_targets = [] if discover_publish_plugins: discover_result = publish_plugins_discover() publish_plugins = discover_result.plugins - targets = pyblish.logic.registered_targets() or ["default"] + targets = set(pyblish.logic.registered_targets()) + targets.add("default") plugins_by_targets = pyblish.logic.plugins_by_targets( - publish_plugins, targets + publish_plugins, list(targets) ) + # Collect plugins that can have attribute definitions for plugin in publish_plugins: if OpenPypePyblishPluginMixin in inspect.getmro(plugin): plugins_with_defs.append(plugin) + plugins_mismatch_targets = [ + plugin + for plugin in publish_plugins + if plugin not in plugins_by_targets + ] + + self.publish_plugins_mismatch_targets = plugins_mismatch_targets self.publish_discover_result = discover_result self.publish_plugins = plugins_by_targets self.plugins_with_defs = plugins_with_defs + def _reset_creator_plugins(self): # Prepare settings - project_name = self.dbcon.Session["AVALON_PROJECT"] system_settings = get_system_settings() - project_settings = get_project_settings(project_name) + project_settings = get_project_settings(self.project_name) # Discover and prepare creators creators = {} autocreators = {} manual_creators = {} - for creator_class in avalon.api.discover(BaseCreator): + report = discover_creator_plugins(return_report=True) + self.creator_discover_result = report + for creator_class in report.plugins: if inspect.isabstract(creator_class): self.log.info( "Skipping abstract Creator {}".format(str(creator_class)) @@ -857,10 +1685,22 @@ class CreateContext: "Using first and skipping following" )) continue + + # Filter by host name + if ( + creator_class.host_name + and creator_class.host_name != self.host_name + ): + self.log.info(( + "Creator's host name \"{}\"" + " is not supported 
for current host \"{}\"" + ).format(creator_class.host_name, self.host_name)) + continue + creator = creator_class( - self, - system_settings, project_settings, + system_settings, + self, self.headless ) creators[creator_identifier] = creator @@ -874,6 +1714,29 @@ class CreateContext: self.creators = creators + def _reset_convertor_plugins(self): + convertors_plugins = {} + report = discover_convertor_plugins(return_report=True) + self.convertor_discover_result = report + for convertor_class in report.plugins: + if inspect.isabstract(convertor_class): + self.log.info( + "Skipping abstract Creator {}".format(str(convertor_class)) + ) + continue + + convertor_identifier = convertor_class.identifier + if convertor_identifier in convertors_plugins: + self.log.warning(( + "Duplicated Converter identifier. " + "Using first and skipping following" + )) + continue + + convertors_plugins[convertor_identifier] = convertor_class(self) + + self.convertors_plugins = convertors_plugins + def reset_context_data(self): """Reload context data using host implementation. @@ -906,11 +1769,10 @@ class CreateContext: def context_data_changes(self): """Changes of attributes.""" - changes = {} - publish_attribute_changes = self._publish_attributes.changes() - if publish_attribute_changes: - changes["publish_attributes"] = publish_attribute_changes - return changes + + return TrackChangesItem( + self._original_context_data, self.context_data_to_store() + ) def creator_adds_instance(self, instance): """Creator adds new instance to context. @@ -933,7 +1795,7 @@ class CreateContext: self._instances_by_id[instance.id] = instance # Prepare publish plugin attributes and set it on instance attr_plugins = self._get_publish_plugins_with_attr_for_family( - instance.creator.family + instance.family ) instance.set_publish_plugins(attr_plugins) @@ -942,9 +1804,189 @@ class CreateContext: with self.bulk_instances_collection(): self._bulk_instances_to_process.append(instance) - def creator_removed_instance(self, instance): + def _get_creator_in_create(self, identifier): + """Creator by identifier with unified error. + + Helper method to get creator by identifier with same error when creator + is not available. + + Args: + identifier (str): Identifier of creator plugin. + + Returns: + BaseCreator: Creator found by identifier. + + Raises: + CreatorError: When identifier is not known. + """ + + creator = self.creators.get(identifier) + # Fake CreatorError (Could be maybe specific exception?) + if creator is None: + raise CreatorError( + "Creator {} was not found".format(identifier) + ) + return creator + + def create( + self, + creator_identifier, + variant, + asset_doc=None, + task_name=None, + pre_create_data=None + ): + """Trigger create of plugins with standartized arguments. + + Arguments 'asset_doc' and 'task_name' use current context as default + values. If only 'task_name' is provided it will be overriden by + task name from current context. If 'task_name' is not provided + when 'asset_doc' is, it is considered that task name is not specified, + which can lead to error if subset name template requires task name. + + Args: + creator_identifier (str): Identifier of creator plugin. + variant (str): Variant used for subset name. + asset_doc (Dict[str, Any]): Asset document which define context of + creation (possible context of created instance/s). + task_name (str): Name of task to which is context related. + pre_create_data (Dict[str, Any]): Pre-create attribute values. 
+ + Returns: + Any: Output of triggered creator's 'create' method. + + Raises: + CreatorError: If creator was not found or asset is empty. + """ + + creator = self._get_creator_in_create(creator_identifier) + + project_name = self.project_name + if asset_doc is None: + asset_name = self.get_current_asset_name() + asset_doc = get_asset_by_name(project_name, asset_name) + task_name = self.get_current_task_name() + if asset_doc is None: + raise CreatorError( + "Asset with name {} was not found".format(asset_name) + ) + + if pre_create_data is None: + pre_create_data = {} + + precreate_attr_defs = creator.get_pre_create_attr_defs() or [] + # Create default values of precreate data + _pre_create_data = get_default_values(precreate_attr_defs) + # Update passed precreate data to default values + # TODO validate types + _pre_create_data.update(pre_create_data) + + subset_name = creator.get_subset_name( + variant, + task_name, + asset_doc, + project_name, + self.host_name + ) + instance_data = { + "asset": asset_doc["name"], + "task": task_name, + "family": creator.family, + "variant": variant + } + return creator.create( + subset_name, + instance_data, + _pre_create_data + ) + + def _create_with_unified_error( + self, identifier, creator, *args, **kwargs + ): + error_message = "Failed to run Creator with identifier \"{}\". {}" + + label = None + add_traceback = False + result = None + fail_info = None + success = False + + try: + # Try to get creator and his label + if creator is None: + creator = self._get_creator_in_create(identifier) + label = getattr(creator, "label", label) + + # Run create + result = creator.create(*args, **kwargs) + success = True + + except CreatorError: + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if not success: + fail_info = prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + return result, fail_info + + def create_with_unified_error(self, identifier, *args, **kwargs): + """Trigger create but raise only one error if anything fails. + + Added to raise unified exception. Capture any possible issues and + reraise it with unified information. + + Args: + identifier (str): Identifier of creator. + *args (Tuple[Any]): Arguments for create method. + **kwargs (Dict[Any, Any]): Keyword argument for create method. + + Raises: + CreatorsCreateFailed: When creation fails due to any possible + reason. If anything goes wrong this is only possible exception + the method should raise. + """ + + result, fail_info = self._create_with_unified_error( + identifier, None, *args, **kwargs + ) + if fail_info is not None: + raise CreatorsCreateFailed([fail_info]) + return result + + def _remove_instance(self, instance): self._instances_by_id.pop(instance.id, None) + def creator_removed_instance(self, instance): + """When creator removes instance context should be acknowledged. + + If creator removes instance conext should know about it to avoid + possible issues in the session. + + Args: + instance (CreatedInstance): Object of instance which was removed + from scene metadata. 
+ """ + + self._remove_instance(instance) + + def add_convertor_item(self, convertor_identifier, label): + self.convertor_items_by_id[convertor_identifier] = ConvertorItem( + convertor_identifier, label + ) + + def remove_convertor_item(self, convertor_identifier): + self.convertor_items_by_id.pop(convertor_identifier, None) + @contextmanager def bulk_instances_collection(self): """Validate context of instances in bulk. @@ -977,24 +2019,87 @@ class CreateContext: self._instances_by_id = {} # Collect instances - for creator in self.creators.values(): - creator.collect_instances() + error_message = "Collection of instances for creator {} failed. {}" + failed_info = [] + for creator in self.sorted_creators: + label = creator.label + identifier = creator.identifier + failed = False + add_traceback = False + exc_info = None + try: + creator.collect_instances() + + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsCollectionFailed(failed_info) + + def find_convertor_items(self): + """Go through convertor plugins to look for items to convert. + + Raises: + ConvertorsFindFailed: When one or more convertors fails during + finding. + """ + + self.convertor_items_by_id = {} + + failed_info = [] + for convertor in self.convertors_plugins.values(): + try: + convertor.find_instances() + + except: + failed_info.append( + prepare_failed_convertor_operation_info( + convertor.identifier, sys.exc_info() + ) + ) + self.log.warning( + "Failed to find instances of convertor \"{}\"".format( + convertor.identifier + ), + exc_info=True + ) + + if failed_info: + raise ConvertorsFindFailed(failed_info) def execute_autocreators(self): """Execute discovered AutoCreator plugins. Reset instances if any autocreator executed properly. """ - for identifier, creator in self.autocreators.items(): - try: - creator.create() - except Exception: - # TODO raise report exception if any crashed - msg = ( - "Failed to run AutoCreator with identifier \"{}\" ({})." 
- ).format(identifier, inspect.getfile(creator.__class__)) - self.log.warning(msg, exc_info=True) + failed_info = [] + for creator in self.sorted_autocreators: + identifier = creator.identifier + _, fail_info = self._create_with_unified_error(identifier, creator) + if fail_info is not None: + failed_info.append(fail_info) + + if failed_info: + raise CreatorsCreateFailed(failed_info) def validate_instances_context(self, instances=None): """Validate 'asset' and 'task' instance context.""" @@ -1020,15 +2125,10 @@ class CreateContext: for asset_name in task_names_by_asset_name.keys() if asset_name is not None ] - asset_docs = list(self.dbcon.find( - { - "type": "asset", - "name": {"$in": asset_names} - }, - { - "name": True, - "data.tasks": True - } + asset_docs = list(get_assets( + self.project_name, + asset_names=asset_names, + fields=["name", "data.tasks"] )) task_names_by_asset_name = {} @@ -1073,35 +2173,122 @@ class CreateContext: """Save instance specific values.""" instances_by_identifier = collections.defaultdict(list) for instance in self._instances_by_id.values(): + instance_changes = instance.changes() + if not instance_changes: + continue + identifier = instance.creator_identifier - instances_by_identifier[identifier].append(instance) + instances_by_identifier[identifier].append( + UpdateData(instance, instance_changes) + ) - for identifier, cretor_instances in instances_by_identifier.items(): - update_list = [] - for instance in cretor_instances: - instance_changes = instance.changes() - if instance_changes: - update_list.append((instance, instance_changes)) + if not instances_by_identifier: + return - creator = self.creators[identifier] - if update_list: + error_message = "Instances update of creator \"{}\" failed. {}" + failed_info = [] + + for creator in self.get_sorted_creators( + instances_by_identifier.keys() + ): + identifier = creator.identifier + update_list = instances_by_identifier[identifier] + if not update_list: + continue + + label = creator.label + failed = False + add_traceback = False + exc_info = None + try: creator.update_instances(update_list) + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning(error_message.format(identifier, exc_info[1])) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), exc_info=True) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsSaveFailed(failed_info) + def remove_instances(self, instances): """Remove instances from context. + All instances that don't have creator identifier leading to existing + creator are just removed from context. + Args: - instances(list): Instances that should be removed - from context. + instances(List[CreatedInstance]): Instances that should be removed. + Remove logic is done using creator, which may require to + do other cleanup than just remove instance from context. 
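+
+        Raises:
+            CreatorsRemoveFailed: When removal of one or more instances
+                fails.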
""" + instances_by_identifier = collections.defaultdict(list) for instance in instances: identifier = instance.creator_identifier instances_by_identifier[identifier].append(instance) - for identifier, creator_instances in instances_by_identifier.items(): - creator = self.creators.get(identifier) - creator.remove_instances(creator_instances) + # Just remove instances from context if creator is not available + missing_creators = set(instances_by_identifier) - set(self.creators) + for identifier in missing_creators: + for instance in instances_by_identifier[identifier]: + self._remove_instance(instance) + + error_message = "Instances removement of creator \"{}\" failed. {}" + failed_info = [] + # Remove instances by creator plugin order + for creator in self.get_sorted_creators( + instances_by_identifier.keys() + ): + identifier = creator.identifier + creator_instances = instances_by_identifier[identifier] + + label = creator.label + failed = False + add_traceback = False + exc_info = None + try: + creator.remove_instances(creator_instances) + + except CreatorError: + failed = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, exc_info[1]) + ) + + except: + failed = True + add_traceback = True + exc_info = sys.exc_info() + self.log.warning( + error_message.format(identifier, ""), + exc_info=True + ) + + if failed: + failed_info.append( + prepare_failed_creator_operation_info( + identifier, label, exc_info, add_traceback + ) + ) + + if failed_info: + raise CreatorsRemoveFailed(failed_info) def _get_publish_plugins_with_attr_for_family(self, family): """Publish plugin attributes for passed family. @@ -1112,6 +2299,7 @@ class CreateContext: family(str): Instance family for which should be attribute definitions returned. """ + if family not in self._attr_plugins_by_family: import pyblish.logic @@ -1127,9 +2315,80 @@ class CreateContext: return self._attr_plugins_by_family[family] def _get_publish_plugins_with_attr_for_context(self): - """Publish plugins attributes for Context plugins.""" + """Publish plugins attributes for Context plugins. + + Returns: + List[pyblish.api.Plugin]: Publish plugins that have attribute + definitions for context. + """ + plugins = [] for plugin in self.plugins_with_defs: if not plugin.__instanceEnabled__: plugins.append(plugin) return plugins + + @property + def collection_shared_data(self): + """Access to shared data that can be used during creator's collection. + + Retruns: + Dict[str, Any]: Shared data. + + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + if self._collection_shared_data is None: + raise UnavailableSharedData( + "Accessed Collection shared data out of collection phase" + ) + return self._collection_shared_data + + def run_convertor(self, convertor_identifier): + """Run convertor plugin by identifier. + + Conversion is skipped if convertor is not available. + + Args: + convertor_identifier (str): Identifier of convertor. + """ + + convertor = self.convertors_plugins.get(convertor_identifier) + if convertor is not None: + convertor.convert() + + def run_convertors(self, convertor_identifiers): + """Run convertor plugins by identifiers. + + Conversion is skipped if convertor is not available. It is recommended + to trigger reset after conversion to reload instances. + + Args: + convertor_identifiers (Iterator[str]): Identifiers of convertors + to run. + + Raises: + ConvertorsConversionFailed: When one or more convertors fails. 
+ """ + + failed_info = [] + for convertor_identifier in convertor_identifiers: + try: + self.run_convertor(convertor_identifier) + + except: + failed_info.append( + prepare_failed_convertor_operation_info( + convertor_identifier, sys.exc_info() + ) + ) + self.log.warning( + "Failed to convert instances of convertor \"{}\"".format( + convertor_identifier + ), + exc_info=True + ) + + if failed_info: + raise ConvertorsConversionFailed(failed_info) diff --git a/openpype/pipeline/create/creator_plugins.py b/openpype/pipeline/create/creator_plugins.py index 1ac2c420a2..628245faf2 100644 --- a/openpype/pipeline/create/creator_plugins.py +++ b/openpype/pipeline/create/creator_plugins.py @@ -1,14 +1,27 @@ +import os import copy -import logging +import collections from abc import ( ABCMeta, abstractmethod, abstractproperty ) + import six -from openpype.lib import get_subset_name_with_asset_doc +from openpype.settings import get_system_settings, get_project_settings +from openpype.lib import Logger +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) + +from .subset_name import get_subset_name +from .legacy_create import LegacyCreator class CreatorError(Exception): @@ -21,6 +34,115 @@ class CreatorError(Exception): super(CreatorError, self).__init__(message) +@six.add_metaclass(ABCMeta) +class SubsetConvertorPlugin(object): + """Helper for conversion of instances created using legacy creators. + + Conversion from legacy creators would mean to loose legacy instances, + convert them automatically or write a script which must user run. All of + these solutions are workign but will happen without asking or user must + know about them. This plugin can be used to show legacy instances in + Publisher and give user ability to run conversion script. + + Convertor logic should be very simple. Method 'find_instances' is to + look for legacy instances in scene a possibly call + pre-implemented 'add_convertor_item'. + + User will have ability to trigger conversion which is executed by calling + 'convert' which should call 'remove_convertor_item' when is done. + + It does make sense to add only one or none legacy item to create context + for convertor as it's not possible to choose which instace are converted + and which are not. + + Convertor can use 'collection_shared_data' property like creators. Also + can store any information to it's object for conversion purposes. + + Args: + create_context + """ + + _log = None + + def __init__(self, create_context): + self._create_context = create_context + + @property + def log(self): + """Logger of the plugin. + + Returns: + logging.Logger: Logger with name of the plugin. + """ + + if self._log is None: + self._log = Logger.get_logger(self.__class__.__name__) + return self._log + + @abstractproperty + def identifier(self): + """Converted identifier. + + Returns: + str: Converted identifier unique for all converters in host. + """ + + pass + + @abstractmethod + def find_instances(self): + """Look for legacy instances in the scene. + + Should call 'add_convertor_item' if there is at least one instance to + convert. + """ + + pass + + @abstractmethod + def convert(self): + """Conversion code.""" + + pass + + @property + def create_context(self): + """Quick access to create context. + + Returns: + CreateContext: Context which initialized the plugin. 
+ """ + + return self._create_context + + @property + def collection_shared_data(self): + """Access to shared data that can be used during 'find_instances'. + + Retruns: + Dict[str, Any]: Shared data. + + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + return self._create_context.collection_shared_data + + def add_convertor_item(self, label): + """Add item to CreateContext. + + Args: + label (str): Label of item which will show in UI. + """ + + self._create_context.add_convertor_item(self.identifier, label) + + def remove_convertor_item(self): + """Remove legacy item from create context when conversion finished.""" + + self._create_context.remove_convertor_item(self.identifier) + + @six.add_metaclass(ABCMeta) class BaseCreator: """Plugin that create and modify instance data before publishing process. @@ -31,10 +153,23 @@ class BaseCreator: Single object should be used for multiple instances instead of single instance per one creator object. Do not store temp data or mid-process data to `self` if it's not Plugin specific. + + Args: + project_settings (Dict[str, Any]): Project settings. + system_settings (Dict[str, Any]): System settings. + create_context (CreateContext): Context which initialized creator. + headless (bool): Running in headless mode. """ # Label shown in UI label = None + group_label = None + # Cached group label after first call 'get_group_label' + _cached_group_label = None + + # Order in which will be plugin executed (collect & update instances) + # less == earlier -> Order '90' will be processed before '100' + order = 100 # Variable to store logger _log = None @@ -46,37 +181,118 @@ class BaseCreator: # - may not be used if `get_icon` is reimplemented icon = None + # Instance attribute definitions that can be changed per instance + # - returns list of attribute definitions from + # `openpype.pipeline.attribute_definitions` + instance_attr_defs = [] + + # Filtering by host name - can be used to be filtered by host name + # - used on all hosts when set to 'None' for Backwards compatibility + # - was added afterwards + # QUESTION make this required? + host_name = None + def __init__( - self, create_context, system_settings, project_settings, headless=False + self, project_settings, system_settings, create_context, headless=False ): # Reference to CreateContext self.create_context = create_context + self.project_settings = project_settings # Creator is running in headless mode (without UI elemets) # - we may use UI inside processing this attribute should be checked self.headless = headless - @abstractproperty - def identifier(self): - """Identifier of creator (must be unique).""" + self.apply_settings(project_settings, system_settings) + + def apply_settings(self, project_settings, system_settings): + """Method called on initialization of plugin to apply settings.""" + pass + @property + def identifier(self): + """Identifier of creator (must be unique). + + Default implementation returns plugin's family. + """ + + return self.family + @abstractproperty def family(self): """Family that plugin represents.""" + pass + @property + def project_name(self): + """Family that plugin represents.""" + + return self.create_context.project_name + + @property + def host(self): + return self.create_context.host + + def get_group_label(self): + """Group label under which are instances grouped in UI. + + Default implementation use attributes in this order: + - 'group_label' -> 'label' -> 'identifier' + Keep in mind that 'identifier' use 'family' by default. 
+ + Returns: + str: Group label that can be used for grouping of instances in UI. + Group label can be overriden by instance itself. + """ + + if self._cached_group_label is None: + label = self.identifier + if self.group_label: + label = self.group_label + elif self.label: + label = self.label + self._cached_group_label = label + return self._cached_group_label + @property def log(self): + """Logger of the plugin. + + Returns: + logging.Logger: Logger with name of the plugin. + """ + if self._log is None: - self._log = logging.getLogger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log def _add_instance_to_context(self, instance): - """Helper method to ad d""" + """Helper method to add instance to create context. + + Instances should be stored to DCC workfile metadata to be able reload + them and also stored to CreateContext in which is creator plugin + existing at the moment to be able use it without refresh of + CreateContext. + + Args: + instance (CreatedInstance): New created instance. + """ + self.create_context.creator_adds_instance(instance) def _remove_instance_from_context(self, instance): + """Helper method to remove instance from create context. + + Instances must be removed from DCC workfile metadat aand from create + context in which plugin is existing at the moment of removement to + propagate the change without restarting create context. + + Args: + instance (CreatedInstance): Instance which should be removed. + """ + self.create_context.creator_removed_instance(instance) @abstractmethod @@ -87,14 +303,45 @@ class BaseCreator: - must expect all data that were passed to init in previous implementation """ + pass @abstractmethod - def collect_instances(self, attr_plugins=None): + def collect_instances(self): + """Collect existing instances related to this creator plugin. + + The implementation differs on host abilities. The creator has to + collect metadata about instance and create 'CreatedInstance' object + which should be added to 'CreateContext'. + + Example: + ```python + def collect_instances(self): + # Getting existing instances is different per host implementation + for instance_data in pipeline.list_instances(): + # Process only instances that were created by this creator + creator_id = instance_data.get("creator_identifier") + if creator_id == self.identifier: + # Create instance object from existing data + instance = CreatedInstance.from_existing( + instance_data, self + ) + # Add instance to create context + self._add_instance_to_context(instance) + ``` + """ + pass @abstractmethod def update_instances(self, update_list): + """Store changes of existing instances so they can be recollected. + + Args: + update_list(List[UpdateData]): Gets list of tuples. Each item + contain changed instance and it's changes. + """ + pass @abstractmethod @@ -105,9 +352,10 @@ class BaseCreator: 'True' if did so. Args: - instance(list): Instance objects which should be + instance(List[CreatedInstance]): Instance objects which should be removed. """ + pass def get_icon(self): @@ -115,20 +363,28 @@ class BaseCreator: Can return path to image file or awesome icon name. """ + return self.icon def get_dynamic_data( - self, variant, task_name, asset_doc, project_name, host_name + self, variant, task_name, asset_doc, project_name, host_name, instance ): """Dynamic data for subset name filling. These may be get dynamically created based on current context of workfile. 
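+
+        Returns:
+            Dict[str, Any]: Dynamic data for subset name filling. Base
+                implementation returns an empty dictionary.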
""" + return {} def get_subset_name( - self, variant, task_name, asset_doc, project_name, host_name=None + self, + variant, + task_name, + asset_doc, + project_name, + host_name=None, + instance=None ): """Return subset name for passed context. @@ -142,25 +398,33 @@ class BaseCreator: Asset document is not used yet but is required if would like to use task type in subset templates. + Method is also called on subset name update. In that case origin + instance is passed in. + Args: variant(str): Subset name variant. In most of cases user input. task_name(str): For which task subset is created. asset_doc(dict): Asset document for which subset is created. project_name(str): Project name. host_name(str): Which host creates subset. + instance(CreatedInstance|None): Object of 'CreatedInstance' for + which is subset name updated. Passed only on subset name + update. """ + dynamic_data = self.get_dynamic_data( - variant, task_name, asset_doc, project_name, host_name + variant, task_name, asset_doc, project_name, host_name, instance ) - return get_subset_name_with_asset_doc( + return get_subset_name( self.family, variant, task_name, asset_doc, project_name, host_name, - dynamic_data=dynamic_data + dynamic_data=dynamic_data, + project_settings=self.project_settings ) def get_instance_attr_defs(self): @@ -175,10 +439,31 @@ class BaseCreator: keys/values when plugin attributes change. Returns: - list: Attribute definitions that can be tweaked for - created instance. + List[AbstractAttrDef]: Attribute definitions that can be tweaked + for created instance. """ - return [] + + return self.instance_attr_defs + + @property + def collection_shared_data(self): + """Access to shared data that can be used during creator's collection. + + Retruns: + Dict[str, Any]: Shared data. + + Raises: + UnavailableSharedData: When called out of collection phase. + """ + + return self.create_context.collection_shared_data + + def set_instance_thumbnail_path(self, instance_id, thumbnail_path=None): + """Set path to thumbnail for instance.""" + + self.create_context.thumbnail_paths_by_instance_id[instance_id] = ( + thumbnail_path + ) class Creator(BaseCreator): @@ -191,6 +476,9 @@ class Creator(BaseCreator): # - default_variants may not be used if `get_default_variants` is overriden default_variants = [] + # Default variant used in 'get_default_variant' + default_variant = None + # Short description of family # - may not be used if `get_description` is overriden description = None @@ -203,6 +491,28 @@ class Creator(BaseCreator): # - in some cases it may confuse artists because it would not be used # e.g. for buld creators create_allow_context_change = True + # A thumbnail can be passed in precreate attributes + # - if is set to True is should expect that a thumbnail path under key + # PRE_CREATE_THUMBNAIL_KEY can be sent in data with precreate data + # - is disabled by default because the feature was added in later stages + # and creators who would not expect PRE_CREATE_THUMBNAIL_KEY could + # cause issues with instance data + create_allow_thumbnail = False + + # Precreate attribute definitions showed before creation + # - similar to instance attribute definitions + pre_create_attr_defs = [] + + @property + def show_order(self): + """Order in which is creator shown in UI. + + Returns: + int: Order in which is creator shown (less == earlier). By default + is using Creator's 'order' or processing. 
+ """ + + return self.order @abstractmethod def create(self, subset_name, instance_data, pre_create_data): @@ -228,6 +538,7 @@ class Creator(BaseCreator): Returns: str: Short description of family. """ + return self.description def get_detail_description(self): @@ -238,6 +549,7 @@ class Creator(BaseCreator): Returns: str: Detailed description of family for artist. """ + return self.detailed_description def get_default_variants(self): @@ -249,8 +561,9 @@ class Creator(BaseCreator): By default returns `default_variants` value. Returns: - list: Whisper variants for user input. + List[str]: Whisper variants for user input. """ + return copy.deepcopy(self.default_variants) def get_default_variant(self): @@ -263,20 +576,28 @@ class Creator(BaseCreator): `get_default_variants` should be used. """ - return None + return self.default_variant def get_pre_create_attr_defs(self): """Plugin attribute definitions needed for creation. Attribute definitions of plugin that define how creation will work. Values of these definitions are passed to `create` method. - NOTE: - Convert method should be implemented which should care about updating - keys/values when plugin attributes change. + + Note: + Convert method should be implemented which should care about + updating keys/values when plugin attributes change. + Returns: - list: Attribute definitions that can be tweaked for - created instance. + List[AbstractAttrDef]: Attribute definitions that can be tweaked + for created instance. """ - return [] + return self.pre_create_attr_defs + + +class HiddenCreator(BaseCreator): + @abstractmethod + def create(self, instance_data, source_data): + pass class AutoCreator(BaseCreator): @@ -284,6 +605,130 @@ class AutoCreator(BaseCreator): Can be used e.g. for `workfile`. """ + def remove_instances(self, instances): """Skip removement.""" pass + + +def discover_creator_plugins(*args, **kwargs): + return discover(BaseCreator, *args, **kwargs) + + +def discover_convertor_plugins(*args, **kwargs): + return discover(SubsetConvertorPlugin, *args, **kwargs) + + +def discover_legacy_creator_plugins(): + from openpype.lib import Logger + + log = Logger.get_logger("CreatorDiscover") + + plugins = discover(LegacyCreator) + project_name = os.environ.get("AVALON_PROJECT") + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to creator {}".format( + plugin.__name__ + ), + exc_info=True + ) + return plugins + + +def get_legacy_creator_by_name(creator_name, case_sensitive=False): + """Find creator plugin by name. + + Args: + creator_name (str): Name of creator class that should be returned. + case_sensitive (bool): Match of creator plugin name is case sensitive. + Set to `False` by default. + + Returns: + Creator: Return first matching plugin or `None`. 
+ """ + + # Lower input creator name if is not case sensitive + if not case_sensitive: + creator_name = creator_name.lower() + + for creator_plugin in discover_legacy_creator_plugins(): + _creator_name = creator_plugin.__name__ + + # Lower creator plugin name if is not case sensitive + if not case_sensitive: + _creator_name = _creator_name.lower() + + if _creator_name == creator_name: + return creator_plugin + return None + + +def register_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + register_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + register_plugin(LegacyCreator, plugin) + + elif issubclass(plugin, SubsetConvertorPlugin): + register_plugin(SubsetConvertorPlugin, plugin) + + +def deregister_creator_plugin(plugin): + if issubclass(plugin, BaseCreator): + deregister_plugin(BaseCreator, plugin) + + elif issubclass(plugin, LegacyCreator): + deregister_plugin(LegacyCreator, plugin) + + elif issubclass(plugin, SubsetConvertorPlugin): + deregister_plugin(SubsetConvertorPlugin, plugin) + + +def register_creator_plugin_path(path): + register_plugin_path(BaseCreator, path) + register_plugin_path(LegacyCreator, path) + register_plugin_path(SubsetConvertorPlugin, path) + + +def deregister_creator_plugin_path(path): + deregister_plugin_path(BaseCreator, path) + deregister_plugin_path(LegacyCreator, path) + deregister_plugin_path(SubsetConvertorPlugin, path) + + +def cache_and_get_instances(creator, shared_key, list_instances_func): + """Common approach to cache instances in shared data. + + This is helper function which does not handle cases when a 'shared_key' is + used for different list instances functions. The same approach of caching + instances into 'collection_shared_data' is not required but is so common + we've decided to unify it to some degree. + + Function 'list_instances_func' is called only if 'shared_key' is not + available in 'collection_shared_data' on creator. + + Args: + creator (Creator): Plugin which would like to get instance data. + shared_key (str): Key under which output of function will be stored. + list_instances_func (Function): Function that will return instance data + if data were not yet stored under 'shared_key'. + + Returns: + Dict[str, Dict[str, Any]]: Cached instances by creator identifier from + result of passed function. 
+ """ + + if shared_key not in creator.collection_shared_data: + value = collections.defaultdict(list) + for instance in list_instances_func(): + identifier = instance.get("creator_identifier") + value[identifier].append(instance) + creator.collection_shared_data[shared_key] = value + return creator.collection_shared_data[shared_key] diff --git a/openpype/pipeline/create/legacy_create.py b/openpype/pipeline/create/legacy_create.py index cf6629047e..7380e9f9c7 100644 --- a/openpype/pipeline/create/legacy_create.py +++ b/openpype/pipeline/create/legacy_create.py @@ -5,10 +5,13 @@ Renamed classes and functions - 'create' -> 'legacy_create' """ +import os import logging import collections -from openpype.lib import get_subset_name +from openpype.client import get_asset_by_id + +from .subset_name import get_subset_name class LegacyCreator(object): @@ -17,6 +20,7 @@ class LegacyCreator(object): family = None defaults = None maintain_selection = True + enabled = True dynamic_subset_keys = [] @@ -37,6 +41,47 @@ class LegacyCreator(object): self.data.update(data or {}) + @classmethod + def apply_settings(cls, project_settings, system_settings): + """Apply OpenPype settings to a plugin class.""" + + host_name = os.environ.get("AVALON_APP") + plugin_type = "create" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + print(" - is disabled by preset") + else: + print(" - setting `{}`: `{}`".format(option, value)) + setattr(cls, option, value) + def process(self): pass @@ -104,11 +149,15 @@ class LegacyCreator(object): variant, task_name, asset_id, project_name, host_name ) + asset_doc = get_asset_by_id( + project_name, asset_id, fields=["data.tasks"] + ) + return get_subset_name( cls.family, variant, task_name, - asset_id, + asset_doc, project_name, host_name, dynamic_data=dynamic_data @@ -142,7 +191,8 @@ def legacy_create(Creator, name, asset, options=None, data=None): Name of instance """ - from avalon.api import registered_host + from openpype.pipeline import registered_host + host = registered_host() plugin = Creator(name, asset, options, data) diff --git a/openpype/pipeline/create/subset_name.py b/openpype/pipeline/create/subset_name.py new file mode 100644 index 0000000000..ed05dd6083 --- /dev/null +++ b/openpype/pipeline/create/subset_name.py @@ -0,0 +1,144 @@ +import os + +from openpype.settings import get_project_settings +from openpype.lib import filter_profiles, prepare_template_data +from openpype.pipeline import legacy_io + +from .constants import DEFAULT_SUBSET_TEMPLATE + + +class TaskNotSetError(KeyError): + def __init__(self, msg=None): + if not msg: + msg = "Creator's subset name template requires task name." 
+
+
+def get_subset_name_template(
+    project_name,
+    family,
+    task_name,
+    task_type,
+    host_name,
+    default_template=None,
+    project_settings=None
+):
+    """Get subset name template based on passed context.
+
+    Args:
+        project_name (str): Project on which the context lives.
+        family (str): Family (subset type) for which the subset name is
+            calculated.
+        host_name (str): Name of host in which the subset name is calculated.
+        task_name (str): Name of task in which context the subset is created.
+        task_type (str): Type of task in which context the subset is created.
+        default_template (Union[str, None]): Default template which is used
+            if no matching profile is found in settings. Constant
+            'DEFAULT_SUBSET_TEMPLATE' is used if not defined.
+        project_settings (Union[Dict[str, Any], None]): Prepared settings for
+            project. Settings are queried if not passed.
+    """
+
+    if project_settings is None:
+        project_settings = get_project_settings(project_name)
+    tools_settings = project_settings["global"]["tools"]
+    profiles = tools_settings["creator"]["subset_name_profiles"]
+    filtering_criteria = {
+        "families": family,
+        "hosts": host_name,
+        "tasks": task_name,
+        "task_types": task_type
+    }
+
+    matching_profile = filter_profiles(profiles, filtering_criteria)
+    template = None
+    if matching_profile:
+        template = matching_profile["template"]
+
+    # Make sure template is set (matching may have empty string)
+    if not template:
+        template = default_template or DEFAULT_SUBSET_TEMPLATE
+    return template
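An illustrative call; when no studio profile matches, the result falls back to 'DEFAULT_SUBSET_TEMPLATE' (assumed here to be a "{family}{Variant}"-style template):

```python
template = get_subset_name_template(
    "demo_project",  # illustrative project name
    family="render",
    task_name="compositing",
    task_type="Compositing",
    host_name="nuke"
)
# e.g. "{family}{Variant}", later formatted to something like "renderMain"
```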
+ """ + + if not family: + return "" + + if not host_name: + host_name = os.environ.get("AVALON_APP") + + # Use only last part of class family value split by dot (`.`) + family = family.rsplit(".", 1)[-1] + + if project_name is None: + project_name = legacy_io.Session["AVALON_PROJECT"] + + asset_tasks = asset_doc.get("data", {}).get("tasks") or {} + task_info = asset_tasks.get(task_name) or {} + task_type = task_info.get("type") + + template = get_subset_name_template( + project_name, + family, + task_name, + task_type, + host_name, + default_template=default_template, + project_settings=project_settings + ) + # Simple check of task name existence for template with {task} in + # - missing task should be possible only in Standalone publisher + if not task_name and "{task" in template.lower(): + raise TaskNotSetError() + + fill_pairs = { + "variant": variant, + "family": family, + "task": task_name + } + if dynamic_data: + # Dynamic data may override default values + for key, value in dynamic_data.items(): + fill_pairs[key] = value + + return template.format(**prepare_template_data(fill_pairs)) diff --git a/openpype/pipeline/delivery.py b/openpype/pipeline/delivery.py new file mode 100644 index 0000000000..8cf9a43aac --- /dev/null +++ b/openpype/pipeline/delivery.py @@ -0,0 +1,310 @@ +"""Functions useful for delivery of published representations.""" +import os +import shutil +import glob +import clique +import collections + +from openpype.lib import create_hard_link + + +def _copy_file(src_path, dst_path): + """Hardlink file if possible(to save space), copy if not. + + Because of using hardlinks should not be function used in other parts + of pipeline. + """ + + if os.path.exists(dst_path): + return + try: + create_hard_link( + src_path, + dst_path + ) + except OSError: + shutil.copyfile(src_path, dst_path) + + +def get_format_dict(anatomy, location_path): + """Returns replaced root values from user provider value. + + Args: + anatomy (Anatomy): Project anatomy. + location_path (str): User provided value. + + Returns: + (dict): Prepared data for formatting of a template. + """ + + format_dict = {} + if not location_path: + return format_dict + + location_path = location_path.replace("\\", "/") + root_names = anatomy.root_names_from_templates( + anatomy.templates["delivery"] + ) + format_dict["root"] = {} + for name in root_names: + format_dict["root"][name] = location_path + return format_dict + + +def check_destination_path( + repre_id, + anatomy, + anatomy_data, + datetime_data, + template_name +): + """ Try to create destination path based on 'template_name'. + + In the case that path cannot be filled, template contains unmatched + keys, provide error message to filter out repre later. + + Args: + repre_id (str): Representation id. + anatomy (Anatomy): Project anatomy. + anatomy_data (dict): Template data to fill anatomy templates. + datetime_data (dict): Values with actual date. + template_name (str): Name of template which should be used from anatomy + templates. + Returns: + Dict[str, List[str]]: Report of happened errors. Key is message title + value is detailed information. + """ + + anatomy_data.update(datetime_data) + anatomy_filled = anatomy.format_all(anatomy_data) + dest_path = anatomy_filled["delivery"][template_name] + report_items = collections.defaultdict(list) + + if not dest_path.solved: + msg = ( + "Missing keys in Representation's context" + " for anatomy template \"{}\"." + ).format(template_name) + + sub_msg = ( + "Representation: {}
" + ).format(repre_id) + + if dest_path.missing_keys: + keys = ", ".join(dest_path.missing_keys) + sub_msg += ( + "- Missing keys: \"{}\"
" + ).format(keys) + + if dest_path.invalid_types: + items = [] + for key, value in dest_path.invalid_types.items(): + items.append("\"{}\" {}".format(key, str(value))) + + keys = ", ".join(items) + sub_msg += ( + "- Invalid value DataType: \"{}\"
" + ).format(keys) + + report_items[msg].append(sub_msg) + + return report_items + + +def deliver_single_file( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log +): + """Copy single file to calculated path based on template + + Args: + src_path(str): path of source representation file + repre (dict): full repre, used only in deliver_sequence, here only + as to share same signature + anatomy (Anatomy) + template_name (string): user selected delivery template name + anatomy_data (dict): data from repre to fill anatomy with + format_dict (dict): root dictionary with names and values + report_items (collections.defaultdict): to return error messages + log (logging.Logger): for log printing + + Returns: + (collections.defaultdict, int) + """ + + # Make sure path is valid for all platforms + src_path = os.path.normpath(src_path.replace("\\", "/")) + + if not os.path.exists(src_path): + msg = "{} doesn't exist for {}".format(src_path, repre["_id"]) + report_items["Source file was not found"].append(msg) + return report_items, 0 + + anatomy_filled = anatomy.format(anatomy_data) + if format_dict: + template_result = anatomy_filled["delivery"][template_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][template_name] + + # Backwards compatibility when extension contained `.` + delivery_path = delivery_path.replace("..", ".") + # Make sure path is valid for all platforms + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) + + delivery_folder = os.path.dirname(delivery_path) + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + log.debug("Copying single: {} -> {}".format(src_path, delivery_path)) + _copy_file(src_path, delivery_path) + + return report_items, 1 + + +def deliver_sequence( + src_path, + repre, + anatomy, + template_name, + anatomy_data, + format_dict, + report_items, + log +): + """ For Pype2(mainly - works in 3 too) where representation might not + contain files. + + Uses listing physical files (not 'files' on repre as a)might not be + present, b)might not be reliable for representation and copying them. + + TODO Should be refactored when files are sufficient to drive all + representations. 
+
+
+def deliver_sequence(
+    src_path,
+    repre,
+    anatomy,
+    template_name,
+    anatomy_data,
+    format_dict,
+    report_items,
+    log
+):
+    """Copy a sequence of files to paths calculated from the template.
+
+    Mainly for Pype 2 (works in 3 too), where a representation might not
+    contain files.
+
+    Lists physical files instead of relying on 'files' of the
+    representation, as they a) might not be present and b) might not be
+    reliable for the representation, and copies them.
+
+    TODO: Should be refactored when files are sufficient to drive all
+        representations.
+
+    Args:
+        src_path(str): path of source representation file
+        repre (dict): full representation
+        anatomy (Anatomy)
+        template_name (string): user selected delivery template name
+        anatomy_data (dict): data from repre to fill anatomy with
+        format_dict (dict): root dictionary with names and values
+        report_items (collections.defaultdict): to return error messages
+        log (logging.Logger): for log printing
+
+    Returns:
+        (collections.defaultdict, int)
+    """
+
+    src_path = os.path.normpath(src_path.replace("\\", "/"))
+
+    def hash_path_exist(myPath):
+        res = myPath.replace('#', '*')
+        glob_search_results = glob.glob(res)
+        if len(glob_search_results) > 0:
+            return True
+        return False
+
+    if not hash_path_exist(src_path):
+        msg = "{} doesn't exist for {}".format(
+            src_path, repre["_id"])
+        report_items["Source file was not found"].append(msg)
+        return report_items, 0
+
+    delivery_templates = anatomy.templates.get("delivery") or {}
+    delivery_template = delivery_templates.get(template_name)
+    if delivery_template is None:
+        msg = (
+            "Delivery template \"{}\" in anatomy of project \"{}\""
+            " was not found"
+        ).format(template_name, anatomy.project_name)
+        report_items[""].append(msg)
+        return report_items, 0
+
+    # Check if 'frame' key is available in template which is required
+    # for sequence delivery
+    if "{frame" not in delivery_template:
+        msg = (
+            "Delivery template \"{}\" in anatomy of project \"{}\""
+            " does not contain '{{frame}}' key to fill. Delivery of sequence"
+            " can't be processed."
+        ).format(template_name, anatomy.project_name)
+        report_items[""].append(msg)
+        return report_items, 0
+
+    dir_path, file_name = os.path.split(str(src_path))
+
+    context = repre["context"]
+    ext = context.get("ext", context.get("representation"))
+
+    if not ext:
+        msg = "Source extension not found, cannot find collection"
+        report_items[msg].append(src_path)
+        log.warning("{} <{}>".format(msg, context))
+        return report_items, 0
+
+    ext = "." 
+ ext + # context.representation could be .psd + ext = ext.replace("..", ".") + + src_collections, remainder = clique.assemble(os.listdir(dir_path)) + src_collection = None + for col in src_collections: + if col.tail != ext: + continue + + src_collection = col + break + + if src_collection is None: + msg = "Source collection of files was not found" + report_items[msg].append(src_path) + log.warning("{} <{}>".format(msg, src_path)) + return report_items, 0 + + frame_indicator = "@####@" + + anatomy_data["frame"] = frame_indicator + anatomy_filled = anatomy.format(anatomy_data) + + if format_dict: + template_result = anatomy_filled["delivery"][template_name] + delivery_path = template_result.rootless.format(**format_dict) + else: + delivery_path = anatomy_filled["delivery"][template_name] + + delivery_path = os.path.normpath(delivery_path.replace("\\", "/")) + delivery_folder = os.path.dirname(delivery_path) + dst_head, dst_tail = delivery_path.split(frame_indicator) + dst_padding = src_collection.padding + dst_collection = clique.Collection( + head=dst_head, + tail=dst_tail, + padding=dst_padding + ) + + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + src_head = src_collection.head + src_tail = src_collection.tail + uploaded = 0 + for index in src_collection.indexes: + src_padding = src_collection.format("{padding}") % index + src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) + src = os.path.normpath( + os.path.join(dir_path, src_file_name) + ) + + dst_padding = dst_collection.format("{padding}") % index + dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) + log.debug("Copying single: {} -> {}".format(src, dst)) + _copy_file(src, dst) + uploaded += 1 + + return report_items, uploaded diff --git a/openpype/lib/editorial.py b/openpype/pipeline/editorial.py similarity index 91% rename from openpype/lib/editorial.py rename to openpype/pipeline/editorial.py index bf868953ea..564d78ea6f 100644 --- a/openpype/lib/editorial.py +++ b/openpype/pipeline/editorial.py @@ -1,23 +1,16 @@ import os import re import clique -from .import_utils import discover_host_vendor_module -try: - import opentimelineio as otio - from opentimelineio import opentime as _ot -except ImportError: - if not os.environ.get("AVALON_APP"): - raise - otio = discover_host_vendor_module("opentimelineio") - _ot = discover_host_vendor_module("opentimelineio.opentime") +import opentimelineio as otio +from opentimelineio import opentime as _ot def otio_range_to_frame_range(otio_range): start = _ot.to_frames( otio_range.start_time, otio_range.start_time.rate) end = start + _ot.to_frames( - otio_range.duration, otio_range.duration.rate) - 1 + otio_range.duration, otio_range.duration.rate) return start, end @@ -125,9 +118,9 @@ def range_from_frames(start, duration, fps): ) -def frames_to_secons(frames, framerate): +def frames_to_seconds(frames, framerate): """ - Returning secons. + Returning seconds. 
     Args:
         frames (int): frame

@@ -135,8 +128,8 @@
     Returns:
         float: second value
-
     """
+
     rt = _ot.from_frames(frames, framerate)
     return _ot.to_seconds(rt)

@@ -168,7 +161,7 @@ def make_sequence_collection(path, otio_range, metadata):
     first, last = otio_range_to_frame_range(otio_range)
     collection = clique.Collection(
         head=head, tail=tail, padding=metadata["padding"])
-    collection.indexes.update([i for i in range(first, (last + 1))])
+    collection.indexes.update([i for i in range(first, last)])
     return dir_path, collection

@@ -218,6 +211,7 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
             "name": name
         }
         tw_node.update(metadata)
+        tw_node["lookup"] = list(lookup)

         # get first and last frame offsets
         offset_in += lookup[0]
@@ -269,16 +263,17 @@ def get_media_range_with_retimes(otio_clip, handle_start, handle_end):
             "retime": True,
             "speed": time_scalar,
             "timewarps": time_warp_nodes,
-            "handleStart": handle_start,
-            "handleEnd": handle_end
+            "handleStart": int(round(handle_start)),
+            "handleEnd": int(round(handle_end))
         }
     }

     returning_dict = {
         "mediaIn": media_in_trimmed,
         "mediaOut": media_out_trimmed,
-        "handleStart": handle_start,
-        "handleEnd": handle_end
+        "handleStart": int(round(handle_start)),
+        "handleEnd": int(round(handle_end)),
+        "speed": time_scalar
     }

     # add version data only if retime
diff --git a/openpype/pipeline/farm/__init__.py b/openpype/pipeline/farm/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openpype/pipeline/farm/patterning.py b/openpype/pipeline/farm/patterning.py
new file mode 100644
index 0000000000..1e4b5bf37d
--- /dev/null
+++ b/openpype/pipeline/farm/patterning.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+import re
+
+
+def match_aov_pattern(host_name, aov_patterns, render_file_name):
+    """Match a host's `AOV` patterns against a render file name.
+
+    The AOV name is matched by comparing the render file name (taken from
+    the file collection grabbed from `exp_files`) against the AOV patterns
+    configured for the host.
+
+    Args:
+        host_name (str): Host name.
+        aov_patterns (dict): AOV patterns from AOV filters.
+        render_file_name (str): Incoming file name to match against.
+
+    Returns:
+        bool: Review state for rendered file (render_file_name).
+    """
+    aov_pattern = aov_patterns.get(host_name, [])
+    if not aov_pattern:
+        return False
+    return any(re.match(p, render_file_name) for p in aov_pattern)
diff --git a/openpype/pipeline/legacy_io.py b/openpype/pipeline/legacy_io.py
new file mode 100644
index 0000000000..bde2b24c2a
--- /dev/null
+++ b/openpype/pipeline/legacy_io.py
@@ -0,0 +1,159 @@
+"""Wrapper around interactions with the database"""
+
+import sys
+import logging
+import functools
+
+from . 
import schema +from .mongodb import AvalonMongoDB, session_data_from_environment + +module = sys.modules[__name__] + +Session = {} +_is_installed = False +_connection_object = AvalonMongoDB(Session) +_mongo_client = None +_database = database = None + +log = logging.getLogger(__name__) + + +def is_installed(): + return module._is_installed + + +def install(): + """Establish a persistent connection to the database""" + if is_installed(): + return + + session = session_data_from_environment(context_keys=True) + + session["schema"] = "openpype:session-3.0" + try: + schema.validate(session) + except schema.ValidationError as e: + # TODO(marcus): Make this mandatory + log.warning(e) + + _connection_object.Session.update(session) + _connection_object.install() + + module._mongo_client = _connection_object.mongo_client + module._database = module.database = _connection_object.database + + module._is_installed = True + + +def uninstall(): + """Close any connection to the database""" + module._mongo_client = None + module._database = module.database = None + module._is_installed = False + try: + module._connection_object.uninstall() + except AttributeError: + pass + + +def requires_install(func): + @functools.wraps(func) + def decorated(*args, **kwargs): + if not is_installed(): + install() + return func(*args, **kwargs) + return decorated + + +@requires_install +def projects(*args, **kwargs): + return _connection_object.projects(*args, **kwargs) + + +@requires_install +def insert_one(doc, *args, **kwargs): + return _connection_object.insert_one(doc, *args, **kwargs) + + +@requires_install +def insert_many(docs, *args, **kwargs): + return _connection_object.insert_many(docs, *args, **kwargs) + + +@requires_install +def update_one(*args, **kwargs): + return _connection_object.update_one(*args, **kwargs) + + +@requires_install +def update_many(*args, **kwargs): + return _connection_object.update_many(*args, **kwargs) + + +@requires_install +def replace_one(*args, **kwargs): + return _connection_object.replace_one(*args, **kwargs) + + +@requires_install +def replace_many(*args, **kwargs): + return _connection_object.replace_many(*args, **kwargs) + + +@requires_install +def delete_one(*args, **kwargs): + return _connection_object.delete_one(*args, **kwargs) + + +@requires_install +def delete_many(*args, **kwargs): + return _connection_object.delete_many(*args, **kwargs) + + +@requires_install +def find(*args, **kwargs): + return _connection_object.find(*args, **kwargs) + + +@requires_install +def find_one(*args, **kwargs): + return _connection_object.find_one(*args, **kwargs) + + +@requires_install +def distinct(*args, **kwargs): + return _connection_object.distinct(*args, **kwargs) + + +@requires_install +def aggregate(*args, **kwargs): + return _connection_object.aggregate(*args, **kwargs) + + +@requires_install +def save(*args, **kwargs): + return _connection_object.save(*args, **kwargs) + + +@requires_install +def drop(*args, **kwargs): + return _connection_object.drop(*args, **kwargs) + + +@requires_install +def parenthood(*args, **kwargs): + return _connection_object.parenthood(*args, **kwargs) + + +@requires_install +def bulk_write(*args, **kwargs): + return _connection_object.bulk_write(*args, **kwargs) + + +@requires_install +def active_project(*args, **kwargs): + return _connection_object.active_project(*args, **kwargs) + + +def current_project(*args, **kwargs): + return Session.get("AVALON_PROJECT") diff --git a/openpype/pipeline/load/__init__.py b/openpype/pipeline/load/__init__.py index 
6e7612d4c1..e9ac0df924 100644 --- a/openpype/pipeline/load/__init__.py +++ b/openpype/pipeline/load/__init__.py @@ -1,8 +1,12 @@ from .utils import ( HeroVersionType, + + LoadError, IncompatibleLoaderError, + InvalidRepresentationContext, get_repres_contexts, + get_contexts_for_repre_docs, get_subset_contexts, get_representation_context, @@ -16,14 +20,20 @@ from .utils import ( switch_container, get_loader_identifier, + get_loaders_by_name, get_representation_path_from_context, get_representation_path, + get_representation_path_with_anatomy, is_compatible_loader, loaders_from_repre_context, loaders_from_representation, + + any_outdated_containers, + get_outdated_containers, + filter_containers, ) from .plugins import ( @@ -41,9 +51,13 @@ from .plugins import ( __all__ = ( # utils.py "HeroVersionType", + + "LoadError", "IncompatibleLoaderError", + "InvalidRepresentationContext", "get_repres_contexts", + "get_contexts_for_repre_docs", "get_subset_contexts", "get_representation_context", @@ -57,15 +71,21 @@ __all__ = ( "switch_container", "get_loader_identifier", + "get_loaders_by_name", "get_representation_path_from_context", "get_representation_path", + "get_representation_path_with_anatomy", "is_compatible_loader", "loaders_from_repre_context", "loaders_from_representation", + "any_outdated_containers", + "get_outdated_containers", + "filter_containers", + # plugins.py "LoaderPlugin", "SubsetLoaderPlugin", diff --git a/openpype/pipeline/load/plugins.py b/openpype/pipeline/load/plugins.py index 9b2b6bb084..9b891a4da3 100644 --- a/openpype/pipeline/load/plugins.py +++ b/openpype/pipeline/load/plugins.py @@ -1,5 +1,18 @@ +import os import logging +from openpype.settings import get_system_settings, get_project_settings +from openpype.pipeline import ( + schema, + legacy_io, +) +from openpype.pipeline.plugin_discover import ( + discover, + register_plugin, + register_plugin_path, + deregister_plugin, + deregister_plugin_path +) from .utils import get_representation_path_from_context @@ -20,6 +33,7 @@ class LoaderPlugin(list): representations = list() order = 0 is_multiple_contexts_compatible = False + enabled = True options = [] @@ -29,11 +43,90 @@ class LoaderPlugin(list): def __init__(self, context): self.fname = self.filepath_from_context(context) + @classmethod + def apply_settings(cls, project_settings, system_settings): + host_name = os.environ.get("AVALON_APP") + plugin_type = "load" + plugin_type_settings = ( + project_settings + .get(host_name, {}) + .get(plugin_type, {}) + ) + global_type_settings = ( + project_settings + .get("global", {}) + .get(plugin_type, {}) + ) + if not global_type_settings and not plugin_type_settings: + return + + plugin_name = cls.__name__ + + plugin_settings = None + # Look for plugin settings in host specific settings + if plugin_name in plugin_type_settings: + plugin_settings = plugin_type_settings[plugin_name] + + # Look for plugin settings in global settings + elif plugin_name in global_type_settings: + plugin_settings = global_type_settings[plugin_name] + + if not plugin_settings: + return + + print(">>> We have preset for {}".format(plugin_name)) + for option, value in plugin_settings.items(): + if option == "enabled" and value is False: + print(" - is disabled by preset") + else: + print(" - setting `{}`: `{}`".format(option, value)) + setattr(cls, option, value) + + @classmethod + def is_compatible_loader(cls, context): + """Return whether a loader is compatible with a context. 
+ + This checks the version's families and the representation for the given + Loader. + + Returns: + bool + """ + + plugin_repre_names = cls.get_representations() + plugin_families = cls.families + if not plugin_repre_names or not plugin_families: + return False + + repre_doc = context.get("representation") + if not repre_doc: + return False + + plugin_repre_names = set(plugin_repre_names) + if ( + "*" not in plugin_repre_names + and repre_doc["name"] not in plugin_repre_names + ): + return False + + maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) + if maj_version < 3: + families = context["version"]["data"].get("families", []) + else: + families = context["subset"]["data"]["families"] + + plugin_families = set(plugin_families) + return ( + "*" in plugin_families + or any(family in plugin_families for family in families) + ) + @classmethod def get_representations(cls): return cls.representations - def filepath_from_context(self, context): + @classmethod + def filepath_from_context(cls, context): return get_representation_path_from_context(context) def load(self, context, name=None, namespace=None, options=None): @@ -101,31 +194,39 @@ class SubsetLoaderPlugin(LoaderPlugin): pass -def discover_loader_plugins(): - import avalon.api +def discover_loader_plugins(project_name=None): + from openpype.lib import Logger - return avalon.api.discover(LoaderPlugin) + log = Logger.get_logger("LoaderDiscover") + plugins = discover(LoaderPlugin) + if not project_name: + project_name = legacy_io.active_project() + system_settings = get_system_settings() + project_settings = get_project_settings(project_name) + for plugin in plugins: + try: + plugin.apply_settings(project_settings, system_settings) + except Exception: + log.warning( + "Failed to apply settings to loader {}".format( + plugin.__name__ + ), + exc_info=True + ) + return plugins def register_loader_plugin(plugin): - import avalon.api - - return avalon.api.register_plugin(LoaderPlugin, plugin) - - -def deregister_loader_plugin_path(path): - import avalon.api - - avalon.api.deregister_plugin_path(LoaderPlugin, path) - - -def register_loader_plugin_path(path): - import avalon.api - - return avalon.api.register_plugin_path(LoaderPlugin, path) + return register_plugin(LoaderPlugin, plugin) def deregister_loader_plugin(plugin): - import avalon.api + deregister_plugin(LoaderPlugin, plugin) - avalon.api.deregister_plugin(LoaderPlugin, plugin) + +def deregister_loader_plugin_path(path): + deregister_plugin_path(LoaderPlugin, path) + + +def register_loader_plugin_path(path): + return register_plugin_path(LoaderPlugin, path) diff --git a/openpype/pipeline/load/utils.py b/openpype/pipeline/load/utils.py index 53ac6b626d..fefdb8537b 100644 --- a/openpype/pipeline/load/utils.py +++ b/openpype/pipeline/load/utils.py @@ -4,18 +4,41 @@ import copy import getpass import logging import inspect +import collections import numbers -import six -from bson.objectid import ObjectId - -from avalon import io, schema -from avalon.api import Session, registered_root - -from openpype.lib import Anatomy +from openpype.host import ILoadHost +from openpype.client import ( + get_project, + get_assets, + get_subsets, + get_versions, + get_version_by_id, + get_last_version_by_subset_id, + get_hero_version_by_subset_id, + get_version_by_name, + get_last_versions, + get_representations, + get_representation_by_id, + get_representation_by_name, + get_representation_parents +) +from openpype.lib import ( + StringTemplate, + TemplateUnsolved, +) +from 
openpype.pipeline import ( + legacy_io, + Anatomy, +) log = logging.getLogger(__name__) +ContainersFilterResult = collections.namedtuple( + "ContainersFilterResult", + ["latest", "outdated", "not_found", "invalid"] +) + class HeroVersionType(object): def __init__(self, version): @@ -36,11 +59,26 @@ class HeroVersionType(object): return self.version.__format__(format_spec) +class LoadError(Exception): + """Known error that happened during loading. + + A message is shown to user (without traceback). Make sure an artist can + understand the problem. + """ + + pass + + class IncompatibleLoaderError(ValueError): """Error when Loader is incompatible with a representation.""" pass +class InvalidRepresentationContext(ValueError): + """Representation path can't be received using representation document.""" + pass + + def get_repres_contexts(representation_ids, dbcon=None): """Return parenthood context for representation. @@ -51,40 +89,36 @@ def get_repres_contexts(representation_ids, dbcon=None): Returns: dict: The full representation context by representation id. - keys are repre_id, value is dictionary with full: - asset_doc - version_doc - subset_doc - repre_doc - + keys are repre_id, value is dictionary with full documents of + asset, subset, version and representation. """ - if not dbcon: - dbcon = io - contexts = {} + if not dbcon: + dbcon = legacy_io + if not representation_ids: + return {} + + project_name = dbcon.active_project() + repre_docs = get_representations(project_name, representation_ids) + + return get_contexts_for_repre_docs(project_name, repre_docs) + + +def get_contexts_for_repre_docs(project_name, repre_docs): + contexts = {} + if not repre_docs: return contexts - _representation_ids = [] - for repre_id in representation_ids: - if isinstance(repre_id, six.string_types): - repre_id = ObjectId(repre_id) - _representation_ids.append(repre_id) - - repre_docs = dbcon.find({ - "type": "representation", - "_id": {"$in": _representation_ids} - }) repre_docs_by_id = {} version_ids = set() for repre_doc in repre_docs: version_ids.add(repre_doc["parent"]) repre_docs_by_id[repre_doc["_id"]] = repre_doc - version_docs = dbcon.find({ - "type": {"$in": ["version", "hero_version"]}, - "_id": {"$in": list(version_ids)} - }) + version_docs = get_versions( + project_name, version_ids, hero=True + ) version_docs_by_id = {} hero_version_docs = [] @@ -98,10 +132,7 @@ def get_repres_contexts(representation_ids, dbcon=None): subset_ids.add(version_doc["parent"]) if versions_for_hero: - _version_docs = dbcon.find({ - "type": "version", - "_id": {"$in": list(versions_for_hero)} - }) + _version_docs = get_versions(project_name, versions_for_hero) _version_data_by_id = { version_doc["_id"]: version_doc["data"] for version_doc in _version_docs @@ -113,26 +144,20 @@ def get_repres_contexts(representation_ids, dbcon=None): version_data = copy.deepcopy(_version_data_by_id[version_id]) version_docs_by_id[hero_version_id]["data"] = version_data - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(subset_ids)} - }) + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + 
project_doc = get_project(project_name) for repre_id, repre_doc in repre_docs_by_id.items(): version_doc = version_docs_by_id[repre_doc["parent"]] @@ -166,38 +191,27 @@ def get_subset_contexts(subset_ids, dbcon=None): dict: The full representation context by representation id. """ if not dbcon: - dbcon = io + dbcon = legacy_io contexts = {} if not subset_ids: return contexts - _subset_ids = set() - for subset_id in subset_ids: - if isinstance(subset_id, six.string_types): - subset_id = ObjectId(subset_id) - _subset_ids.add(subset_id) - - subset_docs = dbcon.find({ - "type": "subset", - "_id": {"$in": list(_subset_ids)} - }) + project_name = dbcon.active_project() + subset_docs = get_subsets(project_name, subset_ids) subset_docs_by_id = {} asset_ids = set() for subset_doc in subset_docs: subset_docs_by_id[subset_doc["_id"]] = subset_doc asset_ids.add(subset_doc["parent"]) - asset_docs = dbcon.find({ - "type": "asset", - "_id": {"$in": list(asset_ids)} - }) + asset_docs = get_assets(project_name, asset_ids) asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } - project_doc = dbcon.find_one({"type": "project"}) + project_doc = get_project(project_name) for subset_id, subset_doc in subset_docs_by_id.items(): asset_doc = asset_docs_by_id[subset_doc["parent"]] @@ -223,20 +237,30 @@ def get_representation_context(representation): Returns: dict: The full representation context. - """ assert representation is not None, "This is a bug" - if isinstance(representation, (six.string_types, ObjectId)): - representation = io.find_one( - {"_id": ObjectId(str(representation))}) + project_name = legacy_io.active_project() + if not isinstance(representation, dict): + representation = get_representation_by_id( + project_name, representation + ) - version, subset, asset, project = io.parenthood(representation) + if not representation: + raise AssertionError("Representation was not found in database") - assert all([representation, version, subset, asset, project]), ( - "This is a bug" + version, subset, asset, project = get_representation_parents( + project_name, representation ) + if not version: + raise AssertionError("Version was not found in database") + if not subset: + raise AssertionError("Subset was not found in database") + if not asset: + raise AssertionError("Asset was not found in database") + if not project: + raise AssertionError("Project was not found in database") context = { "project": { @@ -377,6 +401,20 @@ def get_loader_identifier(loader): return loader.__name__ +def get_loaders_by_name(): + from .plugins import discover_loader_plugins + + loaders_by_name = {} + for loader in discover_loader_plugins(): + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {} !".format(loader_name) + ) + loaders_by_name[loader_name] = loader + return loaders_by_name + + def _get_container_loader(container): """Return the Loader corresponding to the container""" from .plugins import discover_loader_plugins @@ -404,42 +442,36 @@ def update_container(container, version=-1): """Update a container""" # Compute the different version from 'representation' - current_representation = io.find_one({ - "_id": ObjectId(container["representation"]) - }) + project_name = legacy_io.active_project() + current_representation = get_representation_by_id( + project_name, container["representation"] + ) assert current_representation is not None, "This is a bug" - current_version, subset, asset, project = io.parenthood( - current_representation) - + 
current_version = get_version_by_id(
+        project_name, current_representation["parent"], fields=["parent"]
+    )
     if version == -1:
-        new_version = io.find_one({
-            "type": "version",
-            "parent": subset["_id"]
-        }, sort=[("name", -1)])
+        new_version = get_last_version_by_subset_id(
+            project_name, current_version["parent"], fields=["_id"]
+        )
+
+    elif isinstance(version, HeroVersionType):
+        new_version = get_hero_version_by_subset_id(
+            project_name, current_version["parent"], fields=["_id"]
+        )
+
     else:
-        if isinstance(version, HeroVersionType):
-            version_query = {
-                "parent": subset["_id"],
-                "type": "hero_version"
-            }
-        else:
-            version_query = {
-                "parent": subset["_id"],
-                "type": "version",
-                "name": version
-            }
-        new_version = io.find_one(version_query)
+        new_version = get_version_by_name(
+            project_name, version, current_version["parent"], fields=["_id"]
+        )

     assert new_version is not None, "This is a bug"

-    new_representation = io.find_one({
-        "type": "representation",
-        "parent": new_version["_id"],
-        "name": current_representation["name"]
-    })
-
+    new_representation = get_representation_by_name(
+        project_name, current_representation["name"], new_version["_id"]
+    )
     assert new_representation is not None, "Representation wasn't found"

     path = get_representation_path(new_representation)
@@ -481,10 +513,10 @@ def switch_container(container, representation, loader_plugin=None):
         ))

     # Get the new representation to switch to
-    new_representation = io.find_one({
-        "type": "representation",
-        "_id": representation["_id"],
-    })
+    project_name = legacy_io.active_project()
+    new_representation = get_representation_by_id(
+        project_name, representation["_id"]
+    )

     new_context = get_representation_context(new_representation)
     if not is_compatible_loader(loader_plugin, new_context):
@@ -500,7 +532,7 @@ def get_representation_path_from_context(context):
     representation = context['representation']
     project_doc = context.get("project")
     root = None
-    session_project = Session.get("AVALON_PROJECT")
+    session_project = legacy_io.Session.get("AVALON_PROJECT")
     if project_doc and project_doc["name"] != session_project:
         anatomy = Anatomy(project_doc["name"])
         root = anatomy.roots
@@ -508,6 +540,52 @@ def get_representation_path_from_context(context):
     return get_representation_path(representation, root)


+def get_representation_path_with_anatomy(repre_doc, anatomy):
+    """Receive representation path using representation document and anatomy.
+
+    Anatomy is used to replace 'root' key in representation file. Ideally
+    should be used instead of 'get_representation_path' which is based on
+    "current context".
+
+    Future notes:
+        We would also like to be able to store resources in the
+        representation, in which case the result should also contain paths
+        to those resources.
+
+    Args:
+        repre_doc (Dict[str, Any]): Representation document.
+        anatomy (Anatomy): Project anatomy object.
+
+    Returns:
+        Union[None, TemplateResult]: None if path can't be received
+
+    Raises:
+        InvalidRepresentationContext: When representation data are probably
+            invalid or not available. 
+ """ + + try: + template = repre_doc["data"]["template"] + + except KeyError: + raise InvalidRepresentationContext(( + "Representation document does not" + " contain template in data ('data.template')" + )) + + try: + context = repre_doc["context"] + context["root"] = anatomy.roots + path = StringTemplate.format_strict_template(template, context) + + except TemplateUnsolved as exc: + raise InvalidRepresentationContext(( + "Couldn't resolve representation template with available data." + " Reason: {}".format(str(exc)) + )) + + return path.normalized() + + def get_representation_path(representation, root=None, dbcon=None): """Get filename from representation document @@ -526,12 +604,12 @@ def get_representation_path(representation, root=None, dbcon=None): """ - from openpype.lib import StringTemplate, TemplateUnsolved - if dbcon is None: - dbcon = io + dbcon = legacy_io if root is None: + from openpype.pipeline import registered_root + root = registered_root() def path_from_represenation(): @@ -564,7 +642,10 @@ def get_representation_path(representation, root=None, dbcon=None): def path_from_config(): try: - version_, subset, asset, project = dbcon.parenthood(representation) + project_name = dbcon.active_project() + version_, subset, asset, project = get_representation_parents( + project_name, representation + ) except ValueError: log.debug( "Representation %s wasn't found in database, " @@ -669,25 +750,9 @@ def is_compatible_loader(Loader, context): Returns: bool - """ - maj_version, _ = schema.get_schema_version(context["subset"]["schema"]) - if maj_version < 3: - families = context["version"]["data"].get("families", []) - else: - families = context["subset"]["data"]["families"] - representation = context["representation"] - has_family = ( - "*" in Loader.families or any( - family in Loader.families for family in families - ) - ) - representations = Loader.get_representations() - has_representation = ( - "*" in representations or representation["name"] in representations - ) - return has_family and has_representation + return Loader.is_compatible_loader(context) def loaders_from_repre_context(loaders, repre_context): @@ -705,3 +770,165 @@ def loaders_from_representation(loaders, representation): context = get_representation_context(representation) return loaders_from_repre_context(loaders, context) + + +def any_outdated_containers(host=None, project_name=None): + """Check if there are any outdated containers in scene.""" + + if get_outdated_containers(host, project_name): + return True + return False + + +def get_outdated_containers(host=None, project_name=None): + """Collect outdated containers from host scene. + + Currently registered host and project in global session are used if + arguments are not passed. + + Args: + host (ModuleType): Host implementation with 'ls' function available. + project_name (str): Name of project in which context we are. + """ + + if host is None: + from openpype.pipeline import registered_host + + host = registered_host() + + if project_name is None: + project_name = legacy_io.active_project() + + if isinstance(host, ILoadHost): + containers = host.get_containers() + else: + containers = host.ls() + return filter_containers(containers, project_name).outdated + + +def filter_containers(containers, project_name): + """Filter containers and split them into 4 categories. + + Categories are 'latest', 'outdated', 'invalid' and 'not_found'. 
+    The 'latest' containers are from the last version, 'outdated' are not,
+    'invalid' are containers with invalid content and 'not_found' are
+    missing some entity in the database.
+
+    Args:
+        containers (Iterable[dict]): List of containers referenced into scene.
+        project_name (str): Name of project in whose context to look for
+            versions.
+
+    Returns:
+        ContainersFilterResult: Named tuple with 'latest', 'outdated',
+            'invalid' and 'not_found' containers.
+    """
+
+    # Make sure containers is a list that won't change
+    containers = list(containers)
+
+    outdated_containers = []
+    uptodate_containers = []
+    not_found_containers = []
+    invalid_containers = []
+    output = ContainersFilterResult(
+        uptodate_containers,
+        outdated_containers,
+        not_found_containers,
+        invalid_containers
+    )
+    # Query representation docs to get their version ids
+    repre_ids = {
+        container["representation"]
+        for container in containers
+        if container["representation"]
+    }
+    if not repre_ids:
+        if containers:
+            invalid_containers.extend(containers)
+        return output
+
+    repre_docs = get_representations(
+        project_name,
+        representation_ids=repre_ids,
+        fields=["_id", "parent"]
+    )
+    # Store representations by stringified representation id
+    repre_docs_by_str_id = {}
+    repre_docs_by_version_id = collections.defaultdict(list)
+    for repre_doc in repre_docs:
+        repre_id = str(repre_doc["_id"])
+        version_id = repre_doc["parent"]
+        repre_docs_by_str_id[repre_id] = repre_doc
+        repre_docs_by_version_id[version_id].append(repre_doc)
+
+    # Query version docs to get their subset ids
+    # - also query hero versions to be able to identify whether a
+    #   representation belongs to an existing version
+    version_docs = get_versions(
+        project_name,
+        version_ids=repre_docs_by_version_id.keys(),
+        hero=True,
+        fields=["_id", "parent", "type"]
+    )
+    verisons_by_id = {}
+    versions_by_subset_id = collections.defaultdict(list)
+    hero_version_ids = set()
+    for version_doc in version_docs:
+        version_id = version_doc["_id"]
+        # Store versions by their ids
+        verisons_by_id[version_id] = version_doc
+        # There's no need to query subsets for hero versions
+        # - they are considered as latest?
+        if version_doc["type"] == "hero_version":
+            hero_version_ids.add(version_id)
+            continue
+        subset_id = version_doc["parent"]
+        versions_by_subset_id[subset_id].append(version_doc)
+
+    last_versions = get_last_versions(
+        project_name,
+        subset_ids=versions_by_subset_id.keys(),
+        fields=["_id"]
+    )
+    # Figure out which versions are outdated
+    outdated_version_ids = set()
+    for subset_id, last_version_doc in last_versions.items():
+        for version_doc in versions_by_subset_id[subset_id]:
+            version_id = version_doc["_id"]
+            if version_id != last_version_doc["_id"]:
+                outdated_version_ids.add(version_id)
+
+    # Based on all collected data figure out which containers are outdated
+    # - log if there are missing representation or version documents
+    for container in containers:
+        container_name = container["objectName"]
+        repre_id = container["representation"]
+        if not repre_id:
+            invalid_containers.append(container)
+            continue
+
+        repre_doc = repre_docs_by_str_id.get(repre_id)
+        if not repre_doc:
+            log.debug((
+                "Container '{}' has an invalid representation."
+                " It is missing in the database."
+ ).format(container_name)) + not_found_containers.append(container) + continue + + version_id = repre_doc["parent"] + if version_id in outdated_version_ids: + outdated_containers.append(container) + + elif version_id not in verisons_by_id: + log.debug(( + "Representation on container '{}' has an invalid version." + " It is missing in the database." + ).format(container_name)) + not_found_containers.append(container) + + else: + uptodate_containers.append(container) + + return output diff --git a/openpype/pipeline/mongodb.py b/openpype/pipeline/mongodb.py new file mode 100644 index 0000000000..be2b67a5e7 --- /dev/null +++ b/openpype/pipeline/mongodb.py @@ -0,0 +1,276 @@ +import os +import time +import functools +import logging +import pymongo +from uuid import uuid4 + +from openpype.client import OpenPypeMongoConnection + +from . import schema + + +def requires_install(func): + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + if not _obj.is_installed(): + if _obj.auto_install: + _obj.install() + else: + raise IOError( + "'{}.{}()' requires to run install() first".format( + _obj.__class__.__name__, func.__name__ + ) + ) + return func(*args, **kwargs) + return decorated + + +def auto_reconnect(func): + """Handling auto reconnect in 3 retry times""" + retry_times = 3 + reconnect_msg = "Reconnecting..." + func_obj = getattr(func, "__self__", None) + + @functools.wraps(func) + def decorated(*args, **kwargs): + if func_obj is not None: + _obj = func_obj + else: + _obj = args[0] + + for retry in range(1, retry_times + 1): + try: + return func(*args, **kwargs) + except pymongo.errors.AutoReconnect: + if hasattr(_obj, "log"): + _obj.log.warning(reconnect_msg) + else: + print(reconnect_msg) + + if retry >= retry_times: + raise + time.sleep(0.1) + return decorated + + +SESSION_CONTEXT_KEYS = ( + # Root directory of projects on disk + "AVALON_PROJECTS", + # Name of current Project + "AVALON_PROJECT", + # Name of current Asset + "AVALON_ASSET", + # Name of current task + "AVALON_TASK", + # Name of current app + "AVALON_APP", + # Path to working directory + "AVALON_WORKDIR", + # Optional path to scenes directory (see Work Files API) + "AVALON_SCENEDIR" +) + + +def session_data_from_environment(context_keys=False): + session_data = {} + if context_keys: + for key in SESSION_CONTEXT_KEYS: + value = os.environ.get(key) + session_data[key] = value or "" + else: + for key in SESSION_CONTEXT_KEYS: + session_data[key] = None + + for key, default_value in ( + # Name of Avalon in graphical user interfaces + # Use this to customise the visual appearance of Avalon + # to better integrate with your surrounding pipeline + ("AVALON_LABEL", "Avalon"), + + # Used during any connections to the outside world + ("AVALON_TIMEOUT", "1000"), + + # Name of database used in MongoDB + ("AVALON_DB", "avalon"), + ): + value = os.environ.get(key) or default_value + if value is not None: + session_data[key] = value + + return session_data + + +class AvalonMongoDB: + def __init__(self, session=None, auto_install=True): + self._id = uuid4() + self._database = None + self.auto_install = auto_install + self._installed = False + + if session is None: + session = session_data_from_environment(context_keys=False) + + self.Session = session + + self.log = logging.getLogger(self.__class__.__name__) + + def __getattr__(self, attr_name): + attr = None + if not self.is_installed() and self.auto_install: + 
self.install() + + if not self.is_installed(): + raise IOError( + "'{}.{}()' requires to run install() first".format( + self.__class__.__name__, attr_name + ) + ) + + project_name = self.active_project() + if project_name is None: + raise ValueError( + "Value of 'Session[\"AVALON_PROJECT\"]' is not set." + ) + + collection = self._database[project_name] + not_set = object() + attr = getattr(collection, attr_name, not_set) + + if attr is not_set: + # Raise attribute error + raise AttributeError( + "{} has no attribute '{}'.".format( + collection.__class__.__name__, attr_name + ) + ) + + # Decorate function + if callable(attr): + attr = auto_reconnect(attr) + return attr + + @property + def mongo_client(self): + return OpenPypeMongoConnection.get_mongo_client() + + @property + def id(self): + return self._id + + @property + def database(self): + if not self.is_installed() and self.auto_install: + self.install() + + if self.is_installed(): + return self._database + + raise IOError( + "'{}.database' requires to run install() first".format( + self.__class__.__name__ + ) + ) + + def is_installed(self): + return self._installed + + def install(self): + """Establish a persistent connection to the database""" + if self.is_installed(): + return + + self._installed = True + self._database = self.mongo_client[str(os.environ["AVALON_DB"])] + + def uninstall(self): + """Close any connection to the database""" + self._installed = False + self._database = None + + @requires_install + def active_project(self): + """Return the name of the active project""" + return self.Session["AVALON_PROJECT"] + + def current_project(self): + """Currently set project in Session without triggering installation.""" + return self.Session.get("AVALON_PROJECT") + + @requires_install + @auto_reconnect + def projects(self, projection=None, only_active=True): + """Iter project documents + + Args: + projection (optional): MongoDB query projection operation + only_active (optional): Skip inactive projects, default True. 
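+
+        Example:
+            >>> # Illustrative usage; 'dbcon' is an installed
+            >>> # AvalonMongoDB instance (hypothetical values)
+            >>> for project in dbcon.projects(projection={"name": True}):
+            ...     print(project["name"])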
+
+        Returns:
+            Project documents iterator
+
+        """
+        query_filter = {"type": "project"}
+        if only_active:
+            query_filter.update({
+                "$or": [
+                    {"data.active": {"$exists": 0}},
+                    {"data.active": True},
+                ]
+            })
+
+        for project_name in self._database.collection_names():
+            if project_name in ("system.indexes",):
+                continue
+
+            # Each collection will have exactly one project document
+            doc = self._database[project_name].find_one(
+                query_filter, projection=projection
+            )
+            if doc is not None:
+                yield doc
+
+    @auto_reconnect
+    def insert_one(self, item, *args, **kwargs):
+        assert isinstance(item, dict), "item must be of type <dict>"
+        schema.validate(item)
+        return self._database[self.active_project()].insert_one(
+            item, *args, **kwargs
+        )
+
+    @auto_reconnect
+    def insert_many(self, items, *args, **kwargs):
+        # check if all items are valid
+        assert isinstance(items, list), "`items` must be of type <list>"
+        for item in items:
+            assert isinstance(item, dict), "`item` must be of type <dict>"
+            schema.validate(item)
+
+        return self._database[self.active_project()].insert_many(
+            items, *args, **kwargs
+        )
+
+    def parenthood(self, document):
+        assert document is not None, "This is a bug"
+
+        parents = list()
+
+        while document.get("parent") is not None:
+            document = self.find_one({"_id": document["parent"]})
+            if document is None:
+                break
+
+            if document.get("type") == "hero_version":
+                _document = self.find_one({"_id": document["version_id"]})
+                document["data"] = _document["data"]
+
+            parents.append(document)
+
+        return parents
diff --git a/openpype/pipeline/plugin_discover.py b/openpype/pipeline/plugin_discover.py
new file mode 100644
index 0000000000..e5257b801a
--- /dev/null
+++ b/openpype/pipeline/plugin_discover.py
@@ -0,0 +1,324 @@
+import os
+import inspect
+import traceback
+
+from openpype.lib import Logger
+from openpype.lib.python_module_tools import (
+    modules_from_path,
+    classes_from_module,
+)
+
+log = Logger.get_logger(__name__)
+
+
+class DiscoverResult:
+    """Result of plug-in discovery for a single superclass type.
+
+    Stores discovered, duplicated, ignored and abstract plugins, and file
+    paths which crashed during import of their file.
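+
+    Example:
+        >>> # 'result' as returned by discover(..., return_report=True)
+        >>> # (illustrative; plugins depend on what was registered)
+        >>> for plugin in result:
+        ...     print(plugin.__name__)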
+ """ + + def __init__(self, superclass): + self.superclass = superclass + self.plugins = [] + self.crashed_file_paths = {} + self.duplicated_plugins = [] + self.abstract_plugins = [] + self.ignored_plugins = set() + # Store loaded modules to keep them in memory + self._modules = set() + + def __iter__(self): + for plugin in self.plugins: + yield plugin + + def __getitem__(self, item): + return self.plugins[item] + + def __setitem__(self, item, value): + self.plugins[item] = value + + def add_module(self, module): + """Add dynamically loaded python module to keep it in memory.""" + self._modules.add(module) + + def get_report(self, only_errors=True, exc_info=True, full_report=False): + lines = [] + if not only_errors: + # Successfully discovered plugins + if self.plugins or full_report: + lines.append( + "*** Discovered {} plugins".format(len(self.plugins)) + ) + for cls in self.plugins: + lines.append("- {}".format(cls.__class__.__name__)) + + # Plugin that were defined to be ignored + if self.ignored_plugins or full_report: + lines.append("*** Ignored plugins {}".format(len( + self.ignored_plugins + ))) + for cls in self.ignored_plugins: + lines.append("- {}".format(cls.__name__)) + + # Abstract classes + if self.abstract_plugins or full_report: + lines.append("*** Discovered {} abstract plugins".format(len( + self.abstract_plugins + ))) + for cls in self.abstract_plugins: + lines.append("- {}".format(cls.__name__)) + + # Abstract classes + if self.duplicated_plugins or full_report: + lines.append("*** There were {} duplicated plugins".format(len( + self.duplicated_plugins + ))) + for cls in self.duplicated_plugins: + lines.append("- {}".format(cls.__name__)) + + if self.crashed_file_paths or full_report: + lines.append("*** Failed to load {} files".format(len( + self.crashed_file_paths + ))) + for path, exc_info_args in self.crashed_file_paths.items(): + lines.append("- {}".format(path)) + if exc_info: + lines.append(10 * "*") + lines.extend(traceback.format_exception(*exc_info_args)) + lines.append(10 * "*") + + return "\n".join(lines) + + def log_report(self, only_errors=True, exc_info=True): + report = self.get_report(only_errors, exc_info) + if report: + log.info(report) + + +class PluginDiscoverContext(object): + """Store and discover registered types nad registered paths to types. + + Keeps in memory all registered types and their paths. Paths are dynamically + loaded on discover so different discover calls won't return the same + class objects even if were loaded from same file. + """ + + def __init__(self): + self._registered_plugins = {} + self._registered_plugin_paths = {} + self._last_discovered_plugins = {} + # Store the last result to memory + self._last_discovered_results = {} + + def get_last_discovered_plugins(self, superclass): + """Access last discovered plugin by a subperclass. + + Returns: + None: When superclass was not discovered yet. + list: Lastly discovered plugins of the superclass. + """ + + return self._last_discovered_plugins.get(superclass) + + def discover( + self, + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False + ): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. + allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. + return_report (bool): Output will be full report if set to 'True'. 
+ + Returns: + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. + """ + + if not ignore_classes: + ignore_classes = [] + + result = DiscoverResult(superclass) + plugin_names = set() + registered_classes = self._registered_plugins.get(superclass) or [] + registered_paths = self._registered_plugin_paths.get(superclass) or [] + for cls in registered_classes: + if cls is superclass or cls in ignore_classes: + result.ignored_plugins.add(cls) + continue + + if inspect.isabstract(cls): + result.abstract_plugins.append(cls) + continue + + class_name = cls.__name__ + if class_name in plugin_names: + result.duplicated_plugins.append(cls) + continue + plugin_names.add(class_name) + result.plugins.append(cls) + + # Include plug-ins from registered paths + for path in registered_paths: + modules, crashed = modules_from_path(path) + for item in crashed: + filepath, exc_info = item + result.crashed_file_paths[filepath] = exc_info + + for item in modules: + filepath, module = item + result.add_module(module) + for cls in classes_from_module(superclass, module): + if cls is superclass or cls in ignore_classes: + result.ignored_plugins.add(cls) + continue + + if inspect.isabstract(cls): + result.abstract_plugins.append(cls) + continue + + if not allow_duplicates: + class_name = cls.__name__ + if class_name in plugin_names: + result.duplicated_plugins.append(cls) + continue + plugin_names.add(class_name) + + result.plugins.append(cls) + + # Store in memory last result to keep in memory loaded modules + self._last_discovered_results[superclass] = result + self._last_discovered_plugins[superclass] = list( + result.plugins + ) + result.log_report() + if return_report: + return result + return result.plugins + + def register_plugin(self, superclass, cls): + """Register a directory containing plug-ins of type `superclass` + + Arguments: + superclass (type): Superclass of plug-in + cls (object): Subclass of `superclass` + """ + + if superclass not in self._registered_plugins: + self._registered_plugins[superclass] = list() + + if cls not in self._registered_plugins[superclass]: + self._registered_plugins[superclass].append(cls) + + def register_plugin_path(self, superclass, path): + """Register a directory of one or more plug-ins + + Arguments: + superclass (type): Superclass of plug-ins to look for during + discovery + path (str): Absolute path to directory in which to discover + plug-ins + """ + + if superclass not in self._registered_plugin_paths: + self._registered_plugin_paths[superclass] = list() + + path = os.path.normpath(path) + if path not in self._registered_plugin_paths[superclass]: + self._registered_plugin_paths[superclass].append(path) + + def registered_plugin_paths(self): + """Return all currently registered plug-in paths""" + # Return shallow copy so we the original data can't be changed + return { + superclass: paths[:] + for superclass, paths in self._registered_plugin_paths.items() + } + + def deregister_plugin(self, superclass, plugin): + """Opposite of `register_plugin()`""" + if superclass in self._registered_plugins: + self._registered_plugins[superclass].remove(plugin) + + def deregister_plugin_path(self, superclass, path): + """Opposite of `register_plugin_path()`""" + self._registered_plugin_paths[superclass].remove(path) + + +class _GlobalDiscover: + """Access to global object of PluginDiscoverContext. 
+ + Using singleton object to register/deregister plugins and plugin paths + and then discover them by superclass. + """ + + _context = None + + @classmethod + def get_context(cls): + if cls._context is None: + cls._context = PluginDiscoverContext() + return cls._context + + +def discover( + superclass, + allow_duplicates=True, + ignore_classes=None, + return_report=False +): + """Find and return subclasses of `superclass` + + Args: + superclass (type): Class which determines discovered subclasses. + allow_duplicates (bool): Validate class name duplications. + ignore_classes (list): List of classes that will be ignored + and not added to result. + return_report (bool): Output will be full report if set to 'True'. + + Returns: + Union[DiscoverResult, list[Any]]: Object holding successfully + discovered plugins, ignored plugins, plugins with missing + abstract implementation and duplicated plugin. + """ + + context = _GlobalDiscover.get_context() + return context.discover( + superclass, + allow_duplicates, + ignore_classes, + return_report + ) + + +def get_last_discovered_plugins(superclass): + context = _GlobalDiscover.get_context() + return context.get_last_discovered_plugins(superclass) + + +def register_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.register_plugin(superclass, cls) + + +def register_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.register_plugin_path(superclass, path) + + +def deregister_plugin(superclass, cls): + context = _GlobalDiscover.get_context() + context.deregister_plugin(superclass, cls) + + +def deregister_plugin_path(superclass, path): + context = _GlobalDiscover.get_context() + context.deregister_plugin_path(superclass, path) diff --git a/openpype/pipeline/project_folders.py b/openpype/pipeline/project_folders.py new file mode 100644 index 0000000000..1bcba5c320 --- /dev/null +++ b/openpype/pipeline/project_folders.py @@ -0,0 +1,107 @@ +import os +import re +import json + +import six + +from openpype.settings import get_project_settings +from openpype.lib import Logger + +from .anatomy import Anatomy +from .template_data import get_project_template_data + + +def concatenate_splitted_paths(split_paths, anatomy): + log = Logger.get_logger("concatenate_splitted_paths") + pattern_array = re.compile(r"\[.*\]") + output = [] + for path_items in split_paths: + clean_items = [] + if isinstance(path_items, str): + path_items = [path_items] + + for path_item in path_items: + if not re.match(r"{.+}", path_item): + path_item = re.sub(pattern_array, "", path_item) + clean_items.append(path_item) + + # backward compatibility + if "__project_root__" in path_items: + for root, root_path in anatomy.roots.items(): + if not os.path.exists(str(root_path)): + log.debug("Root {} path path {} not exist on \ + computer!".format(root, root_path)) + continue + clean_items = ["{{root[{}]}}".format(root), + r"{project[name]}"] + clean_items[1:] + output.append(os.path.normpath(os.path.sep.join(clean_items))) + continue + + output.append(os.path.normpath(os.path.sep.join(clean_items))) + + return output + + +def fill_paths(path_list, anatomy): + format_data = get_project_template_data(project_name=anatomy.project_name) + format_data["root"] = anatomy.roots + filled_paths = [] + + for path in path_list: + new_path = path.format(**format_data) + filled_paths.append(new_path) + + return filled_paths + + +def create_project_folders(project_name, basic_paths=None): + log = Logger.get_logger("create_project_folders") + anatomy = 
Anatomy(project_name) + if basic_paths is None: + basic_paths = get_project_basic_paths(project_name) + + if not basic_paths: + return + + concat_paths = concatenate_splitted_paths(basic_paths, anatomy) + filled_paths = fill_paths(concat_paths, anatomy) + + # Create folders + for path in filled_paths: + if os.path.exists(path): + log.debug("Folder already exists: {}".format(path)) + else: + log.debug("Creating folder: {}".format(path)) + os.makedirs(path) + + +def _list_path_items(folder_structure): + output = [] + for key, value in folder_structure.items(): + if not value: + output.append(key) + continue + + paths = _list_path_items(value) + for path in paths: + if not isinstance(path, (list, tuple)): + path = [path] + + item = [key] + item.extend(path) + output.append(item) + + return output + + +def get_project_basic_paths(project_name): + project_settings = get_project_settings(project_name) + folder_structure = ( + project_settings["global"]["project_folder_structure"] + ) + if not folder_structure: + return [] + + if isinstance(folder_structure, six.string_types): + folder_structure = json.loads(folder_structure) + return _list_path_items(folder_structure) diff --git a/openpype/pipeline/publish/__init__.py b/openpype/pipeline/publish/__init__.py index af5d7c4a91..05ba1c9c33 100644 --- a/openpype/pipeline/publish/__init__.py +++ b/openpype/pipeline/publish/__init__.py @@ -1,28 +1,87 @@ +from .constants import ( + ValidatePipelineOrder, + ValidateContentsOrder, + ValidateSceneOrder, + ValidateMeshOrder, +) + from .publish_plugins import ( + AbstractMetaInstancePlugin, + AbstractMetaContextPlugin, + PublishValidationError, PublishXmlValidationError, KnownPublishError, OpenPypePyblishPluginMixin, OptionalPyblishPluginMixin, + + RepairAction, + RepairContextAction, + + Extractor, + ExtractorColormanaged, ) from .lib import ( - DiscoverResult, + get_publish_template_name, + publish_plugins_discover, load_help_content_from_plugin, load_help_content_from_filepath, + + get_errored_instances_from_context, + get_errored_plugins_from_context, + + filter_instances_for_context_plugin, + context_plugin_should_run, + get_instance_staging_dir, + get_publish_repre_path, +) + +from .abstract_expected_files import ExpectedFiles +from .abstract_collect_render import ( + RenderInstance, + AbstractCollectRender, ) __all__ = ( + "ValidatePipelineOrder", + "ValidateContentsOrder", + "ValidateSceneOrder", + "ValidateMeshOrder", + + "AbstractMetaInstancePlugin", + "AbstractMetaContextPlugin", + "PublishValidationError", "PublishXmlValidationError", "KnownPublishError", "OpenPypePyblishPluginMixin", "OptionalPyblishPluginMixin", - "DiscoverResult", + "RepairAction", + "RepairContextAction", + + "Extractor", + "ExtractorColormanaged", + + "get_publish_template_name", + "publish_plugins_discover", "load_help_content_from_plugin", "load_help_content_from_filepath", + + "get_errored_instances_from_context", + "get_errored_plugins_from_context", + + "filter_instances_for_context_plugin", + "context_plugin_should_run", + "get_instance_staging_dir", + "get_publish_repre_path", + + "ExpectedFiles", + + "RenderInstance", + "AbstractCollectRender", ) diff --git a/openpype/lib/abstract_collect_render.py b/openpype/pipeline/publish/abstract_collect_render.py similarity index 95% rename from openpype/lib/abstract_collect_render.py rename to openpype/pipeline/publish/abstract_collect_render.py index 7c768e280c..ccb2415346 100644 --- a/openpype/lib/abstract_collect_render.py +++ 
b/openpype/pipeline/publish/abstract_collect_render.py
@@ -9,10 +9,10 @@ from abc import abstractmethod
 
 import attr
 import six
-from avalon import api
 import pyblish.api
 
-from .abstract_metaplugins import AbstractMetaContextPlugin
+from openpype.pipeline import legacy_io
+from .publish_plugins import AbstractMetaContextPlugin
 
 
 @attr.s
@@ -30,6 +30,7 @@ class RenderInstance(object):
     source = attr.ib()  # path to source scene file
     label = attr.ib()  # label to show in GUI
     subset = attr.ib()  # subset name
+    task = attr.ib()  # task name
     asset = attr.ib()  # asset name (AVALON_ASSET)
     attachTo = attr.ib()  # subset name to attach render to
     setMembers = attr.ib()  # list of nodes/members producing render output
@@ -62,6 +63,8 @@ class RenderInstance(object):
     family = attr.ib(default="renderlayer")
     families = attr.ib(default=["renderlayer"])  # list of families
 
+    # True if it should be rendered on farm, e.g. not integrated locally
+    farm = attr.ib(default=False)
 
     # format settings
     multipartExr = attr.ib(default=False)  # flag for multipart exrs
@@ -127,7 +130,7 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
         """Constructor."""
         super(AbstractCollectRender, self).__init__(*args, **kwargs)
         self._file_path = None
-        self._asset = api.Session["AVALON_ASSET"]
+        self._asset = legacy_io.Session["AVALON_ASSET"]
         self._context = None
 
     def process(self, context):
@@ -138,7 +141,9 @@ class AbstractCollectRender(pyblish.api.ContextPlugin):
         try:
             if "workfile" in instance.data["families"]:
                 instance.data["publish"] = True
-            if "renderFarm" in instance.data["families"]:
+            # TODO merge renderFarm and render.farm
+            if ("renderFarm" in instance.data["families"] or
+                    "render.farm" in instance.data["families"]):
                 instance.data["remove"] = True
         except KeyError:
             # be tolerant if 'families' is missing.
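For orientation, the new `farm` attribute and the `render.farm` family check above work together: a host collector can tag an instance for farm submission so local integration is skipped. A minimal sketch, assuming a pyblish context; the instance name and family values are illustrative, not part of this PR:

```python
import pyblish.api

# Sketch: tag a collected instance for farm rendering. The
# "render.farm" family matches the check added in the hunk above;
# the instance name and families here are hypothetical.
context = pyblish.api.Context()
instance = context.create_instance("renderMain")
instance.data["families"] = ["renderlayer", "render.farm"]
instance.data["farm"] = True  # mirrors RenderInstance.farm
```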
diff --git a/openpype/lib/abstract_expected_files.py b/openpype/pipeline/publish/abstract_expected_files.py similarity index 100% rename from openpype/lib/abstract_expected_files.py rename to openpype/pipeline/publish/abstract_expected_files.py diff --git a/openpype/pipeline/publish/constants.py b/openpype/pipeline/publish/constants.py new file mode 100644 index 0000000000..dcd3445200 --- /dev/null +++ b/openpype/pipeline/publish/constants.py @@ -0,0 +1,7 @@ +import pyblish.api + + +ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05 +ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1 +ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2 +ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3 diff --git a/openpype/pipeline/publish/contants.py b/openpype/pipeline/publish/contants.py new file mode 100644 index 0000000000..169eca2e5c --- /dev/null +++ b/openpype/pipeline/publish/contants.py @@ -0,0 +1,2 @@ +DEFAULT_PUBLISH_TEMPLATE = "publish" +DEFAULT_HERO_PUBLISH_TEMPLATE = "hero" diff --git a/openpype/pipeline/publish/lib.py b/openpype/pipeline/publish/lib.py index 739b2c8806..bbc511fc5a 100644 --- a/openpype/pipeline/publish/lib.py +++ b/openpype/pipeline/publish/lib.py @@ -2,32 +2,205 @@ import os import sys import types import inspect +import copy +import tempfile import xml.etree.ElementTree import six import pyblish.plugin +import pyblish.api + +from openpype.lib import ( + Logger, + filter_profiles +) +from openpype.settings import ( + get_project_settings, + get_system_settings, +) +from openpype.pipeline import ( + tempdir +) +from openpype.pipeline.plugin_discover import DiscoverResult + +from .contants import ( + DEFAULT_PUBLISH_TEMPLATE, + DEFAULT_HERO_PUBLISH_TEMPLATE, +) -class DiscoverResult: - """Hold result of publish plugins discovery. +def get_template_name_profiles( + project_name, project_settings=None, logger=None +): + """Receive profiles for publish template keys. - Stores discovered plugins duplicated plugins and file paths which - crashed on execution of file. + At least one of arguments must be passed. + + Args: + project_name (str): Name of project where to look for templates. + project_settings(Dic[str, Any]): Prepared project settings. + + Returns: + List[Dict[str, Any]]: Publish template profiles. """ - def __init__(self): - self.plugins = [] - self.crashed_file_paths = {} - self.duplicated_plugins = [] - def __iter__(self): - for plugin in self.plugins: - yield plugin + if not project_name and not project_settings: + raise ValueError(( + "Both project name and project settings are missing." + " At least one must be entered." + )) - def __getitem__(self, item): - return self.plugins[item] + if not project_settings: + project_settings = get_project_settings(project_name) - def __setitem__(self, item, value): - self.plugins[item] = value + profiles = ( + project_settings + ["global"] + ["tools"] + ["publish"] + ["template_name_profiles"] + ) + if profiles: + return copy.deepcopy(profiles) + + # Use legacy approach for cases new settings are not filled yet for the + # project + legacy_profiles = ( + project_settings + ["global"] + ["publish"] + ["IntegrateAssetNew"] + ["template_name_profiles"] + ) + if legacy_profiles: + if not logger: + logger = Logger.get_logger("get_template_name_profiles") + + logger.warning(( + "Project \"{}\" is using legacy access to publish template." + " It is recommended to move settings to new location" + " 'project_settings/global/tools/publish/template_name_profiles'." 
+            ).format(project_name))
+
+        # Replace "tasks" key with "task_names"
+        profiles = []
+        for profile in copy.deepcopy(legacy_profiles):
+            profile["task_names"] = profile.pop("tasks", [])
+            profiles.append(profile)
+        return profiles
+
+
+def get_hero_template_name_profiles(
+    project_name, project_settings=None, logger=None
+):
+    """Receive profiles for hero publish template keys.
+
+    At least one of the arguments must be passed.
+
+    Args:
+        project_name (str): Name of project where to look for templates.
+        project_settings (Dict[str, Any]): Prepared project settings.
+
+    Returns:
+        List[Dict[str, Any]]: Publish template profiles.
+    """
+
+    if not project_name and not project_settings:
+        raise ValueError((
+            "Both project name and project settings are missing."
+            " At least one must be entered."
+        ))
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    profiles = (
+        project_settings
+        ["global"]
+        ["tools"]
+        ["publish"]
+        ["hero_template_name_profiles"]
+    )
+    if profiles:
+        return copy.deepcopy(profiles)
+
+    # Use legacy approach for cases when new settings are not filled yet
+    # for the project
+    legacy_profiles = copy.deepcopy(
+        project_settings
+        ["global"]
+        ["publish"]
+        ["IntegrateHeroVersion"]
+        ["template_name_profiles"]
+    )
+    if legacy_profiles:
+        if not logger:
+            logger = Logger.get_logger("get_hero_template_name_profiles")
+
+        logger.warning((
+            "Project \"{}\" is using legacy access to hero publish template."
+            " It is recommended to move settings to new location"
+            " 'project_settings/global/tools/publish/"
+            "hero_template_name_profiles'."
+        ).format(project_name))
+    return legacy_profiles
+
+
+def get_publish_template_name(
+    project_name,
+    host_name,
+    family,
+    task_name,
+    task_type,
+    project_settings=None,
+    hero=False,
+    logger=None
+):
+    """Get template name which should be used for passed context.
+
+    Publish templates are filtered by host name, family, task name and
+    task type.
+
+    The default template, used if profiles are not available or the matched
+    profile has an empty value, is defined by the 'DEFAULT_PUBLISH_TEMPLATE'
+    constant.
+
+    Args:
+        project_name (str): Name of project where to look for settings.
+        host_name (str): Name of host integration.
+        family (str): Family for which the template should be found.
+        task_name (str): Task name the instance is working on.
+        task_type (str): Task type the instance is working on.
+        project_settings (Dict[str, Any]): Prepared project settings.
+        logger (logging.Logger): Custom logger used for 'filter_profiles'
+            function.
+
+    Returns:
+        str: Template name which should be used for integration.
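+
+    Example:
+        >>> # Illustrative values; the returned name depends on the
+        >>> # project's template profiles.
+        >>> get_publish_template_name(
+        ...     "MyProject", "maya", "render",
+        ...     task_name="lighting", task_type="Lighting"
+        ... )
+        'publish'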
+ """ + + template = None + filter_criteria = { + "hosts": host_name, + "families": family, + "task_names": task_name, + "task_types": task_type, + } + if hero: + default_template = DEFAULT_HERO_PUBLISH_TEMPLATE + profiles = get_hero_template_name_profiles( + project_name, project_settings, logger + ) + + else: + profiles = get_template_name_profiles( + project_name, project_settings, logger + ) + default_template = DEFAULT_PUBLISH_TEMPLATE + + profile = filter_profiles(profiles, filter_criteria, logger=logger) + if profile: + template = profile["template_name"] + return template or default_template class HelpContent: @@ -97,7 +270,7 @@ def publish_plugins_discover(paths=None): """ # The only difference with `pyblish.api.discover` - result = DiscoverResult() + result = DiscoverResult(pyblish.api.Plugin) plugins = dict() plugin_names = [] @@ -180,3 +353,337 @@ def publish_plugins_discover(paths=None): result.plugins = plugins return result + + +def filter_pyblish_plugins(plugins): + """Pyblish plugin filter which applies OpenPype settings. + + Apply OpenPype settings on discovered plugins. On plugin with implemented + class method 'def apply_settings(cls, project_settings, system_settings)' + is called the method. Default behavior looks for plugin name and current + host name to look for + + Args: + plugins (List[pyblish.plugin.Plugin]): Discovered plugins on which + are applied settings. + """ + + log = Logger.get_logger("filter_pyblish_plugins") + + # TODO: Don't use host from 'pyblish.api' but from defined host by us. + # - kept becau on farm is probably used host 'shell' which propably + # affect how settings are applied there + host = pyblish.api.current_host() + project_name = os.environ.get("AVALON_PROJECT") + + project_setting = get_project_settings(project_name) + system_settings = get_system_settings() + + # iterate over plugins + for plugin in plugins[:]: + if hasattr(plugin, "apply_settings"): + try: + # Use classmethod 'apply_settings' + # - can be used to target settings from custom settings place + # - skip default behavior when successful + plugin.apply_settings(project_setting, system_settings) + continue + + except Exception: + log.warning( + ( + "Failed to apply settings on plugin {}" + ).format(plugin.__name__), + exc_info=True + ) + + try: + config_data = ( + project_setting + [host] + ["publish"] + [plugin.__name__] + ) + except KeyError: + # host determined from path + file = os.path.normpath(inspect.getsourcefile(plugin)) + file = os.path.normpath(file) + + split_path = file.split(os.path.sep) + if len(split_path) < 4: + log.warning( + 'plugin path too short to extract host {}'.format(file) + ) + continue + + host_from_file = split_path[-4] + plugin_kind = split_path[-2] + + # TODO: change after all plugins are moved one level up + if host_from_file == "openpype": + host_from_file = "global" + + try: + config_data = ( + project_setting + [host_from_file] + [plugin_kind] + [plugin.__name__] + ) + except KeyError: + continue + + for option, value in config_data.items(): + if option == "enabled" and value is False: + log.info('removing plugin {}'.format(plugin.__name__)) + plugins.remove(plugin) + else: + log.info('setting {}:{} on plugin {}'.format( + option, value, plugin.__name__)) + + setattr(plugin, option, value) + + +def find_close_plugin(close_plugin_name, log): + if close_plugin_name: + plugins = pyblish.api.discover() + for plugin in plugins: + if plugin.__name__ == close_plugin_name: + return plugin + + log.debug("Close plugin not found, app might not 
close.") + + +def remote_publish(log, close_plugin_name=None, raise_error=False): + """Loops through all plugins, logs to console. Used for tests. + + Args: + log (openpype.lib.Logger) + close_plugin_name (str): name of plugin with responsibility to + close host app + """ + # Error exit as soon as any error occurs. + error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" + + close_plugin = find_close_plugin(close_plugin_name, log) + + for result in pyblish.util.publish_iter(): + for record in result["records"]: + log.info("{}: {}".format( + result["plugin"].label, record.msg)) + + if result["error"]: + error_message = error_format.format(**result) + log.error(error_message) + if close_plugin: # close host app explicitly after error + context = pyblish.api.Context() + close_plugin().process(context) + if raise_error: + # Fatal Error is because of Deadline + error_message = "Fatal Error: " + error_format.format(**result) + raise RuntimeError(error_message) + + +def get_errored_instances_from_context(context): + """Collect failed instances from pyblish context. + + Args: + context (pyblish.api.Context): Publish context where we're looking + for failed instances. + + Returns: + List[pyblish.lib.Instance]: Instances which failed during processing. + """ + + instances = list() + for result in context.data["results"]: + if result["instance"] is None: + # When instance is None we are on the "context" result + continue + + if result["error"]: + instances.append(result["instance"]) + + return instances + + +def get_errored_plugins_from_context(context): + """Collect failed plugins from pyblish context. + + Args: + context (pyblish.api.Context): Publish context where we're looking + for failed plugins. + + Returns: + List[pyblish.api.Plugin]: Plugins which failed during processing. + """ + + plugins = list() + results = context.data.get("results", []) + for result in results: + if result["success"] is True: + continue + plugins.append(result["plugin"]) + + return plugins + + +def filter_instances_for_context_plugin(plugin, context): + """Filter instances on context by context plugin filters. + + This is for cases when context plugin need similar filtering like instance + plugin have, but for some reason must run on context or should find out + if there is at least one instance with a family. + + Args: + plugin (pyblish.api.Plugin): Plugin with filters. + context (pyblish.api.Context): Pyblish context with insances. + + Returns: + Iterator[pyblish.lib.Instance]: Iteration of valid instances. + """ + + instances = [] + plugin_families = set() + all_families = False + if plugin.families: + instances = context + plugin_families = set(plugin.families) + all_families = "*" in plugin_families + + for instance in instances: + # Ignore inactive instances + if ( + not instance.data.get("publish", True) + or not instance.data.get("active", True) + ): + continue + + family = instance.data.get("family") + families = instance.data.get("families") or [] + if ( + all_families + or (family and family in plugin_families) + or any(f in plugin_families for f in families) + ): + yield instance + + +def context_plugin_should_run(plugin, context): + """Return whether the ContextPlugin should run on the given context. + + This is a helper function to work around a bug pyblish-base#250 + Whenever a ContextPlugin sets specific families it will still trigger even + when no instances are present that have those families. + + This actually checks it correctly and returns whether it should run. 
+
+    Args:
+        plugin (pyblish.api.Plugin): Plugin with filters.
+        context (pyblish.api.Context): Pyblish context with instances.
+
+    Returns:
+        bool: Context plugin should run based on valid instances.
+    """
+
+    for _ in filter_instances_for_context_plugin(plugin, context):
+        return True
+    return False
+
+
+def get_instance_staging_dir(instance):
+    """Unified way to store and create a staging dir on instances.
+
+    First check if 'stagingDir' is already set in instance data. If it is,
+    a new tempdir will not be created.
+
+    It also supports `OPENPYPE_TMPDIR`, so a studio can define its own
+    shared temp repository per project or even per more granular context.
+    Template formatting is supported, including optional keys. The folder
+    is created in case it doesn't exist.
+
+    Available anatomy formatting keys:
+        - root[work | ]
+        - project[name | code]
+
+    Note:
+        Staging dir does not necessarily have to be in tempdir, so be
+        careful about its usage.
+
+    Args:
+        instance (pyblish.lib.Instance): Instance for which we want to get
+            staging dir.
+
+    Returns:
+        str: Path to staging dir of instance.
+    """
+    staging_dir = instance.data.get('stagingDir')
+    if staging_dir:
+        return staging_dir
+
+    anatomy = instance.context.data.get("anatomy")
+
+    # get customized tempdir path from `OPENPYPE_TMPDIR` env var
+    custom_temp_dir = tempdir.create_custom_tempdir(
+        anatomy.project_name, anatomy)
+
+    if custom_temp_dir:
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(
+                prefix="pyblish_tmp_",
+                dir=custom_temp_dir
+            )
+        )
+    else:
+        staging_dir = os.path.normpath(
+            tempfile.mkdtemp(prefix="pyblish_tmp_")
+        )
+    instance.data['stagingDir'] = staging_dir
+
+    return staging_dir
+
+
+def get_publish_repre_path(instance, repre, only_published=False):
+    """Get representation path that can be used for integration.
+
+    When 'only_published' is set to true the validation of the path is not
+    relevant. In that case we just need what is set in 'published_path'
+    as "reference". The reference is not used to get or upload the file,
+    but to reference where the file was published.
+
+    Args:
+        instance (pyblish.Instance): Processed instance object. Used as
+            source of staging dir if the representation does not have it
+            filled.
+        repre (dict): Representation on instance which may or may not have
+            been integrated by the main integrator.
+        only_published (bool): Care only about published paths and ignore
+            whether the file path still exists.
+
+    Returns:
+        str: Path to representation file.
+        None: Path is not filled or does not exist.
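+
+    Example:
+        >>> # 'instance' and 'repre' are illustrative publish objects.
+        >>> # With default arguments the result is an existing file path
+        >>> # or None.
+        >>> path = get_publish_repre_path(instance, repre)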
+ """ + + published_path = repre.get("published_path") + if published_path: + published_path = os.path.normpath(published_path) + if os.path.exists(published_path): + return published_path + + if only_published: + return published_path + + comp_files = repre["files"] + if isinstance(comp_files, (tuple, list, set)): + filename = comp_files[0] + else: + filename = comp_files + + staging_dir = repre.get("stagingDir") + if not staging_dir: + staging_dir = get_instance_staging_dir(instance) + src_path = os.path.normpath(os.path.join(staging_dir, filename)) + if os.path.exists(src_path): + return src_path + return None diff --git a/openpype/pipeline/publish/publish_plugins.py b/openpype/pipeline/publish/publish_plugins.py index 2402a005c2..e2ae893aa9 100644 --- a/openpype/pipeline/publish/publish_plugins.py +++ b/openpype/pipeline/publish/publish_plugins.py @@ -1,5 +1,31 @@ +import inspect +from abc import ABCMeta +from pprint import pformat +import pyblish.api +from pyblish.plugin import MetaPlugin, ExplicitMetaPlugin + from openpype.lib import BoolDef -from .lib import load_help_content_from_plugin + +from .lib import ( + load_help_content_from_plugin, + get_errored_instances_from_context, + get_errored_plugins_from_context, + get_instance_staging_dir, +) + +from openpype.pipeline.colorspace import ( + get_imageio_colorspace_from_filepath, + get_imageio_config, + get_imageio_file_rules +) + + +class AbstractMetaInstancePlugin(ABCMeta, MetaPlugin): + pass + + +class AbstractMetaContextPlugin(ABCMeta, ExplicitMetaPlugin): + pass class PublishValidationError(Exception): @@ -16,6 +42,7 @@ class PublishValidationError(Exception): description(str): Detailed description of an error. It is possible to use Markdown syntax. """ + def __init__(self, message, title=None, description=None, detail=None): self.message = message self.title = title or "< Missing title >" @@ -49,6 +76,7 @@ class KnownPublishError(Exception): Message will be shown in UI for artist. """ + pass @@ -90,8 +118,9 @@ class OpenPypePyblishPluginMixin: Attributes available for all families in plugin's `families` attribute. Returns: - list: Attribute definitions for plugin. + list: Attribute definitions for plugin. """ + return [] @classmethod @@ -110,17 +139,33 @@ class OpenPypePyblishPluginMixin: ) return attribute_values + @staticmethod + def get_attr_values_from_data_for_plugin(plugin, data): + """Get attribute values for attribute definitions from data. + + Args: + plugin (Union[publish.api.Plugin, Type[publish.api.Plugin]]): The + plugin for which attributes are extracted. + data(dict): Data from instance or context. + """ + + if not inspect.isclass(plugin): + plugin = plugin.__class__ + + return ( + data + .get("publish_attributes", {}) + .get(plugin.__name__, {}) + ) + def get_attr_values_from_data(self, data): """Get attribute values for attribute definitions from data. Args: data(dict): Data from instance or context. """ - return ( - data - .get("publish_attributes", {}) - .get(self.__class__.__name__, {}) - ) + + return self.get_attr_values_from_data_for_plugin(self.__class__, data) class OptionalPyblishPluginMixin(OpenPypePyblishPluginMixin): @@ -170,3 +215,214 @@ class OptionalPyblishPluginMixin(OpenPypePyblishPluginMixin): if active is None: active = getattr(self, "active", True) return active + + +class RepairAction(pyblish.api.Action): + """Repairs the action + + To process the repairing this requires a static `repair(instance)` method + is available on the plugin. 
+ """ + + label = "Repair" + on = "failed" # This action is only available on a failed plug-in + icon = "wrench" # Icon from Awesome Icon + + def process(self, context, plugin): + if not hasattr(plugin, "repair"): + raise RuntimeError("Plug-in does not have repair method.") + + # Get the errored instances + self.log.info("Finding failed instances..") + errored_instances = get_errored_instances_from_context(context) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(errored_instances, plugin) + for instance in instances: + plugin.repair(instance) + + +class RepairContextAction(pyblish.api.Action): + """Repairs the action + + To process the repairing this requires a static `repair(instance)` method + is available on the plugin. + """ + + label = "Repair" + on = "failed" # This action is only available on a failed plug-in + + def process(self, context, plugin): + if not hasattr(plugin, "repair"): + raise RuntimeError("Plug-in does not have repair method.") + + # Get the failed instances + self.log.info("Finding failed instances..") + failed_plugins = get_errored_plugins_from_context(context) + + # Apply pyblish.logic to get the instances for the plug-in + if plugin in failed_plugins: + self.log.info("Attempting fix ...") + plugin.repair(context) + + +class Extractor(pyblish.api.InstancePlugin): + """Extractor base class. + + The extractor base class implements a "staging_dir" function used to + generate a temporary directory for an instance to extract to. + + This temporary directory is generated through `tempfile.mkdtemp()` + + """ + + order = 2.0 + + def staging_dir(self, instance): + """Provide a temporary directory in which to store extracted files + + Upon calling this method the staging directory is stored inside + the instance.data['stagingDir'] + """ + + return get_instance_staging_dir(instance) + + +class ExtractorColormanaged(Extractor): + """Extractor base for color managed image data. + + Each Extractor intended to export pixel data representation + should inherit from this class to allow color managed data. + Class implements "get_colorspace_settings" and + "set_representation_colorspace" functions used + for injecting colorspace data to representation data for farther + integration into db document. + + """ + + allowed_ext = [ + "cin", "dpx", "avi", "dv", "gif", "flv", "mkv", "mov", "mpg", "mpeg", + "mp4", "m4v", "mxf", "iff", "z", "ifl", "jpeg", "jpg", "jfif", "lut", + "1dl", "exr", "pic", "png", "ppm", "pnm", "pgm", "pbm", "rla", "rpf", + "sgi", "rgba", "rgb", "bw", "tga", "tiff", "tif", "img" + ] + + @staticmethod + def get_colorspace_settings(context): + """Retuns solved settings for the host context. 
+ + Args: + context (publish.Context): publishing context + + Returns: + tuple | bool: config, file rules or None + """ + if "imageioSettings" in context.data: + return context.data["imageioSettings"] + + project_name = context.data["projectName"] + host_name = context.data["hostName"] + anatomy_data = context.data["anatomyData"] + project_settings_ = context.data["project_settings"] + + config_data = get_imageio_config( + project_name, host_name, + project_settings=project_settings_, + anatomy_data=anatomy_data + ) + file_rules = get_imageio_file_rules( + project_name, host_name, + project_settings=project_settings_ + ) + + # caching settings for future instance processing + context.data["imageioSettings"] = (config_data, file_rules) + + return config_data, file_rules + + def set_representation_colorspace( + self, representation, context, + colorspace=None, + colorspace_settings=None + ): + """Sets colorspace data to representation. + + Args: + representation (dict): publishing representation + context (publish.Context): publishing context + config_data (dict): host resolved config data + file_rules (dict): host resolved file rules data + colorspace (str, optional): colorspace name. Defaults to None. + colorspace_settings (tuple[dict, dict], optional): + Settings for config_data and file_rules. + Defaults to None. + + Example: + ``` + { + # for other publish plugins and loaders + "colorspace": "linear", + "config": { + # for future references in case need + "path": "/abs/path/to/config.ocio", + # for other plugins within remote publish cases + "template": "{project[root]}/path/to/config.ocio" + } + } + ``` + + """ + ext = representation["ext"] + # check extension + self.log.debug("__ ext: `{}`".format(ext)) + if ext.lower() not in self.allowed_ext: + return + + if colorspace_settings is None: + colorspace_settings = self.get_colorspace_settings(context) + + # unpack colorspace settings + config_data, file_rules = colorspace_settings + + if not config_data: + # warn in case no colorspace path was defined + self.log.warning("No colorspace management was defined") + return + + self.log.info("Config data is : `{}`".format( + config_data)) + + project_name = context.data["projectName"] + host_name = context.data["hostName"] + project_settings = context.data["project_settings"] + + # get one filename + filename = representation["files"] + if isinstance(filename, list): + filename = filename[0] + + self.log.debug("__ filename: `{}`".format( + filename)) + + # get matching colorspace from rules + colorspace = colorspace or get_imageio_colorspace_from_filepath( + filename, host_name, project_name, + config_data=config_data, + file_rules=file_rules, + project_settings=project_settings + ) + self.log.debug("__ colorspace: `{}`".format( + colorspace)) + + # infuse data to representation + if colorspace: + colorspace_data = { + "colorspace": colorspace, + "config": config_data + } + + # update data key + representation["colorspaceData"] = colorspace_data + + self.log.debug("__ colorspace_data: `{}`".format( + pformat(colorspace_data))) diff --git a/openpype/pipeline/schema.py b/openpype/pipeline/schema.py new file mode 100644 index 0000000000..7e96bfe1b1 --- /dev/null +++ b/openpype/pipeline/schema.py @@ -0,0 +1,137 @@ +"""Wrapper around :mod:`jsonschema` + +Schemas are implicitly loaded from the /schema directory of this project. 
+
+Attributes:
+    _cache: Cache of previously loaded schemas
+
+Resources:
+    http://json-schema.org/
+    http://json-schema.org/latest/json-schema-core.html
+    http://spacetelescope.github.io/understanding-json-schema/index.html
+
+"""
+
+import os
+import re
+import json
+import logging
+
+import jsonschema
+import six
+
+log_ = logging.getLogger(__name__)
+
+ValidationError = jsonschema.ValidationError
+SchemaError = jsonschema.SchemaError
+
+_CACHED = False
+
+
+def get_schema_version(schema_name):
+    """Extract version from schema name.
+
+    It is expected that the schema name contains only major and minor
+    version.
+
+    Expected name should match:
+        "{name}:{type}-{major version}.{minor version}"
+        - `name` - must not contain colon
+        - `type` - must not contain dash
+        - major and minor versions must be numbers separated by dot
+
+    Args:
+        schema_name(str): Name of schema that should be parsed.
+
+    Returns:
+        tuple: Contains two values, major version first and minor version
+            second. When the schema name does not match the parsing regex
+            then `(0, 0)` is returned.
+    """
+    # NOTE: the dot must be escaped, otherwise it matches any character
+    schema_regex = re.compile(r"[^:]+:[^-]+-(\d\.\d)")
+    groups = schema_regex.findall(schema_name)
+    if not groups:
+        return 0, 0
+
+    maj_version, min_version = groups[0].split(".")
+    return int(maj_version), int(min_version)
+
+
+def validate(data, schema=None):
+    """Validate `data` with `schema`
+
+    Arguments:
+        data (dict): JSON-compatible data
+        schema (str): DEPRECATED Name of schema. Now included in the data.
+
+    Raises:
+        ValidationError on invalid schema
+
+    """
+    if not _CACHED:
+        _precache()
+
+    root, schema = data["schema"].rsplit(":", 1)
+    # assert root in (
+    #     "mindbender-core",  # Backwards compatiblity
+    #     "avalon-core",
+    #     "pype"
+    # )
+
+    if isinstance(schema, six.string_types):
+        schema = _cache[schema + ".json"]
+
+    resolver = jsonschema.RefResolver(
+        "",
+        None,
+        store=_cache,
+        cache_remote=True
+    )
+
+    jsonschema.validate(data,
+                        schema,
+                        types={"array": (list, tuple)},
+                        resolver=resolver)
+
+
+_cache = {
+    # A mock schema for docstring tests
+    "_doctest.json": {
+        "$schema": "http://json-schema.org/schema#",
+
+        "title": "_doctest",
+        "description": "A test schema",
+
+        "type": "object",
+
+        "additionalProperties": False,
+
+        "required": ["key"],
+
+        "properties": {
+            "key": {
+                "description": "A test key",
+                "type": "string"
+            }
+        }
+    }
+}
+
+
+def _precache():
+    """Store available schemas in-memory for reduced disk access"""
+    global _CACHED
+
+    repos_root = os.environ["OPENPYPE_REPOS_ROOT"]
+    schema_dir = os.path.join(repos_root, "schema")
+
+    for schema in os.listdir(schema_dir):
+        if schema.startswith(("_", ".")):
+            continue
+        if not schema.endswith(".json"):
+            continue
+        if not os.path.isfile(os.path.join(schema_dir, schema)):
+            continue
+        with open(os.path.join(schema_dir, schema)) as f:
+            log_.debug("Installing schema '%s'.." % schema)
+            _cache[schema] = json.load(f)
+    _CACHED = True
diff --git a/openpype/pipeline/tempdir.py b/openpype/pipeline/tempdir.py
new file mode 100644
index 0000000000..55a1346b08
--- /dev/null
+++ b/openpype/pipeline/tempdir.py
@@ -0,0 +1,59 @@
+"""
+Temporary folder operations
+"""
+
+import os
+from openpype.lib import StringTemplate
+from openpype.pipeline import Anatomy
+
+
+def create_custom_tempdir(project_name, anatomy=None):
+    """Create custom tempdir.
+
+    Template path formatting supports:
+    - optional key formatting
+    - available keys:
+        - root[work | ]
+        - project[name | code]
+
+    Args:
+        project_name (str): project name
+        anatomy (openpype.pipeline.Anatomy, optional): Anatomy object
+
+    Returns:
+        str | None: formatted path or None
+    """
+    openpype_tempdir = os.getenv("OPENPYPE_TMPDIR")
+    if not openpype_tempdir:
+        return
+
+    custom_tempdir = None
+    if "{" in openpype_tempdir:
+        if anatomy is None:
+            anatomy = Anatomy(project_name)
+        # create base format data
+        data = {
+            "root": anatomy.roots,
+            "project": {
+                "name": anatomy.project_name,
+                "code": anatomy.project_code,
+            }
+        }
+        # path is anatomy template
+        custom_tempdir = StringTemplate.format_template(
+            openpype_tempdir, data).normalized()
+
+    else:
+        # path is absolute
+        custom_tempdir = openpype_tempdir
+
+    # create the dir path if it doesn't exist
+    if not os.path.exists(custom_tempdir):
+        try:
+            os.makedirs(custom_tempdir)
+        except IOError as error:
+            raise IOError(
+                "Path couldn't be created: {}".format(error))
+
+    return custom_tempdir
diff --git a/openpype/pipeline/template_data.py b/openpype/pipeline/template_data.py
new file mode 100644
index 0000000000..627eba5c3d
--- /dev/null
+++ b/openpype/pipeline/template_data.py
@@ -0,0 +1,238 @@
+from openpype.client import get_project, get_asset_by_name
+from openpype.settings import get_system_settings
+from openpype.lib.local_settings import get_openpype_username
+
+
+def get_general_template_data(system_settings=None):
+    """General template data based on system settings or machine.
+
+    Output contains formatting keys:
+    - 'studio[name]' - Studio name filled from system settings
+    - 'studio[code]' - Studio code filled from system settings
+    - 'user' - User's name using 'get_openpype_username'
+
+    Args:
+        system_settings (Dict[str, Any]): System settings.
+    """
+
+    if not system_settings:
+        system_settings = get_system_settings()
+    studio_name = system_settings["general"]["studio_name"]
+    studio_code = system_settings["general"]["studio_code"]
+    return {
+        "studio": {
+            "name": studio_name,
+            "code": studio_code
+        },
+        "user": get_openpype_username()
+    }
+
+
+def get_project_template_data(project_doc=None, project_name=None):
+    """Extract data from project document that are used in templates.
+
+    Project document must have 'name' and (at this moment) optional
+    key 'data.code'.
+
+    One of 'project_name' or 'project_doc' must be passed. With a prepared
+    project document the function is much faster because it does not have
+    to query.
+
+    Output contains formatting keys:
+    - 'project[name]' - Project name
+    - 'project[code]' - Project code
+
+    Args:
+        project_doc (Dict[str, Any]): Queried project document.
+        project_name (str): Name of project.
+
+    Returns:
+        Dict[str, Dict[str, str]]: Template data based on project document.
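+
+    Example:
+        >>> # Illustrative project named "MyProject" with code "mp"
+        >>> get_project_template_data(project_name="MyProject")
+        {'project': {'name': 'MyProject', 'code': 'mp'}}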
+ """ + + if not project_name: + project_name = project_doc["name"] + + if not project_doc: + project_doc = get_project(project_name, fields=["data.code"]) + + project_code = project_doc.get("data", {}).get("code") + return { + "project": { + "name": project_name, + "code": project_code + } + } + + +def get_asset_template_data(asset_doc, project_name): + """Extract data from asset document that are used in templates. + + Output dictionary contains keys: + - 'asset' - asset name + - 'hierarchy' - parent asset names joined with '/' + - 'parent' - direct parent name, project name used if is under project + + Required document fields: + Asset: 'name', 'data.parents' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + project_name (str): Is used for 'parent' key if asset doc does not have + any. + + Returns: + Dict[str, str]: Data that are based on asset document and can be used + in templates. + """ + + asset_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(asset_parents) + if asset_parents: + parent_name = asset_parents[-1] + else: + parent_name = project_name + + return { + "asset": asset_doc["name"], + "hierarchy": hierarchy, + "parent": parent_name + } + + +def get_task_type(asset_doc, task_name): + """Get task type based on asset document and task name. + + Required document fields: + Asset: 'data.tasks' + + Args: + asset_doc (Dict[str, Any]): Queried asset document. + task_name (str): Task name which is under asset. + + Returns: + str: Task type name. + None: Task was not found on asset document. + """ + + asset_tasks_info = asset_doc["data"]["tasks"] + return asset_tasks_info.get(task_name, {}).get("type") + + +def get_task_template_data(project_doc, asset_doc, task_name): + """"Extract task specific data from project and asset documents. + + Required document fields: + Project: 'config.tasks' + Asset: 'data.tasks'. + + Args: + project_doc (Dict[str, Any]): Queried project document. + asset_doc (Dict[str, Any]): Queried asset document. + tas_name (str): Name of task for which data should be returned. + + Returns: + Dict[str, Dict[str, str]]: Template data + """ + + project_task_types = project_doc["config"]["tasks"] + task_type = get_task_type(asset_doc, task_name) + task_code = project_task_types.get(task_type, {}).get("short_name") + + return { + "task": { + "name": task_name, + "type": task_type, + "short": task_code, + } + } + + +def get_template_data( + project_doc, + asset_doc=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered documents and info. + + This function does not "auto fill" any values except system settings and + it's on purpose. + + Universal function to receive template data from passed arguments. Only + required argument is project document all other arguments are optional + and their values won't be added to template data if are not passed. + + Required document fields: + Project: 'name', 'data.code', 'config.tasks' + Asset: 'name', 'data.parents', 'data.tasks' + + Args: + project_doc (Dict[str, Any]): Mongo document of project from MongoDB. + asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]): Used to fill '{app}' key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed (may be slower). + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. 
+ """ + + template_data = get_general_template_data(system_settings) + template_data.update(get_project_template_data(project_doc)) + if asset_doc: + template_data.update(get_asset_template_data( + asset_doc, project_doc["name"] + )) + if task_name: + template_data.update(get_task_template_data( + project_doc, asset_doc, task_name + )) + + if host_name: + template_data["app"] = host_name + + return template_data + + +def get_template_data_with_names( + project_name, + asset_name=None, + task_name=None, + host_name=None, + system_settings=None +): + """Prepare data for templates filling from entered entity names and info. + + Copy of 'get_template_data' but based on entity names instead of documents. + Only difference is that documents are queried. + + Args: + project_name (str): Project name for which template data are + calculated. + asset_name (Union[str, None]): Asset name for which template data are + calculated. + task_name (Union[str, None]): Task name under passed asset. + host_name (Union[str, None]):Used to fill '{app}' key. + because workdir template may contain `{app}` key. + system_settings (Union[Dict, None]): Prepared system settings. + They're queried if not passed. + + Returns: + Dict[str, Any]: Data prepared for filling workdir template. + """ + + project_doc = get_project( + project_name, fields=["name", "data.code", "config.tasks"] + ) + asset_doc = None + if asset_name: + asset_doc = get_asset_by_name( + project_name, + asset_name, + fields=["name", "data.parents", "data.tasks"] + ) + return get_template_data( + project_doc, asset_doc, task_name, host_name, system_settings + ) diff --git a/openpype/pipeline/thumbnail.py b/openpype/pipeline/thumbnail.py index 12bab83be6..39f3e17893 100644 --- a/openpype/pipeline/thumbnail.py +++ b/openpype/pipeline/thumbnail.py @@ -2,6 +2,14 @@ import os import copy import logging +from openpype.client import get_project +from . 
+from .anatomy import Anatomy
+from .plugin_discover import (
+    discover,
+    register_plugin,
+    register_plugin_path,
+)
 
 log = logging.getLogger(__name__)
 
@@ -12,8 +20,7 @@ def get_thumbnail_binary(thumbnail_entity, thumbnail_type, dbcon=None):
     resolvers = discover_thumbnail_resolvers()
     resolvers = sorted(resolvers, key=lambda cls: cls.priority)
     if dbcon is None:
-        from avalon import io
-        dbcon = io
+        dbcon = legacy_io
 
     for Resolver in resolvers:
         available_types = Resolver.thumbnail_types
@@ -67,26 +74,22 @@ class ThumbnailResolver(object):
 
 class TemplateResolver(ThumbnailResolver):
-
     priority = 90
 
     def process(self, thumbnail_entity, thumbnail_type):
-
-        if not os.environ.get("AVALON_THUMBNAIL_ROOT"):
-            return
-
         template = thumbnail_entity["data"].get("template")
         if not template:
             self.log.debug("Thumbnail entity does not have set template")
             return
 
-        project = self.dbcon.find_one(
-            {"type": "project"},
-            {
-                "name": True,
-                "data.code": True
-            }
-        )
+        thumbnail_root_format_key = "{thumbnail_root}"
+        thumbnail_root = os.environ.get("AVALON_THUMBNAIL_ROOT") or ""
+        # Check if template requires thumbnail root and if it is available
+        if thumbnail_root_format_key in template and not thumbnail_root:
+            return
+
+        project_name = self.dbcon.active_project()
+        project = get_project(project_name, fields=["name", "data.code"])
 
         template_data = copy.deepcopy(
             thumbnail_entity["data"].get("template_data") or {}
@@ -94,12 +97,16 @@ class TemplateResolver(ThumbnailResolver):
         template_data.update({
             "_id": str(thumbnail_entity["_id"]),
             "thumbnail_type": thumbnail_type,
-            "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"),
+            "thumbnail_root": thumbnail_root,
             "project": {
                 "name": project["name"],
                 "code": project["data"].get("code")
-            }
+            },
         })
+        # Add anatomy roots if they are used in the template
+        if "{root" in template:
+            anatomy = Anatomy(project_name)
+            template_data["root"] = anatomy.roots
 
         try:
             filepath = os.path.normpath(template.format(**template_data))
@@ -126,21 +133,15 @@ class BinaryThumbnail(ThumbnailResolver):
 
 # Thumbnail resolvers
 def discover_thumbnail_resolvers():
-    import avalon.api
-
-    return avalon.api.discover(ThumbnailResolver)
+    return discover(ThumbnailResolver)
 
 
 def register_thumbnail_resolver(plugin):
-    import avalon.api
-
-    return avalon.api.register_plugin(ThumbnailResolver, plugin)
+    register_plugin(ThumbnailResolver, plugin)
 
 
 def register_thumbnail_resolver_path(path):
-    import avalon.api
-
-    return avalon.api.register_plugin_path(ThumbnailResolver, path)
+    register_plugin_path(ThumbnailResolver, path)
 
 
 register_thumbnail_resolver(TemplateResolver)
diff --git a/openpype/pipeline/workfile/__init__.py b/openpype/pipeline/workfile/__init__.py
new file mode 100644
index 0000000000..94ecc81bd6
--- /dev/null
+++ b/openpype/pipeline/workfile/__init__.py
@@ -0,0 +1,34 @@
+from .path_resolving import (
+    get_workfile_template_key_from_context,
+    get_workfile_template_key,
+    get_workdir_with_workdir_data,
+    get_workdir,
+
+    get_last_workfile_with_version,
+    get_last_workfile,
+
+    get_custom_workfile_template,
+    get_custom_workfile_template_by_string_context,
+
+    create_workdir_extra_folders,
+)
+
+from .build_workfile import BuildWorkfile
+
+
+__all__ = (
+    "get_workfile_template_key_from_context",
+    "get_workfile_template_key",
+    "get_workdir_with_workdir_data",
+    "get_workdir",
+
+    "get_last_workfile_with_version",
+    "get_last_workfile",
+
+    "get_custom_workfile_template",
+    "get_custom_workfile_template_by_string_context",
+
+    "create_workdir_extra_folders",
+
+    "BuildWorkfile",
+)
diff --git a/openpype/pipeline/workfile/build_workfile.py b/openpype/pipeline/workfile/build_workfile.py
new file mode 100644
index 0000000000..26b17fa151
--- /dev/null
+++ b/openpype/pipeline/workfile/build_workfile.py
@@ -0,0 +1,706 @@
+"""Workfile build based on settings.
+
+Workfile builder loads content into the current workfile based on project
+settings. The advantage is that it only needs access to settings. The
+disadvantage is that it is hard to tailor the build per context and be
+explicit about the loaded content.
+
+For a more explicit workfile build, 'AbstractTemplateBuilder' from
+'~/openpype/pipeline/workfile/workfile_template_builder' is recommended.
+It gives more control over how the build happens but requires more code to
+achieve it.
+"""
+
+import os
+import re
+import collections
+import json
+
+from openpype.client import (
+    get_asset_by_name,
+    get_subsets,
+    get_last_versions,
+    get_representations,
+    get_linked_assets,
+)
+from openpype.settings import get_project_settings
+from openpype.lib import (
+    filter_profiles,
+    Logger,
+)
+from openpype.pipeline import legacy_io
+from openpype.pipeline.load import (
+    discover_loader_plugins,
+    IncompatibleLoaderError,
+    load_container,
+)
+
+
+class BuildWorkfile:
+    """Wrapper for the workfile build process.
+
+    Load representations for current context by build presets. Build presets
+    are host related, since each host has its own loaders.
+    """
+
+    _log = None
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(self.__class__.__name__)
+        return self._log
+
+    @staticmethod
+    def map_subsets_by_family(subsets):
+        subsets_by_family = collections.defaultdict(list)
+        for subset in subsets:
+            family = subset["data"].get("family")
+            if not family:
+                families = subset["data"].get("families")
+                if not families:
+                    continue
+                family = families[0]
+
+            subsets_by_family[family].append(subset)
+        return subsets_by_family
+
+    def process(self):
+        """Main method of this wrapper.
+
+        Triggers the workfile build; post processing of loaded containers
+        can be implemented here if necessary.
+
+        Returns:
+            List[Dict[str, Any]]: Loaded containers during build.
+        """
+
+        return self.build_workfile()
+
+    def build_workfile(self):
+        """Prepares and loads containers into the workfile.
+
+        Loads latest versions of current and linked assets into the workfile
+        by logic stored in Workfile profiles from presets. Profiles are set
+        by host, filtered by current task name and used by families.
+
+        Each family can specify representation names and loaders for
+        representations, and the first successfully loaded representation is
+        returned as a container.
+
+        At the end you'll get a list of loaded containers per asset.
+
+        loaded_containers [{
+            "asset_entity": <AssetEntity1>,
+            "containers": [<Container1>, <Container2>, ...]
+        }, {
+            "asset_entity": <AssetEntity2>,
+            "containers": [<Container3>, ...]
+        }, {
+            ...
+        }]
+
+        Returns:
+            List[Dict[str, Any]]: Loaded containers during build.
+ """ + + loaded_containers = [] + + # Get current asset name and entity + project_name = legacy_io.active_project() + current_asset_name = legacy_io.Session["AVALON_ASSET"] + current_asset_entity = get_asset_by_name( + project_name, current_asset_name + ) + # Skip if asset was not found + if not current_asset_entity: + print("Asset entity with name `{}` was not found".format( + current_asset_name + )) + return loaded_containers + + # Prepare available loaders + loaders_by_name = {} + for loader in discover_loader_plugins(): + if not loader.enabled: + continue + loader_name = loader.__name__ + if loader_name in loaders_by_name: + raise KeyError( + "Duplicated loader name {0}!".format(loader_name) + ) + loaders_by_name[loader_name] = loader + + # Skip if there are any loaders + if not loaders_by_name: + self.log.warning("There are no registered loaders.") + return loaded_containers + + # Get current task name + current_task_name = legacy_io.Session["AVALON_TASK"] + + # Load workfile presets for task + self.build_presets = self.get_build_presets( + current_task_name, current_asset_entity + ) + + # Skip if there are any presets for task + if not self.build_presets: + self.log.warning( + "Current task `{}` does not have any loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + # Get presets for loading current asset + current_context_profiles = self.build_presets.get("current_context") + # Get presets for loading linked assets + link_context_profiles = self.build_presets.get("linked_assets") + # Skip if both are missing + if not current_context_profiles and not link_context_profiles: + self.log.warning( + "Current task `{}` has empty loading preset.".format( + current_task_name + ) + ) + return loaded_containers + + elif not current_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any loading" + " preset for it's context." + ).format(current_task_name)) + + elif not link_context_profiles: + self.log.warning(( + "Current task `{}` doesn't have any" + "loading preset for it's linked assets." + ).format(current_task_name)) + + # Prepare assets to process by workfile presets + assets = [] + current_asset_id = None + if current_context_profiles: + # Add current asset entity if preset has current context set + assets.append(current_asset_entity) + current_asset_id = current_asset_entity["_id"] + + if link_context_profiles: + # Find and append linked assets if preset has set linked mapping + link_assets = get_linked_assets(current_asset_entity) + if link_assets: + assets.extend(link_assets) + + # Skip if there are no assets. This can happen if only linked mapping + # is set and there are no links for his asset. + if not assets: + self.log.warning( + "Asset does not have linked assets. Nothing to process." 
+            )
+            return loaded_containers
+
+        # Prepare entities from database for assets
+        prepared_entities = self._collect_last_version_repres(assets)
+
+        # Load containers by prepared entities and presets
+        # - Current asset containers
+        if current_asset_id and current_asset_id in prepared_entities:
+            current_context_data = prepared_entities.pop(current_asset_id)
+            loaded_data = self.load_containers_by_asset_data(
+                current_context_data, current_context_profiles,
+                loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # - Linked assets containers
+        for linked_asset_data in prepared_entities.values():
+            loaded_data = self.load_containers_by_asset_data(
+                linked_asset_data, link_context_profiles, loaders_by_name
+            )
+            if loaded_data:
+                loaded_containers.append(loaded_data)
+
+        # Return list of loaded containers
+        return loaded_containers
+
+    def get_build_presets(self, task_name, asset_doc):
+        """Returns presets to build workfile for task name.
+
+        Presets are loaded for current project set in
+        legacy_io.Session["AVALON_PROJECT"], filtered by registered host
+        and entered task name.
+
+        Args:
+            task_name (str): Task name used for filtering build presets.
+            asset_doc (Dict[str, Any]): Asset document from which the task
+                type is resolved.
+
+        Returns:
+            Dict[str, Any]: Preset for the entered task name.
+        """
+
+        host_name = os.environ["AVALON_APP"]
+        project_settings = get_project_settings(
+            legacy_io.Session["AVALON_PROJECT"]
+        )
+
+        host_settings = project_settings.get(host_name) or {}
+        # Get presets for host
+        wb_settings = host_settings.get("workfile_builder")
+        if not wb_settings:
+            # backward compatibility
+            wb_settings = host_settings.get("workfile_build") or {}
+
+        builder_profiles = wb_settings.get("profiles")
+        if not builder_profiles:
+            return None
+
+        task_type = (
+            asset_doc
+            .get("data", {})
+            .get("tasks", {})
+            .get(task_name, {})
+            .get("type")
+        )
+        filter_data = {
+            "task_types": task_type,
+            "tasks": task_name
+        }
+        return filter_profiles(builder_profiles, filter_data)
+
+    def _filter_build_profiles(self, build_profiles, loaders_by_name):
+        """Filter build profiles by loaders and prepare process data.
+
+        Valid profile must have "loaders", "families" and "repre_names" keys
+        with valid values.
+        - "loaders" expects list of strings representing possible loaders.
+        - "families" expects list of strings for filtering
+          by main subset family.
+        - "repre_names" expects list of strings for filtering by
+          representation name.
+
+        Lowercased "families" and "repre_names" are added to each valid
+        profile.
+
+        Args:
+            build_profiles (Dict[str, Any]): Profiles for building workfile.
+            loaders_by_name (Dict[str, LoaderPlugin]): Available loaders
+                per name.
+
+        Returns:
+            List[Dict[str, Any]]: Filtered and prepared profiles.
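+
+        Example:
+            A minimal valid profile (values are illustrative)::
+
+                {
+                    "loaders": ["ReferenceLoader"],
+                    "families": ["model", "rig"],
+                    "repre_names": ["ma", "abc"]
+                }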
+ """ + + valid_profiles = [] + for profile in build_profiles: + # Check loaders + profile_loaders = profile.get("loaders") + if not profile_loaders: + self.log.warning(( + "Build profile has missing loaders configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check if any loader is available + loaders_match = False + for loader_name in profile_loaders: + if loader_name in loaders_by_name: + loaders_match = True + break + + if not loaders_match: + self.log.warning(( + "All loaders from Build profile are not available: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check families + profile_families = profile.get("families") + if not profile_families: + self.log.warning(( + "Build profile is missing families configuration: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Check representation names + profile_repre_names = profile.get("repre_names") + if not profile_repre_names: + self.log.warning(( + "Build profile is missing" + " representation names filtering: {0}" + ).format(json.dumps(profile, indent=4))) + continue + + # Prepare lowered families and representation names + profile["families_lowered"] = [ + fam.lower() for fam in profile_families + ] + profile["repre_names_lowered"] = [ + name.lower() for name in profile_repre_names + ] + + valid_profiles.append(profile) + + return valid_profiles + + def _prepare_profile_for_subsets(self, subsets, profiles): + """Select profile for each subset by it's data. + + Profiles are filtered for each subset individually. + Profile is filtered by subset's family, optionally by name regex and + representation names set in profile. + It is possible to not find matching profile for subset, in that case + subset is skipped and it is possible that none of subsets have + matching profile. + + Args: + subsets (List[Dict[str, Any]]): Subset documents. + profiles (List[Dict[str, Any]]): Build profiles. + + Returns: + Dict[str, Any]: Profile by subset's id. + """ + + # Prepare subsets + subsets_by_family = self.map_subsets_by_family(subsets) + + profiles_per_subset_id = {} + for family, subsets in subsets_by_family.items(): + family_low = family.lower() + for profile in profiles: + # Skip profile if does not contain family + if family_low not in profile["families_lowered"]: + continue + + # Precompile name filters as regexes + profile_regexes = profile.get("subset_name_filters") + if profile_regexes: + _profile_regexes = [] + for regex in profile_regexes: + _profile_regexes.append(re.compile(regex)) + profile_regexes = _profile_regexes + + # TODO prepare regex compilation + for subset in subsets: + # Verify regex filtering (optional) + if profile_regexes: + valid = False + for pattern in profile_regexes: + if re.match(pattern, subset["name"]): + valid = True + break + + if not valid: + continue + + profiles_per_subset_id[subset["_id"]] = profile + + # break profiles loop on finding the first matching profile + break + return profiles_per_subset_id + + def load_containers_by_asset_data( + self, asset_entity_data, build_profiles, loaders_by_name + ): + """Load containers for entered asset entity by Build profiles. + + Args: + asset_entity_data (Dict[str, Any]): Prepared data with subsets, + last versions and representations for specific asset. + build_profiles (Dict[str, Any]): Build profiles. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + Dict[str, Any]: Output contains asset document + and loaded containers. 
+ """ + + # Make sure all data are not empty + if not asset_entity_data or not build_profiles or not loaders_by_name: + return + + asset_entity = asset_entity_data["asset_entity"] + + valid_profiles = self._filter_build_profiles( + build_profiles, loaders_by_name + ) + if not valid_profiles: + self.log.warning( + "There are not valid Workfile profiles. Skipping process." + ) + return + + self.log.debug("Valid Workfile profiles: {}".format(valid_profiles)) + + subsets_by_id = {} + version_by_subset_id = {} + repres_by_version_id = {} + for subset_id, in_data in asset_entity_data["subsets"].items(): + subset_entity = in_data["subset_entity"] + subsets_by_id[subset_entity["_id"]] = subset_entity + + version_data = in_data["version"] + version_entity = version_data["version_entity"] + version_by_subset_id[subset_id] = version_entity + repres_by_version_id[version_entity["_id"]] = ( + version_data["repres"] + ) + + if not subsets_by_id: + self.log.warning("There are not subsets for asset {0}".format( + asset_entity["name"] + )) + return + + profiles_per_subset_id = self._prepare_profile_for_subsets( + subsets_by_id.values(), valid_profiles + ) + if not profiles_per_subset_id: + self.log.warning("There are not valid subsets.") + return + + valid_repres_by_subset_id = collections.defaultdict(list) + for subset_id, profile in profiles_per_subset_id.items(): + profile_repre_names = profile["repre_names_lowered"] + + version_entity = version_by_subset_id[subset_id] + version_id = version_entity["_id"] + repres = repres_by_version_id[version_id] + for repre in repres: + repre_name_low = repre["name"].lower() + if repre_name_low in profile_repre_names: + valid_repres_by_subset_id[subset_id].append(repre) + + # DEBUG message + msg = "Valid representations for Asset: `{}`".format( + asset_entity["name"] + ) + for subset_id, repres in valid_repres_by_subset_id.items(): + subset = subsets_by_id[subset_id] + msg += "\n# Subset Name/ID: `{}`/{}".format( + subset["name"], subset_id + ) + for repre in repres: + msg += "\n## Repre name: `{}`".format(repre["name"]) + + self.log.debug(msg) + + containers = self._load_containers( + valid_repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ) + + return { + "asset_entity": asset_entity, + "containers": containers + } + + def _load_containers( + self, repres_by_subset_id, subsets_by_id, + profiles_per_subset_id, loaders_by_name + ): + """Real load by collected data happens here. + + Loading of representations per subset happens here. Each subset can + loads one representation. Loading is tried in specific order. + Representations are tried to load by names defined in configuration. + If subset has representation matching representation name each loader + is tried to load it until any is successful. If none of them was + successful then next representation name is tried. + Subset process loop ends when any representation is loaded or + all matching representations were already tried. + + Args: + repres_by_subset_id (Dict[str, Dict[str, Any]]): Available + representations mapped by their parent (subset) id. + subsets_by_id (Dict[str, Dict[str, Any]]): Subset documents + mapped by their id. + profiles_per_subset_id (Dict[str, Dict[str, Any]]): Build profiles + mapped by subset id. + loaders_by_name (Dict[str, LoaderPlugin]): Available loaders + per name. + + Returns: + List[Dict[str, Any]]: Objects of loaded containers. + """ + + loaded_containers = [] + + # Get subset id order from build presets. 
+        build_presets = self.build_presets.get("current_context", [])
+        build_presets += self.build_presets.get("linked_assets", [])
+        subset_ids_ordered = []
+        for preset in build_presets:
+            for preset_family in preset["families"]:
+                for subset_id, subset in subsets_by_id.items():
+                    if preset_family not in subset["data"].get(
+                        "families", []
+                    ):
+                        continue
+
+                    subset_ids_ordered.append(subset_id)
+
+        # Order representations from subsets.
+        self.log.debug(
+            "repres_by_subset_id: {}".format(repres_by_subset_id)
+        )
+        representations_ordered = []
+        representations = []
+        for ordered_id in subset_ids_ordered:
+            for subset_id, repres in repres_by_subset_id.items():
+                if repres in representations:
+                    continue
+
+                if ordered_id == subset_id:
+                    representations_ordered.append((subset_id, repres))
+                    representations.append(repres)
+
+        self.log.debug("representations: {}".format(representations))
+
+        # Load ordered representations.
+        for subset_id, repres in representations_ordered:
+            subset_name = subsets_by_id[subset_id]["name"]
+
+            profile = profiles_per_subset_id[subset_id]
+            loaders_last_idx = len(profile["loaders"]) - 1
+            repre_names_last_idx = len(profile["repre_names_lowered"]) - 1
+
+            repre_by_low_name = {
+                repre["name"].lower(): repre for repre in repres
+            }
+
+            is_loaded = False
+            for repre_name_idx, profile_repre_name in enumerate(
+                profile["repre_names_lowered"]
+            ):
+                # Break iteration if representation was already loaded
+                if is_loaded:
+                    break
+
+                repre = repre_by_low_name.get(profile_repre_name)
+                if not repre:
+                    continue
+
+                for loader_idx, loader_name in enumerate(profile["loaders"]):
+                    if is_loaded:
+                        break
+
+                    loader = loaders_by_name.get(loader_name)
+                    if not loader:
+                        continue
+                    try:
+                        container = load_container(
+                            loader,
+                            repre["_id"],
+                            name=subset_name
+                        )
+                        loaded_containers.append(container)
+                        is_loaded = True
+
+                    except Exception as exc:
+                        if isinstance(exc, IncompatibleLoaderError):
+                            self.log.info((
+                                "Loader `{}` is not compatible with"
+                                " representation `{}`"
+                            ).format(loader_name, repre["name"]))
+
+                        else:
+                            self.log.error(
+                                "Unexpected error happened during loading",
+                                exc_info=True
+                            )
+
+                        msg = "Loading failed."
+                        if loader_idx < loaders_last_idx:
+                            msg += " Trying next loader."
+                        elif repre_name_idx < repre_names_last_idx:
+                            msg += " Trying next representation."
+                        else:
+                            msg += (
+                                " Loading of subset `{}` was not successful."
+                            ).format(subset_name)
+                        self.log.info(msg)
+
+        return loaded_containers
+
+    def _collect_last_version_repres(self, asset_docs):
+        """Collect subsets, versions and representations for asset_entities.
+
+        Args:
+            asset_docs (List[Dict[str, Any]]): Asset entities for which we
+                want to find the data.
+
+        Returns:
+            Dict[str, Any]: Collected entities.
+
+        Example output:
+        ```
+        {
+            {Asset ID}: {
+                "asset_entity": <AssetEntity>,
+                "subsets": {
+                    {Subset ID}: {
+                        "subset_entity": <SubsetEntity>,
+                        "version": {
+                            "version_entity": <VersionEntity>,
+                            "repres": [
+                                <RepreEntity1>, <RepreEntity2>, ...
+                            ]
+                        }
+                    },
+                    ...
+                }
+            },
+            ...
+        }
+        output[asset_id]["subsets"][subset_id]["version"]["repres"]
+        ```
+        """
+
+        output = {}
+        if not asset_docs:
+            return output
+
+        asset_docs_by_ids = {asset["_id"]: asset for asset in asset_docs}
+
+        project_name = legacy_io.active_project()
+        subsets = list(get_subsets(
+            project_name, asset_ids=asset_docs_by_ids.keys()
+        ))
+        subset_entity_by_ids = {subset["_id"]: subset for subset in subsets}
+
+        last_version_by_subset_id = get_last_versions(
+            project_name, subset_entity_by_ids.keys()
+        )
+        last_version_docs_by_id = {
+            version["_id"]: version
+            for version in last_version_by_subset_id.values()
+        }
+        repre_docs = get_representations(
+            project_name, version_ids=last_version_docs_by_id.keys()
+        )
+
+        for repre_doc in repre_docs:
+            version_id = repre_doc["parent"]
+            version_doc = last_version_docs_by_id[version_id]
+
+            subset_id = version_doc["parent"]
+            subset_doc = subset_entity_by_ids[subset_id]
+
+            asset_id = subset_doc["parent"]
+            asset_doc = asset_docs_by_ids[asset_id]
+
+            if asset_id not in output:
+                output[asset_id] = {
+                    "asset_entity": asset_doc,
+                    "subsets": {}
+                }
+
+            if subset_id not in output[asset_id]["subsets"]:
+                output[asset_id]["subsets"][subset_id] = {
+                    "subset_entity": subset_doc,
+                    "version": {
+                        "version_entity": version_doc,
+                        "repres": []
+                    }
+                }
+
+            output[asset_id]["subsets"][subset_id]["version"]["repres"].append(
+                repre_doc
+            )
+
+        return output
diff --git a/openpype/pipeline/workfile/lock_workfile.py b/openpype/pipeline/workfile/lock_workfile.py
new file mode 100644
index 0000000000..579840c07d
--- /dev/null
+++ b/openpype/pipeline/workfile/lock_workfile.py
@@ -0,0 +1,74 @@
+import os
+import json
+
+from openpype.lib import Logger, filter_profiles
+from openpype.lib.pype_info import get_workstation_info
+from openpype.settings import get_project_settings
+from openpype.pipeline import get_process_id
+
+
+def _read_lock_file(lock_filepath):
+    if not os.path.exists(lock_filepath):
+        log = Logger.get_logger("_read_lock_file")
+        log.debug("lock file is not created or readable as expected!")
+        # Avoid raising on a missing file, callers treat this as "no data"
+        return {}
+
+    with open(lock_filepath, "r") as stream:
+        data = json.load(stream)
+    return data
+
+
+def _get_lock_file(filepath):
+    return filepath + ".oplock"
+
+
+def is_workfile_locked(filepath):
+    lock_filepath = _get_lock_file(filepath)
+    return os.path.exists(lock_filepath)
+
+
+def get_workfile_lock_data(filepath):
+    lock_filepath = _get_lock_file(filepath)
+    return _read_lock_file(lock_filepath)
+
+
+def is_workfile_locked_for_current_process(filepath):
+    if not is_workfile_locked(filepath):
+        return False
+
+    lock_filepath = _get_lock_file(filepath)
+    data = _read_lock_file(lock_filepath)
+    return data.get("process_id") == get_process_id()
+
+
+def delete_workfile_lock(filepath):
+    lock_filepath = _get_lock_file(filepath)
+    if os.path.exists(lock_filepath):
+        os.remove(lock_filepath)
+
+
+def create_workfile_lock(filepath):
+    lock_filepath = _get_lock_file(filepath)
+    info = get_workstation_info()
+    info["process_id"] = get_process_id()
+    with open(lock_filepath, "w") as stream:
+        json.dump(info, stream)
+
+
+def remove_workfile_lock(filepath):
+    if is_workfile_locked_for_current_process(filepath):
+        delete_workfile_lock(filepath)
+
+
+def is_workfile_lock_enabled(host_name, project_name, project_setting=None):
+    if project_setting is None:
+        project_setting = get_project_settings(project_name)
+    workfile_lock_profiles = (
+        project_setting
+        ["global"]
+        ["tools"]
+        ["Workfiles"]
+        ["workfile_lock_profiles"])
+    profile = filter_profiles(
+        workfile_lock_profiles, {"host_name": host_name}
+    )
+    if not profile:
+        return False
+    return profile["enabled"]
diff --git a/openpype/pipeline/workfile/path_resolving.py b/openpype/pipeline/workfile/path_resolving.py
new file mode 100644
index 0000000000..801cb7223c
--- /dev/null
+++ b/openpype/pipeline/workfile/path_resolving.py
@@ -0,0 +1,530 @@
+import os
+import re
+import copy
+import platform
+
+from openpype.client import get_project, get_asset_by_name
+from openpype.settings import get_project_settings
+from openpype.lib import (
+    filter_profiles,
+    Logger,
+    StringTemplate,
+)
+from openpype.pipeline import Anatomy
+from openpype.pipeline.template_data import get_template_data
+
+
+def get_workfile_template_key_from_context(
+    asset_name, task_name, host_name, project_name, project_settings=None
+):
+    """Helper function to get template key for workfile template.
+
+    Does the same as 'get_workfile_template_key' but for a "session
+    context" given by entity names.
+
+    Args:
+        asset_name (str): Name of asset document.
+        task_name (str): Task name for which is template key retrieved.
+            Must be available on asset document under 'data.tasks'.
+        host_name (str): Name of host implementation for which is workfile
+            used.
+        project_name (str): Project name where asset and task is.
+        project_settings (Dict[str, Any]): Project settings for passed
+            'project_name'. Not required at all but makes function faster.
+
+    Returns:
+        str: Workfile template key.
+    """
+
+    asset_doc = get_asset_by_name(
+        project_name, asset_name, fields=["data.tasks"]
+    )
+    asset_tasks = asset_doc.get("data", {}).get("tasks") or {}
+    task_info = asset_tasks.get(task_name) or {}
+    task_type = task_info.get("type")
+
+    return get_workfile_template_key(
+        task_type, host_name, project_name, project_settings
+    )
+
+
+def get_workfile_template_key(
+    task_type, host_name, project_name, project_settings=None
+):
+    """Workfile template key which should be used to get workfile template.
+
+    Function is using profiles from project settings to return the right
+    template for passed task type and host name.
+
+    Args:
+        task_type (str): Name of task type.
+        host_name (str): Name of host implementation (e.g. "maya", "nuke",
+            ...).
+        project_name (str): Name of project in which context should look for
+            settings.
+        project_settings (Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster.
+
+    Returns:
+        str: Workfile template key.
+    """
+
+    default = "work"
+    if not task_type or not host_name:
+        return default
+
+    if not project_settings:
+        project_settings = get_project_settings(project_name)
+
+    try:
+        profiles = (
+            project_settings
+            ["global"]
+            ["tools"]
+            ["Workfiles"]
+            ["workfile_template_profiles"]
+        )
+    except Exception:
+        profiles = []
+
+    if not profiles:
+        return default
+
+    profile_filter = {
+        "task_types": task_type,
+        "hosts": host_name
+    }
+    profile = filter_profiles(profiles, profile_filter)
+    if profile:
+        return profile["workfile_template"] or default
+    return default
+
+
+def get_workdir_with_workdir_data(
+    workdir_data,
+    project_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    It is possible to pass only the project's name instead of the project's
+    anatomy, but one of them **must** be entered. It is preferred to enter
+    anatomy if it is available, as initialization of a new Anatomy object
+    may be time consuming.
+
+    Args:
+        workdir_data (Dict[str, Any]): Data to fill workdir template.
+        project_name (str): Project's name.
+        anatomy (Anatomy): Anatomy object for specific project. Faster
+            processing if it is passed.
+        template_key (str): Key of work templates in anatomy templates. If
+            not passed, 'get_workfile_template_key_from_context' is used to
+            get it.
+        project_settings (Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_name)
+
+    if not template_key:
+        template_key = get_workfile_template_key(
+            workdir_data["task"]["type"],
+            workdir_data["app"],
+            workdir_data["project"]["name"],
+            project_settings
+        )
+
+    anatomy_filled = anatomy.format(workdir_data)
+    # Output is TemplateResult object which contains useful data
+    output = anatomy_filled[template_key]["folder"]
+    if output:
+        return output.normalized()
+    return output
+
+
+def get_workdir(
+    project_doc,
+    asset_doc,
+    task_name,
+    host_name,
+    anatomy=None,
+    template_key=None,
+    project_settings=None
+):
+    """Fill workdir path from entered data and project's anatomy.
+
+    Args:
+        project_doc (Dict[str, Any]): Mongo document of project from MongoDB.
+        asset_doc (Dict[str, Any]): Mongo document of asset from MongoDB.
+        task_name (str): Task name for which workdir data are prepared.
+        host_name (str): Host for which the workdir is resolved. This is
+            required because workdir template may contain '{app}' key. In
+            'Session' it is stored under 'AVALON_APP' key.
+        anatomy (Anatomy): Optional argument. Anatomy object is created
+            using project name from 'project_doc'. It is preferred to pass
+            this argument as initialization of a new Anatomy object may be
+            time consuming.
+        template_key (str): Key of work templates in anatomy templates.
+            Default value is defined in 'get_workdir_with_workdir_data'.
+        project_settings (Dict[str, Any]): Prepared project settings for
+            project name. Optional to make processing faster. It is used
+            only if 'template_key' is not passed.
+
+    Returns:
+        TemplateResult: Workdir path.
+    """
+
+    if not anatomy:
+        anatomy = Anatomy(project_doc["name"])
+
+    workdir_data = get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
+    # Output is TemplateResult object which contains useful data
+    return get_workdir_with_workdir_data(
+        workdir_data,
+        anatomy.project_name,
+        anatomy,
+        template_key,
+        project_settings
+    )
+
+
+def get_last_workfile_with_version(
+    workdir, file_template, fill_data, extensions
+):
+    """Return last workfile version.
+
+    Using the workfile template and its fill data, find the most likely
+    last version of the workfile which was created for the context.
+
+    Functionality is fully based on knowing which keys are optional and
+    what values are expected.
+
+    The last modified file is used if more than one file can be considered
+    the last workfile.
+
+    Args:
+        workdir (str): Path to dir where workfiles are stored.
+        file_template (str): Template of file name.
+        fill_data (Dict[str, Any]): Data for filling template.
+        extensions (Iterable[str]): All allowed file extensions of workfile.
+
+    Returns:
+        Tuple[Union[str, None], Union[int, None]]: Last workfile with
+            version if there is any workfile, otherwise None for both.
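+
+    Example:
+        Illustrative call, assuming 'animation_v001.ma' and
+        'animation_v002.ma' exist in the workdir::
+
+            get_last_workfile_with_version(
+                "/work/sh010/animation",
+                "{task[name]}_v{version:0>3}<_{comment}>.{ext}",
+                {"task": {"name": "animation"}},
+                ["ma"]
+            )
+            # -> ("animation_v002.ma", 2)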
+ """ + + if not os.path.exists(workdir): + return None, None + + dotted_extensions = set() + for ext in extensions: + if not ext.startswith("."): + ext = ".{}".format(ext) + dotted_extensions.add(ext) + + # Fast match on extension + filenames = [ + filename + for filename in os.listdir(workdir) + if os.path.splitext(filename)[-1] in dotted_extensions + ] + + # Build template without optionals, version to digits only regex + # and comment to any definable value. + # Escape extensions dot for regex + regex_exts = [ + "\\" + ext + for ext in dotted_extensions + ] + ext_expression = "(?:" + "|".join(regex_exts) + ")" + + # Replace `.{ext}` with `{ext}` so we are sure there is not dot at the end + file_template = re.sub(r"\.?{ext}", ext_expression, file_template) + # Replace optional keys with optional content regex + file_template = re.sub(r"<.*?>", r".*?", file_template) + # Replace `{version}` with group regex + file_template = re.sub(r"{version.*?}", r"([0-9]+)", file_template) + file_template = re.sub(r"{comment.*?}", r".+?", file_template) + file_template = StringTemplate.format_strict_template( + file_template, fill_data + ) + + # Match with ignore case on Windows due to the Windows + # OS not being case-sensitive. This avoids later running + # into the error that the file did exist if it existed + # with a different upper/lower-case. + kwargs = {} + if platform.system().lower() == "windows": + kwargs["flags"] = re.IGNORECASE + + # Get highest version among existing matching files + version = None + output_filenames = [] + for filename in sorted(filenames): + match = re.match(file_template, filename, **kwargs) + if not match: + continue + + if not match.groups(): + output_filenames.append(filename) + continue + + file_version = int(match.group(1)) + if version is None or file_version > version: + output_filenames[:] = [] + version = file_version + + if file_version == version: + output_filenames.append(filename) + + output_filename = None + if output_filenames: + if len(output_filenames) == 1: + output_filename = output_filenames[0] + else: + last_time = None + for _output_filename in output_filenames: + full_path = os.path.join(workdir, _output_filename) + mod_time = os.path.getmtime(full_path) + if last_time is None or last_time < mod_time: + output_filename = _output_filename + last_time = mod_time + + return output_filename, version + + +def get_last_workfile( + workdir, file_template, fill_data, extensions, full_path=False +): + """Return last workfile filename. + + Returns file with version 1 if there is not workfile yet. + + Args: + workdir(str): Path to dir where workfiles are stored. + file_template(str): Template of file name. + fill_data(Dict[str, Any]): Data for filling template. + extensions(Iterable[str]): All allowed file extensions of workfile. + full_path(bool): Full path to file is returned if set to True. + + Returns: + str: Last or first workfile as filename of full path to filename. 
+ """ + + filename, version = get_last_workfile_with_version( + workdir, file_template, fill_data, extensions + ) + if filename is None: + data = copy.deepcopy(fill_data) + data["version"] = 1 + data.pop("comment", None) + if not data.get("ext"): + data["ext"] = extensions[0] + data["ext"] = data["ext"].replace('.', '') + filename = StringTemplate.format_strict_template(file_template, data) + + if full_path: + return os.path.normpath(os.path.join(workdir, filename)) + + return filename + + +def get_custom_workfile_template( + project_doc, + asset_doc, + task_name, + host_name, + anatomy=None, + project_settings=None +): + """Filter and fill workfile template profiles by passed context. + + Custom workfile template can be used as first version of workfiles. + Template is a file on a disk which is set in settings. Expected settings + structure to have this feature enabled is: + project settings + |- + |- workfile_builder + |- create_first_version - a bool which must be set to 'True' + |- custom_templates - profiles based on task name/type which + points to a file which is copied as + first workfile + + It is expected that passed argument are already queried documents of + project and asset as parents of processing task name. + + Args: + project_doc (Dict[str, Any]): Project document from MongoDB. + asset_doc (Dict[str, Any]): Asset document from MongoDB. + task_name (str): Name of task for which templates are filtered. + host_name (str): Name of host. + anatomy (Anatomy): Optionally passed anatomy object for passed project + name. + project_settings(Dict[str, Any]): Preloaded project settings. + + Returns: + str: Path to template or None if none of profiles match current + context. Existence of formatted path is not validated. + None: If no profile is matching context. + """ + + log = Logger.get_logger("CustomWorkfileResolve") + + project_name = project_doc["name"] + if project_settings is None: + project_settings = get_project_settings(project_name) + + host_settings = project_settings.get(host_name) + if not host_settings: + log.info("Host \"{}\" doesn't have settings".format(host_name)) + return None + + workfile_builder_settings = host_settings.get("workfile_builder") + if not workfile_builder_settings: + log.info(( + "Seems like old version of settings is used." + " Can't access custom templates in host \"{}\"." + ).format(host_name)) + return + + if not workfile_builder_settings["create_first_version"]: + log.info(( + "Project \"{}\" has turned off to create first workfile for" + " host \"{}\"" + ).format(project_name, host_name)) + return + + # Backwards compatibility + template_profiles = workfile_builder_settings.get("custom_templates") + if not template_profiles: + log.info( + "Custom templates are not filled. Skipping template copy." 
+        )
+        return
+
+    if anatomy is None:
+        anatomy = Anatomy(project_name)
+
+    # get project, asset, task anatomy context data
+    anatomy_context_data = get_template_data(
+        project_doc, asset_doc, task_name, host_name
+    )
+    # add root dict
+    anatomy_context_data["root"] = anatomy.roots
+
+    # get task type for the task in context
+    current_task_type = anatomy_context_data["task"]["type"]
+
+    # get path from matching profile
+    matching_item = filter_profiles(
+        template_profiles,
+        {"task_types": current_task_type}
+    )
+    # when path is available try to format it in case
+    # there are some anatomy template strings
+    if matching_item:
+        # extend anatomy context with os.environ to
+        # also allow formatting against env
+        full_context_data = os.environ.copy()
+        full_context_data.update(anatomy_context_data)
+
+        template = matching_item["path"][platform.system().lower()]
+        return StringTemplate.format_strict_template(
+            template, full_context_data
+        ).normalized()
+
+    return None
+
+
+def get_custom_workfile_template_by_string_context(
+    project_name,
+    asset_name,
+    task_name,
+    host_name,
+    anatomy=None,
+    project_settings=None
+):
+    """Filter and fill workfile template profiles by passed context.
+
+    Passed context are string names of project, asset and task. The
+    function queries project and asset documents so it can use
+    'get_custom_workfile_template' for the rest of the logic.
+
+    Args:
+        project_name (str): Project name.
+        asset_name (str): Asset name.
+        task_name (str): Task name.
+        host_name (str): Name of host.
+        anatomy (Anatomy): Optionally prepared anatomy object for passed
+            project.
+        project_settings (Dict[str, Any]): Preloaded project settings.
+
+    Returns:
+        str: Path to template or None if none of the profiles match current
+            context. (Existence of formatted path is not validated.)
+        None: If no profile matches the context.
+    """
+
+    project_doc = get_project(project_name)
+    asset_doc = get_asset_by_name(project_name, asset_name)
+
+    return get_custom_workfile_template(
+        project_doc, asset_doc, task_name, host_name, anatomy,
+        project_settings
+    )
+
+
+def create_workdir_extra_folders(
+    workdir,
+    host_name,
+    task_type,
+    task_name,
+    project_name,
+    project_settings=None
+):
+    """Create extra folders in work directory based on context.
+
+    Args:
+        workdir (str): Path to workdir where workfiles are stored.
+        host_name (str): Name of host implementation.
+        task_type (str): Type of task for which extra folders should be
+            created.
+        task_name (str): Name of task for which extra folders should be
+            created.
+        project_name (str): Name of project on which the task is.
+        project_settings (dict): Prepared project settings. They are loaded
+            if not passed.
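+
+    Example:
+        Matching profile from project settings may look like (values are
+        illustrative)::
+
+            {
+                "task_types": ["Animation"],
+                "task_names": [],
+                "hosts": ["maya"],
+                "folders": ["renders", "exports"]
+            }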
+ """ + + # Load project settings if not set + if not project_settings: + project_settings = get_project_settings(project_name) + + # Load extra folders profiles + extra_folders_profiles = ( + project_settings["global"]["tools"]["Workfiles"]["extra_folders"] + ) + # Skip if are empty + if not extra_folders_profiles: + return + + # Prepare profiles filters + filter_data = { + "task_types": task_type, + "task_names": task_name, + "hosts": host_name + } + profile = filter_profiles(extra_folders_profiles, filter_data) + if profile is None: + return + + for subfolder in profile["folders"]: + # Make sure backslashes are converted to forwards slashes + # and does not start with slash + subfolder = subfolder.replace("\\", "/").lstrip("/") + # Skip empty strings + if not subfolder: + continue + + fullpath = os.path.join(workdir, subfolder) + if not os.path.exists(fullpath): + os.makedirs(fullpath) diff --git a/openpype/pipeline/workfile/workfile_template_builder.py b/openpype/pipeline/workfile/workfile_template_builder.py new file mode 100644 index 0000000000..119e4aaeb7 --- /dev/null +++ b/openpype/pipeline/workfile/workfile_template_builder.py @@ -0,0 +1,1705 @@ +"""Workfile build mechanism using workfile templates. + +Build templates are manually prepared using plugin definitions which create +placeholders inside the template which are populated on import. + +This approach is very explicit to achive very specific build logic that can be +targeted by task types and names. + +Placeholders are created using placeholder plugins which should care about +logic and data of placeholder items. 'PlaceholderItem' is used to keep track +about it's progress. +""" + +import os +import re +import collections +import copy +from abc import ABCMeta, abstractmethod + +import six + +from openpype.client import ( + get_asset_by_name, + get_linked_assets, + get_representations, +) +from openpype.settings import ( + get_project_settings, + get_system_settings, +) +from openpype.host import HostBase +from openpype.lib import ( + Logger, + StringTemplate, + filter_profiles, + attribute_definitions, +) +from openpype.lib.attribute_definitions import get_attributes_keys +from openpype.pipeline import legacy_io, Anatomy +from openpype.pipeline.load import ( + get_loaders_by_name, + get_contexts_for_repre_docs, + load_with_repre_context, +) +from openpype.pipeline.create import ( + discover_legacy_creator_plugins +) + + +class TemplateNotFound(Exception): + """Exception raised when template does not exist.""" + pass + + +class TemplateProfileNotFound(Exception): + """Exception raised when current profile + doesn't match any template profile""" + pass + + +class TemplateAlreadyImported(Exception): + """Error raised when Template was already imported by host for + this session""" + pass + + +class TemplateLoadFailed(Exception): + """Error raised whend Template loader was unable to load the template""" + pass + + +@six.add_metaclass(ABCMeta) +class AbstractTemplateBuilder(object): + """Abstraction of Template Builder. + + Builder cares about context, shared data, cache, discovery of plugins + and trigger logic. Provides public api for host workfile build systen. + + Rest of logic is based on plugins that care about collection and creation + of placeholder items. + + Population of placeholders happens in loops. Each loop will collect all + available placeholders, skip already populated, and populate the rest. + + Builder item has 2 types of shared data. 
+    refresh lifetime data, which are cleared on refresh, and populate
+    lifetime data, which are cleared after each loop of placeholder
+    population.
+
+    Args:
+        host (Union[HostBase, ModuleType]): Implementation of host.
+    """
+
+    _log = None
+
+    def __init__(self, host):
+        # Get host name
+        if isinstance(host, HostBase):
+            host_name = host.name
+        else:
+            host_name = os.environ.get("AVALON_APP")
+
+        self._host = host
+        self._host_name = host_name
+
+        # Shared data across placeholder plugins
+        self._shared_data = {}
+        self._shared_populate_data = {}
+
+        # Where created objects of placeholder plugins will be stored
+        self._placeholder_plugins = None
+        self._loaders_by_name = None
+        self._creators_by_name = None
+
+        self._system_settings = None
+        self._project_settings = None
+
+        self._current_asset_doc = None
+        self._linked_asset_docs = None
+        self._task_type = None
+
+    @property
+    def project_name(self):
+        return legacy_io.active_project()
+
+    @property
+    def current_asset_name(self):
+        return legacy_io.Session["AVALON_ASSET"]
+
+    @property
+    def current_task_name(self):
+        return legacy_io.Session["AVALON_TASK"]
+
+    @property
+    def system_settings(self):
+        if self._system_settings is None:
+            self._system_settings = get_system_settings()
+        return self._system_settings
+
+    @property
+    def project_settings(self):
+        if self._project_settings is None:
+            self._project_settings = get_project_settings(self.project_name)
+        return self._project_settings
+
+    @property
+    def current_asset_doc(self):
+        if self._current_asset_doc is None:
+            self._current_asset_doc = get_asset_by_name(
+                self.project_name, self.current_asset_name
+            )
+        return self._current_asset_doc
+
+    @property
+    def linked_asset_docs(self):
+        if self._linked_asset_docs is None:
+            self._linked_asset_docs = get_linked_assets(
+                self.current_asset_doc
+            )
+        return self._linked_asset_docs
+
+    @property
+    def current_task_type(self):
+        asset_doc = self.current_asset_doc
+        if not asset_doc:
+            return None
+        return (
+            asset_doc
+            .get("data", {})
+            .get("tasks", {})
+            .get(self.current_task_name, {})
+            .get("type")
+        )
+
+    def get_placeholder_plugin_classes(self):
+        """Get placeholder plugin classes that can be used to build template.
+
+        Default implementation looks for method
+        'get_workfile_build_placeholder_plugins' on host.
+
+        Returns:
+            List[PlaceholderPlugin]: Plugin classes available for host.
+        """
+
+        if hasattr(self._host, "get_workfile_build_placeholder_plugins"):
+            return self._host.get_workfile_build_placeholder_plugins()
+        return []
+
+    @property
+    def host(self):
+        """Access to host implementation.
+
+        Returns:
+            Union[HostBase, ModuleType]: Implementation of host.
+        """
+
+        return self._host
+
+    @property
+    def host_name(self):
+        """Name of 'host' implementation.
+
+        Returns:
+            str: Host's name.
+ """ + + return self._host_name + + @property + def log(self): + """Dynamically created logger for the plugin.""" + + if self._log is None: + self._log = Logger.get_logger(repr(self)) + return self._log + + def refresh(self): + """Reset cached data.""" + + self._placeholder_plugins = None + self._loaders_by_name = None + self._creators_by_name = None + + self._current_asset_doc = None + self._linked_asset_docs = None + self._task_type = None + + self._system_settings = None + self._project_settings = None + + self.clear_shared_data() + self.clear_shared_populate_data() + + def get_loaders_by_name(self): + if self._loaders_by_name is None: + self._loaders_by_name = get_loaders_by_name() + return self._loaders_by_name + + def get_creators_by_name(self): + if self._creators_by_name is None: + self._creators_by_name = {} + for creator in discover_legacy_creator_plugins(): + if not creator.enabled: + continue + creator_name = creator.__name__ + if creator_name in self._creators_by_name: + raise KeyError( + "Duplicated creator name {} !".format(creator_name) + ) + self._creators_by_name[creator_name] = creator + return self._creators_by_name + + def get_shared_data(self, key): + """Receive shared data across plugins and placeholders. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + return self._shared_data.get(key) + + def set_shared_data(self, key, value): + """Store share data across plugins and placeholders. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + self._shared_data[key] = value + + def clear_shared_data(self): + """Clear shared data. + + Method only clear shared data to default state. + """ + + self._shared_data = {} + + def clear_shared_populate_data(self): + """Receive shared data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + This can be used to scroll scene only once to look for placeholder + items if the storing is unified but each placeholder plugin would have + to call it again. + + Args: + key (str): Key under which are shared data stored. + + Returns: + Union[None, Any]: None if key was not set. + """ + + self._shared_populate_data = {} + + def get_shared_populate_data(self, key): + """Store share populate data across plugins and placeholders. + + These data are cleared after each loop of populating of template. + + Store data that can be afterwards accessed from any future call. It + is good practice to check if the same value is not already stored under + different key or if the key is not already used for something else. + + Key should be self explanatory to content. + - wrong: 'asset' + - good: 'asset_name' + + Args: + key (str): Key under which is key stored. + value (Any): Value that should be stored under the key. + """ + + return self._shared_populate_data.get(key) + + def set_shared_populate_data(self, key, value): + """Store share populate data across plugins and placeholders. 
+
+        These data are cleared after each loop of template population.
+
+        Store data that can be afterwards accessed from any future call. It
+        is good practice to check if the same value is not already stored
+        under a different key or if the key is not already used for
+        something else.
+
+        Key should be self explanatory to content.
+        - wrong: 'asset'
+        - good: 'asset_name'
+
+        Args:
+            key (str): Key under which the value is stored.
+            value (Any): Value that should be stored under the key.
+        """
+
+        self._shared_populate_data[key] = value
+
+    @property
+    def placeholder_plugins(self):
+        """Access to initialized placeholder plugins.
+
+        Returns:
+            List[PlaceholderPlugin]: Initialized plugins available for host.
+        """
+
+        if self._placeholder_plugins is None:
+            placeholder_plugins = {}
+            for cls in self.get_placeholder_plugin_classes():
+                try:
+                    plugin = cls(self)
+                    placeholder_plugins[plugin.identifier] = plugin
+
+                except Exception:
+                    self.log.warning(
+                        "Failed to initialize placeholder plugin {}".format(
+                            cls.__name__
+                        ),
+                        exc_info=True
+                    )
+
+            self._placeholder_plugins = placeholder_plugins
+        return self._placeholder_plugins
+
+    def create_placeholder(self, plugin_identifier, placeholder_data):
+        """Create new placeholder using plugin identifier and data.
+
+        Args:
+            plugin_identifier (str): Identifier of plugin. That's how the
+                builder knows which plugin should be used.
+            placeholder_data (Dict[str, Any]): Placeholder item data. They
+                should match options required by the plugin.
+
+        Returns:
+            PlaceholderItem: Created placeholder item.
+        """
+
+        plugin = self.placeholder_plugins[plugin_identifier]
+        return plugin.create_placeholder(placeholder_data)
+
+    def get_placeholders(self):
+        """Collect placeholder items from scene.
+
+        Each placeholder plugin can collect its placeholders and return
+        them. This method does not use cached values but always goes
+        through the scene.
+
+        Returns:
+            List[PlaceholderItem]: Sorted placeholder items.
+        """
+
+        placeholders = []
+        for placeholder_plugin in self.placeholder_plugins.values():
+            result = placeholder_plugin.collect_placeholders()
+            if result:
+                placeholders.extend(result)
+
+        return sorted(
+            placeholders,
+            key=lambda i: i.order
+        )
+
+    def build_template(
+        self,
+        template_path=None,
+        level_limit=None,
+        keep_placeholders=None
+    ):
+        """Main callback for building workfile from template path.
+
+        Todo:
+            Handle report of populated placeholders from
+            'populate_scene_placeholders' to be shown to a user.
+
+        Args:
+            template_path (str): Path to a template file with placeholders.
+                Template from settings ('get_template_preset') is used when
+                not passed.
+            level_limit (int): Limit of populate loops. Related to
+                'populate_scene_placeholders' method.
+            keep_placeholders (bool): Add flag to placeholder data for
+                hosts to decide if they want to remove
+                placeholder after it is used.
+        """
+
+        template_preset = self.get_template_preset()
+
+        if template_path is None:
+            template_path = template_preset["path"]
+
+        if keep_placeholders is None:
+            keep_placeholders = template_preset["keep_placeholder"]
+
+        self.import_template(template_path)
+        self.populate_scene_placeholders(
+            level_limit, keep_placeholders)
+
+    def rebuild_template(self):
+        """Go through existing placeholders in scene and update them.
+
+        This may not make sense for all plugin types, so this is optional
+        logic for plugins.
+
+        Note:
+            Logic is not importing the template again but using placeholders
+            that were already available. We should maybe change the method
+            name.
+
+        Question:
+            Should this also handle subloops as it is possible that another
+            template is loaded during processing?
+        """
+
+        if not self.placeholder_plugins:
+            self.log.info("There are no placeholder plugins available.")
+            return
+
+        placeholders = self.get_placeholders()
+        if not placeholders:
+            self.log.info("No placeholders were found.")
+            return
+
+        for placeholder in placeholders:
+            plugin = placeholder.plugin
+            plugin.repopulate_placeholder(placeholder)
+
+        self.clear_shared_populate_data()
+
+    @abstractmethod
+    def import_template(self, template_path):
+        """Import template in current host.
+
+        Should load the content of template into scene so
+        'populate_scene_placeholders' can be started.
+
+        Args:
+            template_path (str): Full path to the template file for current
+                task and host.
+        """
+
+        pass
+
+    def _prepare_placeholders(self, placeholders):
+        """Run preparation part for placeholders on plugins.
+
+        Args:
+            placeholders (List[PlaceholderItem]): Placeholder items that
+                will be processed.
+        """
+
+        # Prepare placeholder items by plugin
+        plugins_by_identifier = {}
+        placeholders_by_plugin_id = collections.defaultdict(list)
+        for placeholder in placeholders:
+            plugin = placeholder.plugin
+            identifier = plugin.identifier
+            plugins_by_identifier[identifier] = plugin
+            placeholders_by_plugin_id[identifier].append(placeholder)
+
+        # Plugin should prepare data for passed placeholders
+        for identifier, placeholders in placeholders_by_plugin_id.items():
+            plugin = plugins_by_identifier[identifier]
+            plugin.prepare_placeholders(placeholders)
+
+    def populate_scene_placeholders(
+        self, level_limit=None, keep_placeholders=None
+    ):
+        """Find placeholders in scene using plugins and process them.
+
+        This should happen after 'import_template'.
+
+        Available placeholders are collected from the scene and processed;
+        after that, shared data are cleared. Placeholder items are collected
+        again, and if there are any new ones, the loop happens again. The
+        number of loops can be limited with 'level_limit'.
+
+        Placeholders are marked as processed so they're not re-processed.
+        Placeholder's 'scene_identifier' is used to identify which
+        placeholders were already processed.
+
+        Args:
+            level_limit (int): Level of loops that can happen. Default is
+                1000.
+            keep_placeholders (bool): Add flag to placeholder data for
+                hosts to decide if they want to remove
+                placeholder after it is used.
+        """
+
+        if not self.placeholder_plugins:
+            self.log.warning("There are no placeholder plugins available.")
+            return
+
+        placeholders = self.get_placeholders()
+        if not placeholders:
+            self.log.warning("No placeholders were found.")
+            return
+
+        # Avoid infinite loop
+        # - 1000 iterations of placeholders processing must be enough
+        if not level_limit:
+            level_limit = 1000
+
+        placeholder_by_scene_id = {
+            placeholder.scene_identifier: placeholder
+            for placeholder in placeholders
+        }
+        all_processed = len(placeholders) == 0
+        # Counter is checked at the end of a loop so the loop happens at
+        # least once.
+        iter_counter = 0
+        while not all_processed:
+            filtered_placeholders = []
+            for placeholder in placeholders:
+                if placeholder.finished:
+                    continue
+
+                if placeholder.in_progress:
+                    self.log.warning(
+                        "Placeholder that should be processed"
+                        " is already in progress."
+                    ))
+                    continue
+
+                # Add flag for keeping placeholders in scene
+                # after they are processed
+                placeholder.data["keep_placeholder"] = keep_placeholders
+
+                filtered_placeholders.append(placeholder)
+
+            self._prepare_placeholders(filtered_placeholders)
+
+            for placeholder in filtered_placeholders:
+                placeholder.set_in_progress()
+                placeholder_plugin = placeholder.plugin
+                try:
+                    placeholder_plugin.populate_placeholder(placeholder)
+
+                except Exception as exc:
+                    self.log.warning(
+                        (
+                            "Failed to process placeholder {} with plugin {}"
+                        ).format(
+                            placeholder.scene_identifier,
+                            placeholder_plugin.__class__.__name__
+                        ),
+                        exc_info=True
+                    )
+                    placeholder.set_failed(exc)
+
+                placeholder.set_finished()
+
+            # Clear shared data before getting new placeholders
+            self.clear_shared_populate_data()
+
+            iter_counter += 1
+            if iter_counter >= level_limit:
+                break
+
+            all_processed = True
+            collected_placeholders = self.get_placeholders()
+            for placeholder in collected_placeholders:
+                identifier = placeholder.scene_identifier
+                if identifier in placeholder_by_scene_id:
+                    continue
+
+                all_processed = False
+                placeholder_by_scene_id[identifier] = placeholder
+                placeholders.append(placeholder)
+
+        self.refresh()
+
+    def _get_build_profiles(self):
+        """Get build profiles for workfile build template path.
+
+        Returns:
+            List[Dict[str, Any]]: Profiles for template path resolving.
+        """
+
+        return (
+            self.project_settings
+            [self.host_name]
+            ["templated_workfile_build"]
+            ["profiles"]
+        )
+
+    def get_template_preset(self):
+        """Unified way to receive the template preset using settings.
+
+        Method depends on '_get_build_profiles' which should return filter
+        profiles to resolve the path to a template. Default implementation
+        looks into host settings:
+        - 'project_settings/{host name}/templated_workfile_build/profiles'
+
+        Returns:
+            Dict[str, Any]: Dictionary with the resolved template 'path'
+                and a 'keep_placeholder' flag.
+
+        Raises:
+            TemplateProfileNotFound: When profiles are not filled.
+            TemplateLoadFailed: Profile was found but path is not set.
+            TemplateNotFound: Path was set but file does not exist.
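+
+        Example:
+            Illustrative only; the path value below is made up:
+
+            >>> preset = builder.get_template_preset()
+            >>> preset["path"]
+            '/studio/templates/maya/asset_template.ma'
+            >>> preset["keep_placeholder"]
+            True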
+        """
+
+        host_name = self.host_name
+        project_name = self.project_name
+        task_name = self.current_task_name
+        task_type = self.current_task_type
+
+        build_profiles = self._get_build_profiles()
+        profile = filter_profiles(
+            build_profiles,
+            {
+                "task_types": task_type,
+                "task_names": task_name
+            }
+        )
+
+        if not profile:
+            raise TemplateProfileNotFound((
+                "No matching profile found for task '{}' of type '{}' "
+                "with host '{}'"
+            ).format(task_name, task_type, host_name))
+
+        path = profile["path"]
+
+        # Switch to remove placeholders after they are used
+        keep_placeholder = profile.get("keep_placeholder")
+        # Backward compatibility, since default is True
+        if keep_placeholder is None:
+            keep_placeholder = True
+
+        if not path:
+            raise TemplateLoadFailed((
+                "Template path is not set.\n"
+                "Path needs to be set in {}\\Template Workfile Build "
+                "Settings\\Profiles"
+            ).format(host_name.title()))
+
+        # Try to fill path with environments and anatomy roots
+        anatomy = Anatomy(project_name)
+        fill_data = {
+            key: value
+            for key, value in os.environ.items()
+        }
+
+        fill_data["root"] = anatomy.roots
+        fill_data["project"] = {
+            "name": project_name,
+            "code": anatomy["attributes"]["code"]
+        }
+
+        result = StringTemplate.format_template(path, fill_data)
+        if result.solved:
+            path = result.normalized()
+
+        if path and os.path.exists(path):
+            self.log.info("Found template at: '{}'".format(path))
+            return {
+                "path": path,
+                "keep_placeholder": keep_placeholder
+            }
+
+        solved_path = None
+        while True:
+            try:
+                solved_path = anatomy.path_remapper(path)
+            except KeyError as missing_key:
+                raise KeyError(
+                    "Could not solve key '{}' in template path '{}'".format(
+                        missing_key, path))
+
+            if solved_path is None:
+                solved_path = path
+            if solved_path == path:
+                break
+            path = solved_path
+
+        solved_path = os.path.normpath(solved_path)
+        if not os.path.exists(solved_path):
+            raise TemplateNotFound(
+                "Template found in OpenPype settings for task '{}' with host "
+                "'{}' does not exist. (Not found: {})".format(
+                    task_name, host_name, solved_path))
+
+        self.log.info("Found template at: '{}'".format(solved_path))
+
+        return {
+            "path": solved_path,
+            "keep_placeholder": keep_placeholder
+        }
+
+
+@six.add_metaclass(ABCMeta)
+class PlaceholderPlugin(object):
+    """Plugin which takes care of placeholder item logic.
+
+    The plugin creates and updates placeholders in the scene and populates
+    them on template import. Populating means that logic happens in the
+    scene based on placeholder data. The most common logic is to load
+    representations using loaders or to create instances in the scene.
+    """
+
+    label = None
+    _log = None
+
+    def __init__(self, builder):
+        self._builder = builder
+
+    @property
+    def builder(self):
+        """Access to builder which initialized the plugin.
+
+        Returns:
+            AbstractTemplateBuilder: Builder which initialized the plugin.
+        """
+
+        return self._builder
+
+    @property
+    def project_name(self):
+        return self._builder.project_name
+
+    @property
+    def log(self):
+        """Dynamically created logger for the plugin."""
+
+        if self._log is None:
+            self._log = Logger.get_logger(repr(self))
+        return self._log
+
+    @property
+    def identifier(self):
+        """Identifier which will be stored to placeholder.
+
+        Default implementation uses class name.
+
+        Returns:
+            str: Unique identifier of placeholder plugin.
+        """
+
+        return self.__class__.__name__
+
+    @abstractmethod
+    def create_placeholder(self, placeholder_data):
+        """Create new placeholder in scene and get its item.
+
+        It is up to the plugin implementation whether the placeholder uses
+        the scene selection or creates a new node.
+
+        Args:
+            placeholder_data (Dict[str, Any]): Data that were created
+                based on attribute definitions from
+                'get_placeholder_options'.
+
+        Returns:
+            PlaceholderItem: Created placeholder item.
+        """
+
+        pass
+
+    @abstractmethod
+    def update_placeholder(self, placeholder_item, placeholder_data):
+        """Update placeholder item with new data.
+
+        New data should be propagated to the placeholder item object itself
+        and also into the scene.
+
+        Reason:
+            Some placeholder plugins may require a special way of
+            propagating the updates to the object.
+
+        Args:
+            placeholder_item (PlaceholderItem): Object of placeholder that
+                should be updated.
+            placeholder_data (Dict[str, Any]): Data related to placeholder.
+                Should match plugin options.
+        """
+
+        pass
+
+    @abstractmethod
+    def collect_placeholders(self):
+        """Collect placeholders from scene.
+
+        Returns:
+            List[PlaceholderItem]: Placeholder objects.
+        """
+
+        pass
+
+    def get_placeholder_options(self, options=None):
+        """Placeholder options shown to the user.
+
+        Returns:
+            List[AbstractAttrDef]: Attribute definitions of
+                placeholder options.
+        """
+
+        return []
+
+    def get_placeholder_keys(self):
+        """Get placeholder keys that are stored in scene.
+
+        Returns:
+            Set[str]: Placeholder keys that are stored in the scene.
+        """
+
+        option_keys = get_attributes_keys(self.get_placeholder_options())
+        option_keys.add("plugin_identifier")
+        return option_keys
+
+    def prepare_placeholders(self, placeholders):
+        """Preparation part of placeholders.
+
+        Args:
+            placeholders (List[PlaceholderItem]): List of placeholders that
+                will be processed.
+        """
+
+        pass
+
+    @abstractmethod
+    def populate_placeholder(self, placeholder):
+        """Process single placeholder item.
+
+        Processing of placeholders is defined by their order, thus they
+        can't be processed in batch.
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder that should be
+                processed.
+        """
+
+        pass
+
+    def repopulate_placeholder(self, placeholder):
+        """Update scene with current context for passed placeholder.
+
+        Can be used to re-run placeholder logic (if it makes sense).
+        """
+
+        pass
+
+    def get_plugin_shared_data(self, key):
+        """Receive shared data across plugin and placeholders.
+
+        Uses shared data from the builder, stored under the plugin
+        identifier.
+
+        Args:
+            key (str): Key under which are shared data stored.
+
+        Returns:
+            Union[None, Any]: None if key was not set.
+        """
+
+        plugin_data = self.builder.get_shared_data(self.identifier)
+        if plugin_data is None:
+            return None
+        return plugin_data.get(key)
+
+    def set_plugin_shared_data(self, key, value):
+        """Store shared data across plugin and placeholders.
+
+        Uses shared data from the builder, stored under the plugin
+        identifier.
+
+        Key should be self-explanatory about its content.
+        - wrong: 'asset'
+        - good: 'asset_name'
+
+        Args:
+            key (str): Key under which the value is stored.
+            value (Any): Value that should be stored under the key.
+        """
+
+        plugin_data = self.builder.get_shared_data(self.identifier)
+        if plugin_data is None:
+            plugin_data = {}
+        plugin_data[key] = value
+        self.builder.set_shared_data(self.identifier, plugin_data)
+
+    def get_plugin_shared_populate_data(self, key):
+        """Receive shared data across plugin and placeholders.
+
+        Uses shared populate data from the builder, stored under the plugin
+        identifier.
+
+        Shared populate data are cleared during the populate loop.
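+
+        Example:
+            Illustrative caching of an expensive lookup for one populate
+            loop ('asset_docs' and 'query_asset_docs' are hypothetical):
+
+            >>> docs = self.get_plugin_shared_populate_data("asset_docs")
+            >>> if docs is None:
+            ...     docs = query_asset_docs()
+            ...     self.set_plugin_shared_populate_data("asset_docs", docs)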
+
+        Args:
+            key (str): Key under which are shared data stored.
+
+        Returns:
+            Union[None, Any]: None if key was not set.
+        """
+
+        plugin_data = self.builder.get_shared_populate_data(self.identifier)
+        if plugin_data is None:
+            return None
+        return plugin_data.get(key)
+
+    def set_plugin_shared_populate_data(self, key, value):
+        """Store shared data across plugin and placeholders.
+
+        Uses shared populate data from the builder, stored under the plugin
+        identifier.
+
+        Key should be self-explanatory about its content.
+        - wrong: 'asset'
+        - good: 'asset_name'
+
+        Shared populate data are cleared during the populate loop.
+
+        Args:
+            key (str): Key under which the value is stored.
+            value (Any): Value that should be stored under the key.
+        """
+
+        plugin_data = self.builder.get_shared_populate_data(self.identifier)
+        if plugin_data is None:
+            plugin_data = {}
+        plugin_data[key] = value
+        self.builder.set_shared_populate_data(self.identifier, plugin_data)
+
+
+class PlaceholderItem(object):
+    """Item representing a single placeholder in the scene to process.
+
+    Items are always created and updated by their plugins. Each plugin can
+    use a subclass of 'PlaceholderItem', but only to add more options, not
+    to change its behavior.
+
+    The scene identifier is used to avoid processing the placeholder item
+    multiple times, so it must be unique across the whole workfile builder.
+
+    Args:
+        scene_identifier (str): Unique scene identifier. If placeholder is
+            created from the same "node" it must have same identifier.
+        data (Dict[str, Any]): Data related to placeholder, defined by the
+            plugin.
+        plugin (PlaceholderPlugin): Plugin which created the placeholder
+            item.
+    """
+
+    default_order = 100
+
+    def __init__(self, scene_identifier, data, plugin):
+        self._log = None
+        self._scene_identifier = scene_identifier
+        self._data = data
+        self._plugin = plugin
+
+        # Keep track about state of placeholder process
+        self._state = 0
+
+        # Error messages to be shown in UI
+        # - all other messages should be logged
+        self._errors = []  # -> List[str]
+
+    @property
+    def plugin(self):
+        """Access to plugin which created placeholder.
+
+        Returns:
+            PlaceholderPlugin: Plugin object.
+        """
+
+        return self._plugin
+
+    @property
+    def builder(self):
+        """Access to builder.
+
+        Returns:
+            AbstractTemplateBuilder: Builder which is the top part of
+                placeholder.
+        """
+
+        return self.plugin.builder
+
+    @property
+    def data(self):
+        """Placeholder data which can modify how placeholder is processed.
+
+        Possible general keys
+        - order: Defines the order in which the placeholder is processed.
+            Lower == earlier.
+
+        Other keys are defined by the placeholder plugin, which should
+        validate them on item creation.
+
+        Returns:
+            Dict[str, Any]: Placeholder item data.
+        """
+
+        return self._data
+
+    def to_dict(self):
+        """Create copy of item's data.
+
+        Returns:
+            Dict[str, Any]: Placeholder data.
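+
+        Example:
+            The returned dictionary is a deep copy, so mutating it does not
+            affect the item's own data:
+
+            >>> data = item.to_dict()
+            >>> data["order"] = 0  # 'item.data' stays unchanged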
+        """
+
+        return copy.deepcopy(self.data)
+
+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger(repr(self))
+        return self._log
+
+    def __repr__(self):
+        name = None
+        if hasattr(self, "name"):
+            name = self.name
+        if name is None and hasattr(self, "_scene_identifier"):
+            name = self._scene_identifier
+
+        return "< {} {} >".format(self.__class__.__name__, name)
+
+    @property
+    def order(self):
+        """Order of item processing."""
+
+        order = self._data.get("order")
+        if order is None:
+            return self.default_order
+        return order
+
+    @property
+    def scene_identifier(self):
+        return self._scene_identifier
+
+    @property
+    def finished(self):
+        """Item was already processed."""
+
+        return self._state == 2
+
+    @property
+    def in_progress(self):
+        """Processing is in progress."""
+
+        return self._state == 1
+
+    def set_in_progress(self):
+        """Change to in progress state."""
+
+        self._state = 1
+
+    def set_finished(self):
+        """Change to finished state."""
+
+        self._state = 2
+
+    def set_failed(self, exception):
+        """Mark item as failed and store the exception message."""
+
+        self.add_error(str(exception))
+
+    def add_error(self, error):
+        """Add an error message to be shown in UI."""
+
+        self._errors.append(error)
+
+    def get_errors(self):
+        """Error messages with which the placeholder process failed.
+
+        Returns:
+            List[str]: Error messages.
+        """
+
+        return self._errors
+
+
+class PlaceholderLoadMixin(object):
+    """Mixin prepared for loading placeholder plugins.
+
+    Implementation prepares options for placeholders with
+    'get_load_plugin_options'.
+
+    Placeholder population is implemented by 'populate_load_placeholder'.
+
+    PlaceholderItem can have implemented methods:
+    - 'load_failed' - called when loading of one representation failed
+    - 'load_succeed' - called when loading of one representation succeeded
+    """
+
+    def get_load_plugin_options(self, options=None):
+        """Unified attribute definitions for load placeholder.
+
+        Common function for placeholder plugins used for loading of
+        representations. Use it in 'get_placeholder_options'.
+
+        Args:
+            options (Dict[str, Any]): Already available options which are
+                used as defaults for attributes.
+
+        Returns:
+            List[AbstractAttrDef]: Attribute definitions common for load
+                plugins.
+        """
+
+        loaders_by_name = self.builder.get_loaders_by_name()
+        loader_items = [
+            {"value": loader_name, "label": loader.label or loader_name}
+            for loader_name, loader in loaders_by_name.items()
+        ]
+
+        loader_items = list(sorted(loader_items, key=lambda i: i["label"]))
+        options = options or {}
+        return [
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Main attributes"),
+            attribute_definitions.UISeparatorDef(),
+
+            attribute_definitions.EnumDef(
+                "builder_type",
+                label="Asset Builder Type",
+                default=options.get("builder_type"),
+                items=[
+                    {"label": "Current asset", "value": "context_asset"},
+                    {"label": "Linked assets", "value": "linked_asset"},
+                    {"label": "All assets", "value": "all_assets"},
+                ],
+                tooltip=(
+                    "Asset Builder Type\n"
+                    "\nBuilder type describes what the template loader will"
+                    " look for."
+                    "\ncontext_asset : Template loader will look for subsets"
+                    " of current context asset (Asset bob will find asset)"
+                    "\nlinked_asset : Template loader will look for assets"
+                    " linked to current context asset."
+                    "\nLinked assets are looked up in the database under"
+                    " field \"inputLinks\""
+                )
+            ),
+            attribute_definitions.TextDef(
+                "family",
+                label="Family",
+                default=options.get("family"),
+                placeholder="model, look, ..."
+            ),
+            attribute_definitions.TextDef(
+                "representation",
+                label="Representation name",
+                default=options.get("representation"),
+                placeholder="ma, abc, ..."
+            ),
+            attribute_definitions.EnumDef(
+                "loader",
+                label="Loader",
+                default=options.get("loader"),
+                items=loader_items,
+                tooltip=(
+                    "Loader"
+                    "\nDefines what OpenPype loader will be used to"
+                    " load assets."
+                    "\nUsable loaders depend on current host's loader list."
+                    "\nField is case sensitive."
+                )
+            ),
+            attribute_definitions.TextDef(
+                "loader_args",
+                label="Loader Arguments",
+                default=options.get("loader_args"),
+                placeholder='{"camera":"persp", "lights":True}',
+                tooltip=(
+                    "Loader"
+                    "\nDefines a dictionary of arguments used to load assets."
+                    "\nUsable arguments depend on current placeholder Loader."
+                    "\nField should be a valid python dict."
+                    " Anything else will be ignored."
+                )
+            ),
+            attribute_definitions.NumberDef(
+                "order",
+                label="Order",
+                default=options.get("order") or 0,
+                decimals=0,
+                minimum=0,
+                maximum=999,
+                tooltip=(
+                    "Order"
+                    "\nOrder defines asset loading priority (0 to 999)"
+                    "\nPriority rule is: \"lowest is first to load\"."
+                )
+            ),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Optional attributes"),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.TextDef(
+                "asset",
+                label="Asset filter",
+                default=options.get("asset"),
+                placeholder="regex filtering by asset name",
+                tooltip=(
+                    "Filtering assets by matching field regex to asset's name"
+                )
+            ),
+            attribute_definitions.TextDef(
+                "subset",
+                label="Subset filter",
+                default=options.get("subset"),
+                placeholder="regex filtering by subset name",
+                tooltip=(
+                    "Filtering assets by matching field regex to subset's name"
+                )
+            ),
+            attribute_definitions.TextDef(
+                "hierarchy",
+                label="Hierarchy filter",
+                default=options.get("hierarchy"),
+                placeholder="regex filtering by asset's hierarchy",
+                tooltip=(
+                    "Filtering assets by matching regex to asset's hierarchy"
+                )
+            )
+        ]
+
+    def parse_loader_args(self, loader_args):
+        """Helper function to parse a string of loader arguments.
+
+        Empty dictionary is returned if conversion fails.
+
+        Args:
+            loader_args (str): Loader args filled by user.
+
+        Returns:
+            Dict[str, Any]: Parsed arguments used as dictionary.
+        """
+
+        if not loader_args:
+            return {}
+
+        try:
+            parsed_args = eval(loader_args)
+            if isinstance(parsed_args, dict):
+                return parsed_args
+
+        except Exception as err:
+            print(
+                "Error while parsing loader arguments '{}'.\n{}: {}\n\n"
+                "Continuing with default arguments. . .".format(
+                    loader_args, err.__class__.__name__, err))
+
+        return {}
+
+    def _get_representations(self, placeholder):
+        """Prepare query of representations based on load options.
+
+        This function is directly connected to options defined in
+        'get_load_plugin_options'.
+
+        Note:
+            This returns all representation documents from all versions of
+            the matching subset. To filter for the last version use
+            '_reduce_last_version_repre_docs'.
+
+        Args:
+            placeholder (PlaceholderItem): Item which should be populated.
+
+        Returns:
+            List[Dict[str, Any]]: Representation documents matching filters
+                from placeholder data.
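+
+        Example:
+            Illustrative shape of the placeholder data driving the query
+            (all values are made up):
+
+            >>> placeholder.data
+            {"builder_type": "context_asset", "family": "model",
+             "representation": "abc", "asset": ".*", "subset": ".*",
+             "hierarchy": ".*"}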
+        """
+
+        project_name = self.builder.project_name
+        current_asset_doc = self.builder.current_asset_doc
+        linked_asset_docs = self.builder.linked_asset_docs
+
+        builder_type = placeholder.data["builder_type"]
+        if builder_type == "context_asset":
+            context_filters = {
+                "asset": [current_asset_doc["name"]],
+                "subset": [re.compile(placeholder.data["subset"])],
+                "hierarchy": [re.compile(placeholder.data["hierarchy"])],
+                "representation": [placeholder.data["representation"]],
+                "family": [placeholder.data["family"]]
+            }
+
+        elif builder_type != "linked_asset":
+            context_filters = {
+                "asset": [re.compile(placeholder.data["asset"])],
+                "subset": [re.compile(placeholder.data["subset"])],
+                "hierarchy": [re.compile(placeholder.data["hierarchy"])],
+                "representation": [placeholder.data["representation"]],
+                "family": [placeholder.data["family"]]
+            }
+
+        else:
+            asset_regex = re.compile(placeholder.data["asset"])
+            linked_asset_names = []
+            for asset_doc in linked_asset_docs:
+                asset_name = asset_doc["name"]
+                if asset_regex.match(asset_name):
+                    linked_asset_names.append(asset_name)
+
+            context_filters = {
+                "asset": linked_asset_names,
+                "subset": [re.compile(placeholder.data["subset"])],
+                "hierarchy": [re.compile(placeholder.data["hierarchy"])],
+                "representation": [placeholder.data["representation"]],
+                "family": [placeholder.data["family"]],
+            }
+
+        return list(get_representations(
+            project_name,
+            context_filters=context_filters
+        ))
+
+    def _before_repre_load(self, placeholder, representation):
+        """Can be overridden. Called before a representation is loaded."""
+
+        pass
+
+    def _reduce_last_version_repre_docs(self, representations):
+        """Reduce representations to the last version."""
+
+        mapping = {}
+        for repre_doc in representations:
+            repre_context = repre_doc["context"]
+
+            asset_name = repre_context["asset"]
+            subset_name = repre_context["subset"]
+            version = repre_context.get("version", -1)
+
+            if asset_name not in mapping:
+                mapping[asset_name] = {}
+
+            subset_mapping = mapping[asset_name]
+            if subset_name not in subset_mapping:
+                subset_mapping[subset_name] = collections.defaultdict(list)
+
+            version_mapping = subset_mapping[subset_name]
+            version_mapping[version].append(repre_doc)
+
+        output = []
+        for subset_mapping in mapping.values():
+            for version_mapping in subset_mapping.values():
+                last_version = tuple(sorted(version_mapping.keys()))[-1]
+                output.extend(version_mapping[last_version])
+        return output
+
+    def populate_load_placeholder(self, placeholder, ignore_repre_ids=None):
+        """Load placeholder is going to load matching representations.
+
+        Note:
+            The 'ignore_repre_ids' argument is used to avoid loading the
+            same representation again on load. But the representation can
+            be loaded with a different loader, and a new version of the
+            matching subset could be published for the representation. We
+            should maybe expect containers.
+
+            Also import loaders don't have containers at all...
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder item with information
+                about requested representations.
+            ignore_repre_ids (Iterable[Union[str, ObjectId]]): Representation
+                ids that should be skipped.
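+
+        Example:
+            Illustrative call from a plugin's 'populate_placeholder'
+            ('already_loaded_ids' is a hypothetical set of representation
+            ids processed before):
+
+            >>> self.populate_load_placeholder(
+            ...     placeholder, ignore_repre_ids=already_loaded_ids)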
+        """
+
+        if ignore_repre_ids is None:
+            ignore_repre_ids = set()
+
+        # TODO check loader existence
+        loader_name = placeholder.data["loader"]
+        loader_args = placeholder.data["loader_args"]
+
+        placeholder_representations = self._get_representations(placeholder)
+
+        filtered_representations = []
+        for representation in self._reduce_last_version_repre_docs(
+            placeholder_representations
+        ):
+            repre_id = str(representation["_id"])
+            if repre_id not in ignore_repre_ids:
+                filtered_representations.append(representation)
+
+        if not filtered_representations:
+            self.log.info((
+                "There's no representation for this placeholder: {}"
+            ).format(placeholder.scene_identifier))
+            return
+
+        repre_load_contexts = get_contexts_for_repre_docs(
+            self.project_name, filtered_representations
+        )
+        loaders_by_name = self.builder.get_loaders_by_name()
+        for repre_load_context in repre_load_contexts.values():
+            representation = repre_load_context["representation"]
+            repre_context = representation["context"]
+            self._before_repre_load(
+                placeholder, representation
+            )
+            self.log.info(
+                "Loading {} from {} with loader {}\n"
+                "Loader arguments used: {}".format(
+                    repre_context["subset"],
+                    repre_context["asset"],
+                    loader_name,
+                    loader_args
+                )
+            )
+            try:
+                container = load_with_repre_context(
+                    loaders_by_name[loader_name],
+                    repre_load_context,
+                    options=self.parse_loader_args(loader_args)
+                )
+
+            except Exception:
+                failed = True
+                self.load_failed(placeholder, representation)
+
+            else:
+                failed = False
+                self.load_succeed(placeholder, container)
+            self.cleanup_placeholder(placeholder, failed)
+
+    def load_failed(self, placeholder, representation):
+        if hasattr(placeholder, "load_failed"):
+            placeholder.load_failed(representation)
+
+    def load_succeed(self, placeholder, container):
+        if hasattr(placeholder, "load_succeed"):
+            placeholder.load_succeed(container)
+
+    def cleanup_placeholder(self, placeholder, failed):
+        """Cleanup placeholder after load of a single representation.
+
+        Can be called multiple times during placeholder item populating and
+        is called even if loading failed.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to load
+                representation.
+            failed (bool): Loading of representation failed.
+        """
+
+        pass
+
+
+class PlaceholderCreateMixin(object):
+    """Mixin prepared for creating placeholder plugins.
+
+    Implementation prepares options for placeholders with
+    'get_create_plugin_options'.
+
+    Placeholder population is implemented by 'populate_create_placeholder'.
+
+    PlaceholderItem can have implemented methods:
+    - 'create_failed' - called when creating of an instance failed
+    - 'create_succeed' - called when creating of an instance succeeded
+    """
+
+    def get_create_plugin_options(self, options=None):
+        """Unified attribute definitions for create placeholder.
+
+        Common function for placeholder plugins used for creating of
+        publishable instances. Use it with 'get_placeholder_options'.
+
+        Args:
+            options (Dict[str, Any]): Already available options which are
+                used as defaults for attributes.
+
+        Returns:
+            List[AbstractAttrDef]: Attribute definitions common for create
+                plugins.
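+
+        Example:
+            An illustrative sketch of the intended usage inside a plugin's
+            'get_placeholder_options' (the override below is hypothetical):
+
+            >>> def get_placeholder_options(self, options=None):
+            ...     return self.get_create_plugin_options(options)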
+        """
+
+        creators_by_name = self.builder.get_creators_by_name()
+
+        creator_items = [
+            (creator_name, creator.label or creator_name)
+            for creator_name, creator in creators_by_name.items()
+        ]
+
+        creator_items.sort(key=lambda i: i[1])
+        options = options or {}
+        return [
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.UILabelDef("Main attributes"),
+            attribute_definitions.UISeparatorDef(),
+
+            attribute_definitions.EnumDef(
+                "creator",
+                label="Creator",
+                default=options.get("creator"),
+                items=creator_items,
+                tooltip=(
+                    "Creator"
+                    "\nDefines what OpenPype creator will be used to"
+                    " create publishable instance."
+                    "\nUsable creators depend on current host's creator list."
+                    "\nField is case sensitive."
+                )
+            ),
+            attribute_definitions.TextDef(
+                "create_variant",
+                label="Variant",
+                default=options.get("create_variant"),
+                placeholder='Main',
+                tooltip=(
+                    "Creator"
+                    "\nDefines variant name which will be used for"
+                    " compiling of the subset name."
+                )
+            ),
+            attribute_definitions.UISeparatorDef(),
+            attribute_definitions.NumberDef(
+                "order",
+                label="Order",
+                default=options.get("order") or 0,
+                decimals=0,
+                minimum=0,
+                maximum=999,
+                tooltip=(
+                    "Order"
+                    "\nOrder defines instance creation priority (0 to 999)"
+                    "\nPriority rule is: \"lowest is first to create\"."
+                )
+            )
+        ]
+
+    def populate_create_placeholder(self, placeholder):
+        """Create placeholder is going to create matching publishable
+        instance.
+
+        Args:
+            placeholder (PlaceholderItem): Placeholder item with information
+                about requested publishable instance.
+        """
+        creator_name = placeholder.data["creator"]
+        create_variant = placeholder.data["create_variant"]
+
+        creator_plugin = self.builder.get_creators_by_name()[creator_name]
+
+        # Create subset name
+        project_name = legacy_io.Session["AVALON_PROJECT"]
+        task_name = legacy_io.Session["AVALON_TASK"]
+        asset_name = legacy_io.Session["AVALON_ASSET"]
+
+        # Get asset id
+        asset_doc = get_asset_by_name(project_name, asset_name, fields=["_id"])
+        assert asset_doc, "No current asset found in Session"
+        asset_id = asset_doc["_id"]
+
+        subset_name = creator_plugin.get_subset_name(
+            create_variant,
+            task_name,
+            asset_id,
+            project_name
+        )
+
+        creator_data = {
+            "creator_name": creator_name,
+            "create_variant": create_variant,
+            "subset_name": subset_name,
+            "creator_plugin": creator_plugin
+        }
+
+        self._before_instance_create(placeholder)
+
+        # Compile subset name from variant
+        try:
+            creator_instance = creator_plugin(
+                subset_name,
+                asset_name
+            ).process()
+
+        except Exception:
+            failed = True
+            self.create_failed(placeholder, creator_data)
+
+        else:
+            failed = False
+            self.create_succeed(placeholder, creator_instance)
+
+        self.cleanup_placeholder(placeholder, failed)
+
+    def create_failed(self, placeholder, creator_data):
+        if hasattr(placeholder, "create_failed"):
+            placeholder.create_failed(creator_data)
+
+    def create_succeed(self, placeholder, creator_instance):
+        if hasattr(placeholder, "create_succeed"):
+            placeholder.create_succeed(creator_instance)
+
+    def cleanup_placeholder(self, placeholder, failed):
+        """Cleanup placeholder after creation of a single instance.
+
+        Can be called multiple times during placeholder item populating and
+        is called even if creation failed.
+
+        Args:
+            placeholder (PlaceholderItem): Item which was just used to
+                create an instance.
+            failed (bool): Creation of the instance failed.
+        """
+
+        pass
+
+    def _before_instance_create(self, placeholder):
+        """Can be overridden.
+        Is called before instance is created.
+        """
+
+        pass
+
+
+class LoadPlaceholderItem(PlaceholderItem):
+    """PlaceholderItem for plugins which load representations.
+
+    Connected to 'PlaceholderLoadMixin'.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(LoadPlaceholderItem, self).__init__(*args, **kwargs)
+        self._failed_representations = []
+
+    def get_errors(self):
+        if not self._failed_representations:
+            return []
+        message = (
+            "Failed to load {} representations using Loader {}"
+        ).format(
+            len(self._failed_representations),
+            self.data["loader"]
+        )
+        return [message]
+
+    def load_failed(self, representation):
+        self._failed_representations.append(representation)
+
+
+class CreatePlaceholderItem(PlaceholderItem):
+    """PlaceholderItem for plugins which create publish instances.
+
+    Connected to 'PlaceholderCreateMixin'.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(CreatePlaceholderItem, self).__init__(*args, **kwargs)
+        self._failed_created_publish_instances = []
+
+    def get_errors(self):
+        if not self._failed_created_publish_instances:
+            return []
+        message = (
+            "Failed to create {} instances using Creator {}"
+        ).format(
+            len(self._failed_created_publish_instances),
+            self.data["creator"]
+        )
+        return [message]
+
+    def create_failed(self, creator_data):
+        self._failed_created_publish_instances.append(creator_data)
diff --git a/openpype/plugin.py b/openpype/plugin.py
index 3569936dac..7e906b4451 100644
--- a/openpype/plugin.py
+++ b/openpype/plugin.py
@@ -1,25 +1,91 @@
-import tempfile
-import os
-import pyblish.api
-import avalon.api
+import functools
+import warnings
+import pyblish.api
+
+# New location of orders: openpype.pipeline.publish.constants
+# - can be imported as
+#   'from openpype.pipeline.publish import ValidatePipelineOrder'
 ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05
 ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1
 ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2
 ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
 
 
+class PluginDeprecatedWarning(DeprecationWarning):
+    pass
+
+
+def _deprecation_warning(item_name, warning_message):
+    warnings.simplefilter("always", PluginDeprecatedWarning)
+    warnings.warn(
+        (
+            "Call to deprecated function '{}'"
+            "\nFunction was moved or removed.{}"
+        ).format(item_name, warning_message),
+        category=PluginDeprecatedWarning,
+        stacklevel=4
+    )
+
+
+def deprecated(new_destination):
+    """Mark functions as deprecated.
+
+    It will result in a warning being emitted when the function is used.
+    """
+
+    func = None
+    if callable(new_destination):
+        func = new_destination
+        new_destination = None
+
+    def _decorator(decorated_func):
+        if new_destination is None:
+            warning_message = (
+                " Please check content of deprecated function to figure out"
+                " possible replacement."
+            )
+        else:
+            warning_message = " Please replace your usage with '{}'.".format(
+                new_destination
+            )
+
+        @functools.wraps(decorated_func)
+        def wrapper(*args, **kwargs):
+            _deprecation_warning(decorated_func.__name__, warning_message)
+            return decorated_func(*args, **kwargs)
+        return wrapper
+
+    if func is None:
+        return _decorator
+    return _decorator(func)
+
+
+# Classes just inheriting from pyblish classes
+# - seems to be unused in code (not 100% sure)
+# - they should be removed but because it is not clear if they're used
+#   we'll keep them and log a deprecation warning
+# Deprecated since 3.14.*, will be removed in 3.16.*
 class ContextPlugin(pyblish.api.ContextPlugin):
-    def process(cls, *args, **kwargs):
-        super(ContextPlugin, cls).process(cls, *args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        _deprecation_warning(
+            "openpype.plugin.ContextPlugin",
+            " Please replace your usage with 'pyblish.api.ContextPlugin'."
+        )
+        super(ContextPlugin, self).__init__(*args, **kwargs)
 
 
+# Deprecated since 3.14.*, will be removed in 3.16.*
 class InstancePlugin(pyblish.api.InstancePlugin):
-    def process(cls, *args, **kwargs):
-        super(InstancePlugin, cls).process(cls, *args, **kwargs)
+    def __init__(self, *args, **kwargs):
+        _deprecation_warning(
+            "openpype.plugin.InstancePlugin",
+            " Please replace your usage with 'pyblish.api.InstancePlugin'."
+        )
+        super(InstancePlugin, self).__init__(*args, **kwargs)
 
 
-class Extractor(InstancePlugin):
+class Extractor(pyblish.api.InstancePlugin):
     """Extractor base class.
 
     The extractor base class implements a "staging_dir" function used to
@@ -37,17 +103,13 @@ class Extractor(InstancePlugin):
         Upon calling this method the staging directory is stored inside
         the instance.data['stagingDir']
         """
-        staging_dir = instance.data.get('stagingDir', None)
-        if not staging_dir:
-            staging_dir = os.path.normpath(
-                tempfile.mkdtemp(prefix="pyblish_tmp_")
-            )
-            instance.data['stagingDir'] = staging_dir
+        from openpype.pipeline.publish import get_instance_staging_dir
 
-        return staging_dir
+        return get_instance_staging_dir(instance)
 
 
+@deprecated("openpype.pipeline.publish.context_plugin_should_run")
 def contextplugin_should_run(plugin, context):
     """Return whether the ContextPlugin should run on the given context.
 
@@ -57,30 +119,10 @@ def contextplugin_should_run(plugin, context):
 
     This actually checks it correctly and returns whether it should run.
 
+    Deprecated:
+        Since 3.14.* will be removed in 3.16.* or later.
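+
+    Example:
+        Replacement usage (same signature, new location):
+
+        >>> from openpype.pipeline.publish import context_plugin_should_run
+        >>> if not context_plugin_should_run(self, context):
+        ...     return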
""" - required = set(plugin.families) - # When no filter always run - if "*" in required: - return True + from openpype.pipeline.publish import context_plugin_should_run - for instance in context: - - # Ignore inactive instances - if (not instance.data.get("publish", True) or - not instance.data.get("active", True)): - continue - - families = instance.data.get("families", []) - if any(f in required for f in families): - return True - - family = instance.data.get("family") - if family and family in required: - return True - - return False - - -class ValidationException(Exception): - pass + return context_plugin_should_run(plugin, context) diff --git a/openpype/plugins/load/add_site.py b/openpype/plugins/load/add_site.py index 95001691e2..e31f746f51 100644 --- a/openpype/plugins/load/add_site.py +++ b/openpype/plugins/load/add_site.py @@ -1,9 +1,19 @@ +from openpype.client import get_linked_representation_id from openpype.modules import ModulesManager from openpype.pipeline import load +from openpype.modules.sync_server.utils import SiteAlreadyPresentError class AddSyncSite(load.LoaderPlugin): - """Add sync site to representation""" + """Add sync site to representation + + If family of synced representation is 'workfile', it looks for all + representations which are referenced (loaded) in workfile with content of + 'inputLinks'. + It doesn't do any checks for site, most common use case is when artist is + downloading workfile to his local site, but it might be helpful when + artist is re-uploading broken representation on remote site also. + """ representations = ["*"] families = ["*"] @@ -12,21 +22,58 @@ class AddSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" - def load(self, context, name=None, namespace=None, data=None): - self.log.info("Adding {} to representation: {}".format( - data["site_name"], data["_id"])) - self.add_site_to_representation(data["project_name"], - data["_id"], - data["site_name"]) - self.log.debug("Site added.") + _sync_server = None + is_add_site_loader = True - @staticmethod - def add_site_to_representation(project_name, representation_id, site_name): - """Adds new site to representation_id, resets if exists""" - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.add_site(project_name, representation_id, site_name, - force=True) + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + + def load(self, context, name=None, namespace=None, data=None): + """"Adds site skeleton information on representation_id + + Looks for loaded containers for workfile, adds them site skeleton too + (eg. they should be downloaded too). 
+ Args: + context (dict): + name (str): + namespace (str): + data (dict): expects {"site_name": SITE_NAME_TO_ADD} + """ + # self.log wont propagate + project_name = context["project"]["name"] + repre_doc = context["representation"] + family = repre_doc["context"]["family"] + repre_id = repre_doc["_id"] + site_name = data["site_name"] + print("Adding {} to representation: {}".format( + data["site_name"], repre_id)) + + self.sync_server.add_site(project_name, repre_id, site_name, + force=True) + + if family == "workfile": + links = get_linked_representation_id( + project_name, + repre_id=repre_id, + link_type="reference" + ) + for link_repre_id in links: + try: + print("Adding {} to linked representation: {}".format( + data["site_name"], link_repre_id)) + self.sync_server.add_site(project_name, link_repre_id, + site_name, + force=False) + except SiteAlreadyPresentError: + # do not add/reset working site for references + self.log.debug("Site present", exc_info=True) + + self.log.debug("Site added.") def filepath_from_context(self, context): """No real file loading""" diff --git a/openpype/plugins/load/copy_file.py b/openpype/plugins/load/copy_file.py index 60db094cfb..163f56a83a 100644 --- a/openpype/plugins/load/copy_file.py +++ b/openpype/plugins/load/copy_file.py @@ -19,7 +19,7 @@ class CopyFile(load.LoaderPlugin): @staticmethod def copy_file_to_clipboard(path): - from Qt import QtCore, QtWidgets + from qtpy import QtCore, QtWidgets clipboard = QtWidgets.QApplication.clipboard() assert clipboard, "Must have running QApplication instance" diff --git a/openpype/plugins/load/copy_file_path.py b/openpype/plugins/load/copy_file_path.py index 565d8d1ff1..569e5c8780 100644 --- a/openpype/plugins/load/copy_file_path.py +++ b/openpype/plugins/load/copy_file_path.py @@ -19,7 +19,7 @@ class CopyFilePath(load.LoaderPlugin): @staticmethod def copy_path_to_clipboard(path): - from Qt import QtWidgets + from qtpy import QtWidgets clipboard = QtWidgets.QApplication.clipboard() assert clipboard, "Must have running QApplication instance" diff --git a/openpype/plugins/load/delete_old_versions.py b/openpype/plugins/load/delete_old_versions.py index 2789f4ea23..c7ad88a924 100644 --- a/openpype/plugins/load/delete_old_versions.py +++ b/openpype/plugins/load/delete_old_versions.py @@ -4,15 +4,18 @@ import uuid import clique from pymongo import UpdateOne -import ftrack_api import qargparse -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore -from avalon.api import AvalonMongoDB from openpype import style -from openpype.pipeline import load -from openpype.lib import StringTemplate -from openpype.api import Anatomy +from openpype.client import get_versions, get_representations +from openpype.modules import ModulesManager +from openpype.lib import format_file_size +from openpype.pipeline import load, AvalonMongoDB, Anatomy +from openpype.pipeline.load import ( + get_representation_path_with_anatomy, + InvalidRepresentationContext, +) class DeleteOldVersions(load.SubsetLoaderPlugin): @@ -39,13 +42,6 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): ) ] - def sizeof_fmt(self, num, suffix='B'): - for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - def delete_whole_dir_paths(self, dir_paths, delete=True): size = 0 @@ -81,27 +77,28 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def path_from_representation(self, representation, anatomy): try: - template = 
representation["data"]["template"] - + context = representation["context"] except KeyError: return (None, None) + try: + path = get_representation_path_with_anatomy( + representation, anatomy + ) + except InvalidRepresentationContext: + return (None, None) + sequence_path = None - try: - context = representation["context"] - context["root"] = anatomy.roots - path = str(StringTemplate.format_template(template, context)) - if "frame" in context: - context["frame"] = self.sequence_splitter - sequence_path = os.path.normpath(str( - StringTemplate.format_template(template, context) - )) + if "frame" in context: + context["frame"] = self.sequence_splitter + sequence_path = get_representation_path_with_anatomy( + representation, anatomy + ) - except KeyError: - # Template references unavailable data - return (None, None) + if sequence_path: + sequence_path = sequence_path.normalized() - return (os.path.normpath(path), sequence_path) + return (path.normalized(), sequence_path) def delete_only_repre_files(self, dir_paths, file_paths, delete=True): size = 0 @@ -199,18 +196,10 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): def get_data(self, context, versions_count): subset = context["subset"] asset = context["asset"] - anatomy = Anatomy(context["project"]["name"]) + project_name = context["project"]["name"] + anatomy = Anatomy(project_name) - self.dbcon = AvalonMongoDB() - self.dbcon.Session["AVALON_PROJECT"] = context["project"]["name"] - self.dbcon.install() - - versions = list( - self.dbcon.find({ - "type": "version", - "parent": {"$in": [subset["_id"]]} - }) - ) + versions = list(get_versions(project_name, subset_ids=[subset["_id"]])) versions_by_parent = collections.defaultdict(list) for ent in versions: @@ -269,10 +258,9 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): print(msg) return - repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self.log.debug( "Collected representations to remove ({})".format(len(repres)) @@ -331,7 +319,7 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): return data - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): # Size of files. size = 0 if not data: @@ -368,30 +356,70 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): )) if mongo_changes_bulk: - self.dbcon.bulk_write(mongo_changes_bulk) + dbcon = AvalonMongoDB() + dbcon.Session["AVALON_PROJECT"] = project_name + dbcon.install() + dbcon.bulk_write(mongo_changes_bulk) + dbcon.uninstall() - self.dbcon.uninstall() + self._ftrack_delete_versions(data) + + return size + + def _ftrack_delete_versions(self, data): + """Delete version on ftrack. + + Handling of ftrack logic in this plugin is not ideal. But in OP3 it is + almost impossible to solve the issue other way. + + Note: + Asset versions on ftrack are not deleted but marked as + "not published" which cause that they're invisible. + + Args: + data (dict): Data sent to subset loader with full context. + """ + + # First check for ftrack id on asset document + # - skip if ther is none + asset_ftrack_id = data["asset"]["data"].get("ftrackId") + if not asset_ftrack_id: + self.log.info(( + "Asset does not have filled ftrack id. Skipped delete" + " of ftrack version." 
+ )) + return + + # Check if ftrack module is enabled + modules_manager = ModulesManager() + ftrack_module = modules_manager.modules_by_name.get("ftrack") + if not ftrack_module or not ftrack_module.enabled: + return + + import ftrack_api + + session = ftrack_api.Session() + subset_name = data["subset"]["name"] + versions = { + '"{}"'.format(version_doc["name"]) + for version_doc in data["versions"] + } + asset_versions = session.query( + ( + "select id, is_published from AssetVersion where" + " asset.parent.id is \"{}\"" + " and asset.name is \"{}\"" + " and version in ({})" + ).format( + asset_ftrack_id, + subset_name, + ",".join(versions) + ) + ).all() # Set attribute `is_published` to `False` on ftrack AssetVersions - session = ftrack_api.Session() - query = ( - "AssetVersion where asset.parent.id is \"{}\"" - " and asset.name is \"{}\"" - " and version is \"{}\"" - ) - for v in data["versions"]: - try: - ftrack_version = session.query( - query.format( - data["asset"]["data"]["ftrackId"], - data["subset"]["name"], - v["name"] - ) - ).one() - except ftrack_api.exception.NoResultFoundError: - continue - - ftrack_version["is_published"] = False + for asset_version in asset_versions: + asset_version["is_published"] = False try: session.commit() @@ -404,8 +432,6 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): self.log.error(msg) self.message(msg) - return size - def load(self, contexts, name=None, namespace=None, options=None): try: size = 0 @@ -424,10 +450,11 @@ class DeleteOldVersions(load.SubsetLoaderPlugin): if not data: continue - size += self.main(data, remove_publish_folder) + project_name = context["project"]["name"] + size += self.main(project_name, data, remove_publish_folder) print("Progressing {}/{}".format(count + 1, len(contexts))) - msg = "Total size of files: " + self.sizeof_fmt(size) + msg = "Total size of files: {}".format(format_file_size(size)) self.log.info(msg) self.message(msg) @@ -450,7 +477,7 @@ class CalculateOldVersions(DeleteOldVersions): ) ] - def main(self, data, remove_publish_folder): + def main(self, project_name, data, remove_publish_folder): size = 0 if not data: diff --git a/openpype/plugins/load/delivery.py b/openpype/plugins/load/delivery.py index 04080053e3..d1d5659118 100644 --- a/openpype/plugins/load/delivery.py +++ b/openpype/plugins/load/delivery.py @@ -1,22 +1,23 @@ import copy from collections import defaultdict -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB - -from openpype.pipeline import load -from openpype.api import Anatomy, config +from openpype.client import get_representations +from openpype.pipeline import load, Anatomy from openpype import resources, style -from openpype.lib.delivery import ( - sizeof_fmt, - path_from_representation, +from openpype.lib import ( + format_file_size, + collect_frames, + get_datetime_data, +) +from openpype.pipeline.load import get_representation_path_with_anatomy +from openpype.pipeline.delivery import ( get_format_dict, check_destination_path, - process_single_file, - process_sequence, - collect_frames + deliver_single_file, + deliver_sequence, ) @@ -70,17 +71,13 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) - project = contexts[0]["project"]["name"] - self.anatomy = Anatomy(project) + project_name = contexts[0]["project"]["name"] + self.anatomy = Anatomy(project_name) self._representations = None self.log = log self.currently_uploaded = 0 - self.dbcon = AvalonMongoDB() - 
self.dbcon.Session["AVALON_PROJECT"] = project - self.dbcon.install() - - self._set_representations(contexts) + self._set_representations(project_name, contexts) dropdown = QtWidgets.QComboBox() self.templates = self._get_templates(self.anatomy) @@ -165,14 +162,16 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): selected_repres = self._get_selected_repres() - datetime_data = config.get_datetime_data() + datetime_data = get_datetime_data() template_name = self.dropdown.currentText() format_dict = get_format_dict(self.anatomy, self.root_line_edit.text()) for repre in self._representations: if repre["name"] not in selected_repres: continue - repre_path = path_from_representation(repre, self.anatomy) + repre_path = get_representation_path_with_anatomy( + repre, self.anatomy + ) anatomy_data = copy.deepcopy(repre["context"]) new_report_items = check_destination_path(str(repre["_id"]), @@ -207,7 +206,7 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): args[0] = src_path if frame: anatomy_data["frame"] = frame - new_report_items, uploaded = process_single_file(*args) + new_report_items, uploaded = deliver_single_file(*args) report_items.update(new_report_items) self._update_progress(uploaded) else: # fallback for Pype2 and representations without files @@ -216,9 +215,9 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): repre["context"]["frame"] = len(str(frame)) * "#" if not frame: - new_report_items, uploaded = process_single_file(*args) + new_report_items, uploaded = deliver_single_file(*args) else: - new_report_items, uploaded = process_sequence(*args) + new_report_items, uploaded = deliver_sequence(*args) report_items.update(new_report_items) self._update_progress(uploaded) @@ -240,13 +239,12 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): return templates - def _set_representations(self, contexts): + def _set_representations(self, project_name, contexts): version_ids = [context["version"]["_id"] for context in contexts] - repres = list(self.dbcon.find({ - "type": "representation", - "parent": {"$in": version_ids} - })) + repres = list(get_representations( + project_name, version_ids=version_ids + )) self._representations = repres @@ -269,8 +267,9 @@ class DeliveryOptionsDialog(QtWidgets.QDialog): def _prepare_label(self): """Provides text with no of selected files and their size.""" - label = "{} files, size {}".format(self.files_selected, - sizeof_fmt(self.size_selected)) + label = "{} files, size {}".format( + self.files_selected, + format_file_size(self.size_selected)) return label def _get_selected_repres(self): diff --git a/openpype/plugins/load/open_djv.py b/openpype/plugins/load/open_djv.py index 273c77c93f..bc5fd64b87 100644 --- a/openpype/plugins/load/open_djv.py +++ b/openpype/plugins/load/open_djv.py @@ -1,5 +1,5 @@ import os -from openpype.api import ApplicationManager +from openpype.lib import ApplicationManager from openpype.pipeline import load diff --git a/openpype/plugins/load/open_file.py b/openpype/plugins/load/open_file.py index f21cd07c7f..00b2ecd7c5 100644 --- a/openpype/plugins/load/open_file.py +++ b/openpype/plugins/load/open_file.py @@ -15,8 +15,8 @@ def open(filepath): subprocess.call(('xdg-open', filepath)) -class Openfile(load.LoaderPlugin): - """Open Image Sequence with system default""" +class OpenFile(load.LoaderPlugin): + """Open Image Sequence or Video with system default""" families = ["render2d"] representations = ["*"] @@ -27,32 +27,10 @@ class Openfile(load.LoaderPlugin): color = "orange" def load(self, context, name, namespace, data): - import 
clique - directory = os.path.dirname(self.fname) - pattern = clique.PATTERNS["frames"] + path = self.fname + if not os.path.exists(path): + raise RuntimeError("File not found: {}".format(path)) - files = os.listdir(directory) - representation = context["representation"] - - ext = representation["name"] - path = representation["data"]["path"] - - if ext in ["#"]: - collections, remainder = clique.assemble(files, - patterns=[pattern], - minimum_items=1) - - seqeunce = collections[0] - - first_image = list(seqeunce)[0] - filepath = os.path.normpath(os.path.join(directory, first_image)) - else: - file = [f for f in files - if ext in f - if "#" not in f][0] - filepath = os.path.normpath(os.path.join(directory, file)) - - self.log.info("Opening : {}".format(filepath)) - - open(filepath) + self.log.info("Opening : {}".format(path)) + open(path) diff --git a/openpype/plugins/load/push_to_library.py b/openpype/plugins/load/push_to_library.py new file mode 100644 index 0000000000..dd7291e686 --- /dev/null +++ b/openpype/plugins/load/push_to_library.py @@ -0,0 +1,52 @@ +import os + +from openpype import PACKAGE_DIR +from openpype.lib import get_openpype_execute_args, run_detached_process +from openpype.pipeline import load +from openpype.pipeline.load import LoadError + + +class PushToLibraryProject(load.SubsetLoaderPlugin): + """Export selected versions to folder structure from Template""" + + is_multiple_contexts_compatible = True + + representations = ["*"] + families = ["*"] + + label = "Push to Library project" + order = 35 + icon = "send" + color = "#d8d8d8" + + def load(self, contexts, name=None, namespace=None, options=None): + filtered_contexts = [ + context + for context in contexts + if context.get("project") and context.get("version") + ] + if not filtered_contexts: + raise LoadError("Nothing to push for your selection") + + if len(filtered_contexts) > 1: + raise LoadError("Please select only one item") + + context = tuple(filtered_contexts)[0] + push_tool_script_path = os.path.join( + PACKAGE_DIR, + "tools", + "push_to_project", + "app.py" + ) + project_doc = context["project"] + version_doc = context["version"] + project_name = project_doc["name"] + version_id = str(version_doc["_id"]) + + args = get_openpype_execute_args( + "run", + push_tool_script_path, + "--project", project_name, + "--version", version_id + ) + run_detached_process(args) diff --git a/openpype/plugins/load/remove_site.py b/openpype/plugins/load/remove_site.py index adffec9986..bea8b1b346 100644 --- a/openpype/plugins/load/remove_site.py +++ b/openpype/plugins/load/remove_site.py @@ -3,7 +3,10 @@ from openpype.pipeline import load class RemoveSyncSite(load.LoaderPlugin): - """Remove sync site and its files on representation""" + """Remove sync site and its files on representation. + + Removes files only on local site! 
+ """ representations = ["*"] families = ["*"] @@ -12,21 +15,30 @@ class RemoveSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" - def load(self, context, name=None, namespace=None, data=None): - self.log.info("Removing {} on representation: {}".format( - data["site_name"], data["_id"])) - self.remove_site_on_representation(data["project_name"], - data["_id"], - data["site_name"]) - self.log.debug("Site added.") + _sync_server = None + is_remove_site_loader = True - @staticmethod - def remove_site_on_representation(project_name, representation_id, - site_name): - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.remove_site(project_name, representation_id, - site_name, True) + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + + def load(self, context, name=None, namespace=None, data=None): + project_name = context["project"]["name"] + repre_doc = context["representation"] + repre_id = repre_doc["_id"] + site_name = data["site_name"] + + print("Removing {} on representation: {}".format(site_name, repre_id)) + + self.sync_server.remove_site(project_name, + repre_id, + site_name, + True) + self.log.debug("Site removed.") def filepath_from_context(self, context): """No real file loading""" diff --git a/openpype/plugins/publish/cleanup.py b/openpype/plugins/publish/cleanup.py index f29e6ccd4e..ef312e391f 100644 --- a/openpype/plugins/publish/cleanup.py +++ b/openpype/plugins/publish/cleanup.py @@ -5,6 +5,8 @@ import shutil import pyblish.api import re +from openpype.tests.lib import is_in_tests + class CleanUp(pyblish.api.InstancePlugin): """Cleans up the staging directory after a successful publish. 
@@ -44,6 +46,9 @@ class CleanUp(pyblish.api.InstancePlugin): def process(self, instance): """Plugin entry point.""" + if is_in_tests(): + # let automatic test process clean up temporary data + return # Get the errored instances failed = [] for result in instance.context.data["results"]: diff --git a/openpype/plugins/publish/cleanup_explicit.py b/openpype/plugins/publish/cleanup_explicit.py index 88bba34532..983c9223c6 100644 --- a/openpype/plugins/publish/cleanup_explicit.py +++ b/openpype/plugins/publish/cleanup_explicit.py @@ -73,7 +73,7 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin): ) # Delete folders with it's content - succeded_dirs = set() + succeeded = set() for dirpath in dirpaths: # Check if directory still exists # - it is possible that directory was already deleted with @@ -81,13 +81,13 @@ class ExplicitCleanUp(pyblish.api.ContextPlugin): if os.path.exists(dirpath): try: shutil.rmtree(dirpath) - succeded_dirs.add(dirpath) + succeeded.add(dirpath) except Exception: failed.append(dirpath) - if succeded_dirs: + if succeeded: self.log.info( - "Removed direcoties:\n{}".format("\n".join(succeded_dirs)) + "Removed directories:\n{}".format("\n".join(succeeded)) ) # Prepare lines for report of failed removements diff --git a/openpype/plugins/publish/cleanup_farm.py b/openpype/plugins/publish/cleanup_farm.py index ab0c6e469e..b87d4698a2 100644 --- a/openpype/plugins/publish/cleanup_farm.py +++ b/openpype/plugins/publish/cleanup_farm.py @@ -3,7 +3,6 @@ import os import shutil import pyblish.api -import avalon.api class CleanUpFarm(pyblish.api.ContextPlugin): @@ -22,8 +21,8 @@ class CleanUpFarm(pyblish.api.ContextPlugin): def process(self, context): # Get source host from which farm publishing was started - src_host_name = avalon.api.Session.get("AVALON_APP") - self.log.debug("Host name from session is {}".format(src_host_name)) + src_host_name = context.data["hostName"] + self.log.debug("Host name from context is {}".format(src_host_name)) # Skip process if is not in list of source hosts in which this # plugin should run if src_host_name not in self.allowed_hosts: diff --git a/openpype/plugins/publish/collect_anatomy_context_data.py b/openpype/plugins/publish/collect_anatomy_context_data.py index bd8d9e50c4..55ce8e06f4 100644 --- a/openpype/plugins/publish/collect_anatomy_context_data.py +++ b/openpype/plugins/publish/collect_anatomy_context_data.py @@ -13,12 +13,10 @@ Provides: """ import json -from openpype.lib import ( - get_system_general_anatomy_data -) -from avalon import api import pyblish.api +from openpype.pipeline.template_data import get_template_data + class CollectAnatomyContextData(pyblish.api.ContextPlugin): """Collect Anatomy Context data. 
@@ -32,11 +30,15 @@ class CollectAnatomyContextData(pyblish.api.ContextPlugin): "asset": "AssetName", "hierarchy": "path/to/asset", "task": "Working", + "user": "MeDespicable", + # Duplicated entry "username": "MeDespicable", + # Current host name + "app": "maya" + *** OPTIONAL *** - "app": "maya" # Current application base name - + mutliple keys from `datetimeData` # see it's collector + + multiple keys from `datetimeData` (see its collector) } """ @@ -44,52 +46,26 @@ label = "Collect Anatomy Context Data" def process(self, context): + host_name = context.data["hostName"] + system_settings = context.data["system_settings"] project_entity = context.data["projectEntity"] - context_data = { - "project": { - "name": project_entity["name"], - "code": project_entity["data"].get("code") - }, - "username": context.data["user"], - "app": context.data["hostName"] - } - - context.data["anatomyData"] = context_data - - # add system general settings anatomy data - system_general_data = get_system_general_anatomy_data() - context_data.update(system_general_data) - - datetime_data = context.data.get("datetimeData") or {} - context_data.update(datetime_data) - asset_entity = context.data.get("assetEntity") + task_name = None if asset_entity: - task_name = api.Session["AVALON_TASK"] + task_name = context.data["task"] - asset_tasks = asset_entity["data"]["tasks"] - task_type = asset_tasks.get(task_name, {}).get("type") + anatomy_data = get_template_data( + project_entity, asset_entity, task_name, host_name, system_settings + ) + anatomy_data.update(context.data.get("datetimeData") or {}) - project_task_types = project_entity["config"]["tasks"] - task_code = project_task_types.get(task_type, {}).get("short_name") + username = context.data["user"] + anatomy_data["user"] = username + # Backwards compatibility for 'username' key + anatomy_data["username"] = username - asset_parents = asset_entity["data"]["parents"] - hierarchy = "/".join(asset_parents) - - parent_name = project_entity["name"] - if asset_parents: - parent_name = asset_parents[-1] - - context_data.update({ - "asset": asset_entity["name"], - "parent": parent_name, - "hierarchy": hierarchy, - "task": { - "name": task_name, - "type": task_type, - "short": task_code, - } - }) + # Store + context.data["anatomyData"] = anatomy_data self.log.info("Global anatomy Data collected") - self.log.debug(json.dumps(context_data, indent=4)) + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/openpype/plugins/publish/collect_anatomy_instance_data.py b/openpype/plugins/publish/collect_anatomy_instance_data.py index 42836e796b..48171aa957 100644 --- a/openpype/plugins/publish/collect_anatomy_instance_data.py +++ b/openpype/plugins/publish/collect_anatomy_instance_data.py @@ -25,9 +25,14 @@ import copy import json import collections -from avalon import io import pyblish.api +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions +) + class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): """Collect Instance specific Anatomy data.
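To make the rewritten context collector concrete, 'context.data["anatomyData"]' roughly takes the following shape after 'get_template_data' runs and the datetime and user keys are merged in. Values are placeholders taken from the docstring above; the nested "task" structure follows the removed code, which built it from the task name, type and short name:

# Example shape only (values are placeholders)
anatomy_data = {
    "project": {"name": "ProjectName", "code": "prj"},
    "asset": "AssetName",
    "hierarchy": "path/to/asset",
    "task": {"name": "Working", "type": "Generic", "short": "gen"},
    "user": "MeDespicable",
    "username": "MeDespicable",  # backwards-compatible duplicate
    "app": "maya",               # current host name
    # ...plus keys merged from context.data["datetimeData"]
}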
@@ -43,13 +48,15 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): def process(self, context): self.log.info("Collecting anatomy data for all instances.") - self.fill_missing_asset_docs(context) - self.fill_latest_versions(context) + project_name = context.data["projectName"] + self.fill_missing_asset_docs(context, project_name) + self.fill_instance_data_from_asset(context) + self.fill_latest_versions(context, project_name) self.fill_anatomy_data(context) self.log.info("Anatomy Data collection finished.") - def fill_missing_asset_docs(self, context): + def fill_missing_asset_docs(self, context, project_name): self.log.debug("Qeurying asset documents for instances.") context_asset_doc = context.data.get("assetEntity") @@ -83,10 +90,8 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): self.log.debug("Querying asset documents with names: {}".format( ", ".join(["\"{}\"".format(name) for name in asset_names]) )) - asset_docs = io.find({ - "type": "asset", - "name": {"$in": asset_names} - }) + + asset_docs = get_assets(project_name, asset_names=asset_names) asset_docs_by_name = { asset_doc["name"]: asset_doc for asset_doc in asset_docs @@ -110,7 +115,24 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "Not found asset documents with names \"{}\"." ).format(joined_asset_names)) - def fill_latest_versions(self, context): + def fill_instance_data_from_asset(self, context): + for instance in context: + asset_doc = instance.data.get("assetEntity") + if not asset_doc: + continue + + asset_data = asset_doc["data"] + for key in ( + "fps", + "frameStart", + "frameEnd", + "handleStart", + "handleEnd", + ): + if key not in instance.data and key in asset_data: + instance.data[key] = asset_data[key] + + def fill_latest_versions(self, context, project_name): """Try to find latest version for each instance's subset. Key "latestVersion" is always set to latest version or `None`. 
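A small worked example of the new 'fill_instance_data_from_asset' step above: timing keys missing on an instance are copied from its asset document, while keys the instance already carries are left untouched.

# Standalone example mirroring fill_instance_data_from_asset
asset_data = {"fps": 25, "frameStart": 1001, "frameEnd": 1100}
instance_data = {"frameStart": 990}  # pre-set value is kept

for key in ("fps", "frameStart", "frameEnd", "handleStart", "handleEnd"):
    if key not in instance_data and key in asset_data:
        instance_data[key] = asset_data[key]

# instance_data == {"frameStart": 990, "fps": 25, "frameEnd": 1100}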
@@ -122,16 +144,16 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): None """ - self.log.debug("Qeurying latest versions for instances.") + self.log.debug("Querying latest versions for instances.") hierarchy = {} - subset_filters = [] + names_by_asset_ids = collections.defaultdict(set) for instance in context: # Make sure `"latestVersion"` key is set latest_version = instance.data.get("latestVersion") instance.data["latestVersion"] = latest_version - # Skip instances withou "assetEntity" + # Skip instances without "assetEntity" asset_doc = instance.data.get("assetEntity") if not asset_doc: continue @@ -140,73 +162,39 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): asset_id = asset_doc["_id"] subset_name = instance.data["subset"] - # Prepare instance hiearchy for faster filling latest versions + # Prepare instance hierarchy for faster filling latest versions if asset_id not in hierarchy: hierarchy[asset_id] = {} if subset_name not in hierarchy[asset_id]: hierarchy[asset_id][subset_name] = [] hierarchy[asset_id][subset_name].append(instance) - subset_filters.append({ - "parent": asset_id, - "name": subset_name - }) + names_by_asset_ids[asset_id].add(subset_name) subset_docs = [] - if subset_filters: - subset_docs = list(io.find({ - "type": "subset", - "$or": subset_filters - })) + if names_by_asset_ids: + subset_docs = list(get_subsets( + project_name, names_by_asset_ids=names_by_asset_ids + )) subset_ids = [ subset_doc["_id"] for subset_doc in subset_docs ] - last_version_by_subset_id = self._query_last_versions(subset_ids) + last_version_docs_by_subset_id = get_last_versions( + project_name, subset_ids, fields=["name"] + ) for subset_doc in subset_docs: subset_id = subset_doc["_id"] - last_version = last_version_by_subset_id.get(subset_id) - if last_version is None: + last_version_doc = last_version_docs_by_subset_id.get(subset_id) + if last_version_doc is None: continue asset_id = subset_doc["parent"] subset_name = subset_doc["name"] _instances = hierarchy[asset_id][subset_name] for _instance in _instances: - _instance.data["latestVersion"] = last_version - - def _query_last_versions(self, subset_ids): - """Retrieve all latest versions for entered subset_ids. - - Args: - subset_ids (list): List of subset ids with type `ObjectId`. - - Returns: - dict: Key is subset id and value is last version name. 
- """ - _pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"} - }} - ] - - last_version_by_subset_id = {} - for doc in io.aggregate(_pipeline): - subset_id = doc["_id"] - last_version_by_subset_id[subset_id] = doc["name"] - - return last_version_by_subset_id + _instance.data["latestVersion"] = last_version_doc["name"] def fill_anatomy_data(self, context): self.log.debug("Storing anatomy data to instance data.") @@ -238,7 +226,7 @@ class CollectAnatomyInstanceData(pyblish.api.ContextPlugin): "version": version_number } - # Hiearchy + # Hierarchy asset_doc = instance.data.get("assetEntity") if ( asset_doc diff --git a/openpype/plugins/publish/collect_anatomy_object.py b/openpype/plugins/publish/collect_anatomy_object.py index 2c87918728..725cae2b14 100644 --- a/openpype/plugins/publish/collect_anatomy_object.py +++ b/openpype/plugins/publish/collect_anatomy_object.py @@ -1,29 +1,32 @@ """Collect Anatomy object. Requires: - os.environ -> AVALON_PROJECT + context -> projectName Provides: - context -> anatomy (pype.api.Anatomy) + context -> anatomy (openpype.pipeline.anatomy.Anatomy) """ -import os -from openpype.api import Anatomy + import pyblish.api +from openpype.pipeline import Anatomy, KnownPublishError class CollectAnatomyObject(pyblish.api.ContextPlugin): - """Collect Anatomy object into Context""" + """Collect Anatomy object into Context. + + Order offset could be changed to '-0.45'. + """ order = pyblish.api.CollectorOrder - 0.4 label = "Collect Anatomy Object" def process(self, context): - project_name = os.environ.get("AVALON_PROJECT") + project_name = context.data.get("projectName") if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` is not set." + raise KnownPublishError(( + "Project name is not set in 'projectName'." "Could not initialize project's Anatomy." - ) + )) context.data["anatomy"] = Anatomy(project_name) diff --git a/openpype/plugins/publish/collect_audio.py b/openpype/plugins/publish/collect_audio.py new file mode 100644 index 0000000000..3a0ddb3281 --- /dev/null +++ b/openpype/plugins/publish/collect_audio.py @@ -0,0 +1,178 @@ +import collections +import pyblish.api + +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions, + get_representations, +) +from openpype.pipeline.load import get_representation_path_with_anatomy + + +class CollectAudio(pyblish.api.ContextPlugin): + """Collect asset's last published audio. + + The audio subset name searched for is defined in: + project settings > Collect Audio + + Note: + The plugin was instance plugin but because of so much queries the + plugin was slowing down whole collection phase a lot thus was + converted to context plugin which requires only 4 queries top. 
+ """ + + label = "Collect Asset Audio" + order = pyblish.api.CollectorOrder + 0.1 + families = ["review"] + hosts = [ + "nuke", + "maya", + "shell", + "hiero", + "premiere", + "harmony", + "traypublisher", + "standalonepublisher", + "fusion", + "tvpaint", + "resolve", + "webpublisher", + "aftereffects", + "flame", + "unreal" + ] + + audio_subset_name = "audioMain" + + def process(self, context): + # Fake filtering by family inside context plugin + filtered_instances = [] + for instance in pyblish.api.instances_by_plugin( + context, self.__class__ + ): + # Skip instances that already have audio filled + if instance.data.get("audio"): + self.log.info( + "Skipping Audio collecion. It is already collected" + ) + continue + filtered_instances.append(instance) + + # Skip if none of instances remained + if not filtered_instances: + return + + # Add audio to instance if exists. + instances_by_asset_name = collections.defaultdict(list) + for instance in filtered_instances: + asset_name = instance.data["asset"] + instances_by_asset_name[asset_name].append(instance) + + asset_names = set(instances_by_asset_name.keys()) + self.log.info(( + "Searching for audio subset '{subset}' in assets {assets}" + ).format( + subset=self.audio_subset_name, + assets=", ".join([ + '"{}"'.format(asset_name) + for asset_name in asset_names + ]) + )) + + # Query all required documents + project_name = context.data["projectName"] + anatomy = context.data["anatomy"] + repre_docs_by_asset_names = self.query_representations( + project_name, asset_names) + + for asset_name, instances in instances_by_asset_name.items(): + repre_docs = repre_docs_by_asset_names[asset_name] + if not repre_docs: + continue + + repre_doc = repre_docs[0] + repre_path = get_representation_path_with_anatomy( + repre_doc, anatomy + ) + for instance in instances: + instance.data["audio"] = [{ + "offset": 0, + "filename": repre_path + }] + self.log.info("Audio Data added to instance ...") + + def query_representations(self, project_name, asset_names): + """Query representations related to audio subsets for passed assets. + + Args: + project_name (str): Project in which we're looking for all + entities. + asset_names (Iterable[str]): Asset names where to look for audio + subsets and their representations. + + Returns: + collections.defaultdict[str, List[Dict[Str, Any]]]: Representations + related to audio subsets by asset name. 
+ """ + + output = collections.defaultdict(list) + # Query asset documents + asset_docs = get_assets( + project_name, + asset_names=asset_names, + fields=["_id", "name"] + ) + + asset_id_by_name = {} + for asset_doc in asset_docs: + asset_id_by_name[asset_doc["name"]] = asset_doc["_id"] + asset_ids = set(asset_id_by_name.values()) + + # Query subsets with name define by 'audio_subset_name' attr + # - one or none subsets with the name should be available on an asset + subset_docs = get_subsets( + project_name, + subset_names=[self.audio_subset_name], + asset_ids=asset_ids, + fields=["_id", "parent"] + ) + subset_id_by_asset_id = {} + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subset_id_by_asset_id[asset_id] = subset_doc["_id"] + + subset_ids = set(subset_id_by_asset_id.values()) + if not subset_ids: + return output + + # Find all latest versions for the subsets + version_docs_by_subset_id = get_last_versions( + project_name, subset_ids=subset_ids, fields=["_id", "parent"] + ) + version_id_by_subset_id = { + subset_id: version_doc["_id"] + for subset_id, version_doc in version_docs_by_subset_id.items() + } + version_ids = set(version_id_by_subset_id.values()) + if not version_ids: + return output + + # Find representations under latest versions of audio subsets + repre_docs = get_representations( + project_name, version_ids=version_ids + ) + repre_docs_by_version_id = collections.defaultdict(list) + for repre_doc in repre_docs: + version_id = repre_doc["parent"] + repre_docs_by_version_id[version_id].append(repre_doc) + + if not repre_docs_by_version_id: + return output + + for asset_name in asset_names: + asset_id = asset_id_by_name.get(asset_name) + subset_id = subset_id_by_asset_id.get(asset_id) + version_id = version_id_by_subset_id.get(subset_id) + output[asset_name] = repre_docs_by_version_id[version_id] + return output diff --git a/openpype/plugins/publish/collect_cleanup_keys.py b/openpype/plugins/publish/collect_cleanup_keys.py new file mode 100644 index 0000000000..b9cd1a9fc9 --- /dev/null +++ b/openpype/plugins/publish/collect_cleanup_keys.py @@ -0,0 +1,21 @@ +""" +Requires: + None +Provides: + context + - cleanupFullPaths (list) + - cleanupEmptyDirs (list) +""" + +import pyblish.api + + +class CollectCleanupKeys(pyblish.api.ContextPlugin): + """Prepare keys for 'ExplicitCleanUp' plugin.""" + + label = "Collect Cleanup Keys" + order = pyblish.api.CollectorOrder - 0.5 + + def process(self, context): + context.data["cleanupFullPaths"] = [] + context.data["cleanupEmptyDirs"] = [] diff --git a/openpype/plugins/publish/collect_comment.py b/openpype/plugins/publish/collect_comment.py index 062142ace9..5be04731ac 100644 --- a/openpype/plugins/publish/collect_comment.py +++ b/openpype/plugins/publish/collect_comment.py @@ -1,19 +1,125 @@ -""" -Requires: - None -Provides: - context -> comment (str) +"""Collect comment and add option to enter comment per instance. + +Combination of plugins. One define optional input for instances in Publisher +UI (CollectInstanceCommentDef) and second cares that each instance during +collection has available "comment" key in data (CollectComment). + +Plugin 'CollectInstanceCommentDef' define "comment" attribute which won't be +filled with any value if instance does not match families filter or when +plugin is disabled. + +Plugin 'CollectComment' makes sure that each instance in context has +available "comment" key in data which can be set to 'str' or 'None' if is not +set. 
+- In case the instance already has a comment filled in, the plugin's logic is skipped +- The comment is always set and the value should always be a 'str', even if empty + +Why they are separated: +- 'CollectInstanceCommentDef' can have specific settings to show comment + attribute only to defined families in publisher UI +- 'CollectComment' will run all the time + +Todos: + The comment per instance is not sent via farm. """ import pyblish.api +from openpype.lib.attribute_definitions import TextDef +from openpype.pipeline.publish import OpenPypePyblishPluginMixin -class CollectComment(pyblish.api.ContextPlugin): - """This plug-ins displays the comment dialog box per default""" +class CollectInstanceCommentDef( + pyblish.api.ContextPlugin, + OpenPypePyblishPluginMixin +): + label = "Comment per instance" + targets = ["local"] + # Disable plugin by default + families = [] + enabled = False - label = "Collect Comment" - order = pyblish.api.CollectorOrder + def process(self, instance): + pass + + @classmethod + def apply_settings(cls, project_setting, _): + plugin_settings = project_setting["global"]["publish"].get( + "collect_comment_per_instance" + ) + if not plugin_settings: + return + + if plugin_settings.get("enabled") is not None: + cls.enabled = plugin_settings["enabled"] + + if plugin_settings.get("families") is not None: + cls.families = plugin_settings["families"] + + @classmethod + def get_attribute_defs(cls): + return [ + TextDef("comment", label="Comment") + ] + + +class CollectComment( + pyblish.api.ContextPlugin, + OpenPypePyblishPluginMixin +): + """Collect comment per each instance. + + The plugin makes sure each instance to publish has "comment" set in data + so any further plugin can use it directly. + """ + + label = "Collect Instance Comment" + # TODO change to CollectorOrder after Pyblish is purged + # Pyblish allows modifying comment after collect phase + order = pyblish.api.ExtractorOrder - 0.49 def process(self, context): - comment = (context.data.get("comment") or "").strip() - context.data["comment"] = comment + context_comment = self.cleanup_comment(context.data.get("comment")) + # Set it back + context.data["comment"] = context_comment + for instance in context: + instance_label = str(instance) + # Check if comment is already set + instance_comment = self.cleanup_comment( + instance.data.get("comment")) + + # If comment on instance is not set then look for attributes + if not instance_comment: + attr_values = self.get_attr_values_from_data_for_plugin( + CollectInstanceCommentDef, instance.data + ) + instance_comment = self.cleanup_comment( + attr_values.get("comment") + ) + + # Use the context comment if all comment options on the + # instance are empty + if not instance_comment: + instance_comment = context_comment + + instance.data["comment"] = instance_comment + if instance_comment: + msg_end = " has comment set to: \"{}\"".format( + instance_comment) + else: + msg_end = " does not have a comment set" + self.log.debug("Instance {} {}".format(instance_label, msg_end)) + + def cleanup_comment(self, comment): + """Cleanup comment value. + + Args: + comment (Union[str, None]): Comment value from data. + + Returns: + str: Cleaned (stripped) comment, or an empty string if input + was 'None'.
+ """ + + if comment: + return comment.strip() + return "" diff --git a/openpype/plugins/publish/collect_avalon_entities.py b/openpype/plugins/publish/collect_context_entities.py similarity index 72% rename from openpype/plugins/publish/collect_avalon_entities.py rename to openpype/plugins/publish/collect_context_entities.py index c099a2cf75..31fbeb5dbd 100644 --- a/openpype/plugins/publish/collect_avalon_entities.py +++ b/openpype/plugins/publish/collect_context_entities.py @@ -1,36 +1,39 @@ """Collect Anatomy and global anatomy data. Requires: - session -> AVALON_PROJECT, AVALON_ASSET + session -> AVALON_ASSET + context -> projectName + context -> asset + context -> task Provides: - context -> projectEntity - project entity from database - context -> assetEntity - asset entity from database + context -> projectEntity - Project document from database. + context -> assetEntity - Asset document from database only if 'asset' is + set in context. """ -from avalon import io, api import pyblish.api +from openpype.client import get_project, get_asset_by_name +from openpype.pipeline import KnownPublishError -class CollectAvalonEntities(pyblish.api.ContextPlugin): - """Collect Anatomy into Context""" + +class CollectContextEntities(pyblish.api.ContextPlugin): + """Collect entities into Context.""" order = pyblish.api.CollectorOrder - 0.1 - label = "Collect Avalon Entities" + label = "Collect Context Entities" def process(self, context): - io.install() - project_name = api.Session["AVALON_PROJECT"] - asset_name = api.Session["AVALON_ASSET"] - task_name = api.Session["AVALON_TASK"] + project_name = context.data["projectName"] + asset_name = context.data["asset"] + task_name = context.data["task"] - project_entity = io.find_one({ - "type": "project", - "name": project_name - }) - assert project_entity, ( - "Project '{0}' was not found." - ).format(project_name) + project_entity = get_project(project_name) + if not project_entity: + raise KnownPublishError( + "Project '{0}' was not found.".format(project_name) + ) self.log.debug("Collected Project \"{}\"".format(project_entity)) context.data["projectEntity"] = project_entity @@ -38,11 +41,8 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): if not asset_name: self.log.info("Context is not set. 
Can't collect global data.") return - asset_entity = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name '{0}' in project '{1}'" ).format(asset_name, project_name) diff --git a/openpype/plugins/publish/collect_context_label.py b/openpype/plugins/publish/collect_context_label.py index 8cf71882aa..6cdeba8418 100644 --- a/openpype/plugins/publish/collect_context_label.py +++ b/openpype/plugins/publish/collect_context_label.py @@ -1,5 +1,6 @@ """ -Requires: +Optional: + context -> hostName (str) context -> currentFile (str) Provides: context -> label (str) @@ -16,16 +17,27 @@ class CollectContextLabel(pyblish.api.ContextPlugin): label = "Context Label" def process(self, context): + # Add ability to use custom context label + label = context.data.get("label") + if label: + self.log.debug("Context label is already set to \"{}\"".format( + label + )) + return - # Get last registered host - host = pyblish.api.registered_hosts()[-1] + host_name = context.data.get("hostName") + if not host_name: + host_name = pyblish.api.registered_hosts()[-1] + # Use host name as base for label + label = host_name.title() - # Get scene name from "currentFile" - path = context.data.get("currentFile") or "" - base = os.path.basename(path) + # Get scene name from "currentFile" and use basename as ending of label + path = context.data.get("currentFile") + if path: + label += " - {}".format(os.path.basename(path)) # Set label - label = "{host} - {scene}".format(host=host.title(), scene=base) - if host == "standalonepublisher": - label = host.title() context.data["label"] = label + self.log.debug("Context label is changed to \"{}\"".format( + label + )) diff --git a/openpype/plugins/publish/collect_current_context.py b/openpype/plugins/publish/collect_current_context.py new file mode 100644 index 0000000000..7e42700d7d --- /dev/null +++ b/openpype/plugins/publish/collect_current_context.py @@ -0,0 +1,47 @@ +""" +Provides: + context -> projectName (str) + context -> asset (str) + context -> task (str) +""" + +import pyblish.api +from openpype.pipeline import legacy_io + + +class CollectCurrentContext(pyblish.api.ContextPlugin): + """Collect project context into publish context data. + + Plugin does not override any value if is already set. + """ + + order = pyblish.api.CollectorOrder - 0.5 + label = "Collect Current context" + + def process(self, context): + # Make sure 'legacy_io' is intalled + legacy_io.install() + + # Check if values are already set + project_name = context.data.get("projectName") + asset_name = context.data.get("asset") + task_name = context.data.get("task") + if not project_name: + project_name = legacy_io.current_project() + context.data["projectName"] = project_name + + if not asset_name: + asset_name = legacy_io.Session.get("AVALON_ASSET") + context.data["asset"] = asset_name + + if not task_name: + task_name = legacy_io.Session.get("AVALON_TASK") + context.data["task"] = task_name + + # QUESTION should we be explicit with keys? 
(the same on instances) + # - 'asset' -> 'assetName' + # - 'task' -> 'taskName' + + self.log.info(( + "Collected project context\nProject: {}\nAsset: {}\nTask: {}" + ).format(project_name, asset_name, task_name)) diff --git a/openpype/plugins/publish/collect_current_pype_user.py b/openpype/plugins/publish/collect_current_pype_user.py index 1a52a59012..2d507ba292 100644 --- a/openpype/plugins/publish/collect_current_pype_user.py +++ b/openpype/plugins/publish/collect_current_pype_user.py @@ -1,5 +1,3 @@ -import os -import getpass import pyblish.api from openpype.lib import get_openpype_username diff --git a/openpype/plugins/publish/collect_datetime_data.py b/openpype/plugins/publish/collect_datetime_data.py index 1675ae1a98..b3178ca3d2 100644 --- a/openpype/plugins/publish/collect_datetime_data.py +++ b/openpype/plugins/publish/collect_datetime_data.py @@ -5,14 +5,14 @@ Provides: """ import pyblish.api -from openpype.api import config +from openpype.lib.dateutils import get_datetime_data class CollectDateTimeData(pyblish.api.ContextPlugin): - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 label = "Collect DateTime data" def process(self, context): key = "datetimeData" if key not in context.data: - context.data[key] = config.get_datetime_data() + context.data[key] = get_datetime_data() diff --git a/openpype/plugins/publish/collect_frames_fix.py b/openpype/plugins/publish/collect_frames_fix.py new file mode 100644 index 0000000000..bdd49585a5 --- /dev/null +++ b/openpype/plugins/publish/collect_frames_fix.py @@ -0,0 +1,80 @@ +import pyblish.api +from openpype.lib.attribute_definitions import ( + TextDef, + BoolDef +) + +from openpype.pipeline.publish import OpenPypePyblishPluginMixin +from openpype.client.entities import ( + get_last_version_by_subset_name, + get_representations +) + + +class CollectFramesFixDef( + pyblish.api.InstancePlugin, + OpenPypePyblishPluginMixin +): + """Provides text field to insert frame(s) to be rerendered. + + Published files of last version of an instance subset are collected into + instance.data["last_version_published_files"]. All of these, except the + frames mentioned in the text field, will be reused for the new version.
+ """ + order = pyblish.api.CollectorOrder + 0.495 + label = "Collect Frames to Fix" + targets = ["local"] + hosts = ["nuke"] + families = ["render", "prerender"] + enabled = True + + def process(self, instance): + attribute_values = self.get_attr_values_from_data(instance.data) + frames_to_fix = attribute_values.get("frames_to_fix") + rewrite_version = attribute_values.get("rewrite_version") + + if frames_to_fix: + instance.data["frames_to_fix"] = frames_to_fix + + subset_name = instance.data["subset"] + asset_name = instance.data["asset"] + + project_entity = instance.data["projectEntity"] + project_name = project_entity["name"] + + version = get_last_version_by_subset_name(project_name, + subset_name, + asset_name=asset_name) + if not version: + self.log.warning("No last version found, " + "re-render not possible") + return + + representations = get_representations(project_name, + version_ids=[version["_id"]]) + published_files = [] + for repre in representations: + if repre["context"]["family"] not in self.families: + continue + + for file_info in repre.get("files"): + published_files.append(file_info["path"]) + + instance.data["last_version_published_files"] = published_files + self.log.debug("last_version_published_files::{}".format( + instance.data["last_version_published_files"])) + + if rewrite_version: + instance.data["version"] = version["name"] + # limits triggering version validator + instance.data.pop("latestVersion") + + @classmethod + def get_attribute_defs(cls): + return [ + TextDef("frames_to_fix", label="Frames to fix", + placeholder="5,10-15", + regex="[0-9,-]+"), + BoolDef("rewrite_version", label="Rewrite latest version", + default=False), + ] diff --git a/openpype/plugins/publish/collect_from_create_context.py b/openpype/plugins/publish/collect_from_create_context.py index 16e3f669c3..5fcf8feb56 100644 --- a/openpype/plugins/publish/collect_from_create_context.py +++ b/openpype/plugins/publish/collect_from_create_context.py @@ -3,7 +3,10 @@ """ import os import pyblish.api -import avalon.api + +from openpype.host import IPublishHost +from openpype.pipeline import legacy_io, registered_host +from openpype.pipeline.create import CreateContext class CollectFromCreateContext(pyblish.api.ContextPlugin): @@ -14,26 +17,59 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): def process(self, context): create_context = context.data.pop("create_context", None) - # Skip if create context is not available + if not create_context: + host = registered_host() + if isinstance(host, IPublishHost): + create_context = CreateContext(host) + if not create_context: return + thumbnail_paths_by_instance_id = ( + create_context.thumbnail_paths_by_instance_id + ) + context.data["thumbnailSource"] = ( + thumbnail_paths_by_instance_id.get(None) + ) + + project_name = create_context.get_current_project_name() + if project_name: + context.data["projectName"] = project_name + for created_instance in create_context.instances: instance_data = created_instance.data_to_store() if instance_data["active"]: - self.create_instance(context, instance_data) + thumbnail_path = thumbnail_paths_by_instance_id.get( + created_instance.id + ) + self.create_instance( + context, + instance_data, + created_instance.transient_data, + thumbnail_path + ) # Update global data to context context.data.update(create_context.context_data_to_store()) - + context.data["newPublishing"] = True # Update context data - for key in ("AVALON_PROJECT", "AVALON_ASSET", "AVALON_TASK"): - value = 
create_context.dbcon.Session.get(key) - if value is not None: - avalon.api.Session[key] = value - os.environ[key] = value + asset_name = create_context.get_current_asset_name() + task_name = create_context.get_current_task_name() + for key, value in ( + ("AVALON_PROJECT", project_name), + ("AVALON_ASSET", asset_name), + ("AVALON_TASK", task_name) + ): + legacy_io.Session[key] = value + os.environ[key] = value - def create_instance(self, context, in_data): + def create_instance( + self, + context, + in_data, + transient_data, + thumbnail_path + ): subset = in_data["subset"] # If instance data already contain families then use it instance_families = in_data.get("families") or [] @@ -43,15 +79,18 @@ class CollectFromCreateContext(pyblish.api.ContextPlugin): "subset": subset, "asset": in_data["asset"], "task": in_data["task"], - "label": subset, + "label": in_data.get("label") or subset, "name": subset, "family": in_data["family"], - "families": instance_families + "families": instance_families, + "representations": [], + "thumbnailSource": thumbnail_path }) for key, value in in_data.items(): if key not in instance.data: instance.data[key] = value + + instance.data["transientData"] = transient_data + self.log.info("collected instance: {}".format(instance.data)) self.log.info("parsing data: {}".format(in_data)) - - instance.data["representations"] = list() diff --git a/openpype/plugins/publish/collect_hierarchy.py b/openpype/plugins/publish/collect_hierarchy.py index efb40407d9..687397be8a 100644 --- a/openpype/plugins/publish/collect_hierarchy.py +++ b/openpype/plugins/publish/collect_hierarchy.py @@ -1,5 +1,4 @@ import pyblish.api -import avalon.api as avalon class CollectHierarchy(pyblish.api.ContextPlugin): @@ -19,7 +18,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): def process(self, context): temp_context = {} - project_name = avalon.Session["AVALON_PROJECT"] + project_name = context.data["projectName"] final_context = {} final_context[project_name] = {} final_context[project_name]['entity_type'] = 'Project' @@ -29,14 +28,11 @@ class CollectHierarchy(pyblish.api.ContextPlugin): # shot data dict shot_data = {} - family = instance.data.get("family") - - # filter out all unepropriate instances - if not instance.data["publish"]: - continue + family = instance.data["family"] + families = instance.data["families"] # exclude other families then self.families with intersection - if not set(self.families).intersection([family]): + if not set(self.families).intersection(set(families + [family])): continue # exclude if not masterLayer True @@ -60,7 +56,7 @@ class CollectHierarchy(pyblish.api.ContextPlugin): "frameEnd": instance.data["frameEnd"], "clipIn": instance.data["clipIn"], "clipOut": instance.data["clipOut"], - 'fps': instance.context.data["fps"], + "fps": instance.data["fps"], "resolutionWidth": instance.data["resolutionWidth"], "resolutionHeight": instance.data["resolutionHeight"], "pixelAspect": instance.data["pixelAspect"] diff --git a/openpype/plugins/publish/collect_host_name.py b/openpype/plugins/publish/collect_host_name.py index b731e3ed26..d64af4d049 100644 --- a/openpype/plugins/publish/collect_host_name.py +++ b/openpype/plugins/publish/collect_host_name.py @@ -18,20 +18,30 @@ class CollectHostName(pyblish.api.ContextPlugin): def process(self, context): host_name = context.data.get("hostName") + app_name = context.data.get("appName") + app_label = context.data.get("appLabel") # Don't override value if is already set - if host_name: + if host_name and app_name and 
app_label: return - # Use AVALON_APP as first if available it is the same as host name - # - only if is not defined use AVALON_APP_NAME (e.g. on Farm) and - # set it back to AVALON_APP env variable - host_name = os.environ.get("AVALON_APP") + # Use AVALON_APP to get host name if available if not host_name: + host_name = os.environ.get("AVALON_APP") + + # Use AVALON_APP_NAME to get full app name + if not app_name: app_name = os.environ.get("AVALON_APP_NAME") - if app_name: - app_manager = ApplicationManager() - app = app_manager.applications.get(app_name) - if app: + + # Fill missing values based on app full name + if (not host_name or not app_label) and app_name: + app_manager = ApplicationManager() + app = app_manager.applications.get(app_name) + if app: + if not host_name: host_name = app.host_name + if not app_label: + app_label = app.full_label context.data["hostName"] = host_name + context.data["appName"] = app_name + context.data["appLabel"] = app_label diff --git a/openpype/plugins/publish/collect_input_representations_to_versions.py b/openpype/plugins/publish/collect_input_representations_to_versions.py new file mode 100644 index 0000000000..18a19bce80 --- /dev/null +++ b/openpype/plugins/publish/collect_input_representations_to_versions.py @@ -0,0 +1,47 @@ +import pyblish.api + +from bson.objectid import ObjectId + +from openpype.client import get_representations + + +class CollectInputRepresentationsToVersions(pyblish.api.ContextPlugin): + """Converts collected input representations to input versions. + + Any data in `instance.data["inputRepresentations"]` gets converted into + `instance.data["inputVersions"]` as supported in OpenPype v3. + + """ + # This is a ContextPlugin because then we can query the database only once + # for the conversion of representation ids to version ids (optimization) + label = "Input Representations to Versions" + order = pyblish.api.CollectorOrder + 0.499 + hosts = ["*"] + + def process(self, context): + # Query all version ids for representation ids from the database once + representations = set() + for instance in context: + inst_repre = instance.data.get("inputRepresentations", []) + representations.update(inst_repre) + + representations_docs = get_representations( + project_name=context.data["projectEntity"]["name"], + representation_ids=representations, + fields=["_id", "parent"]) + + representation_id_to_version_id = { + repre["_id"]: repre["parent"] for repre in representations_docs + } + + for instance in context: + inst_repre = instance.data.get("inputRepresentations", []) + if not inst_repre: + continue + + input_versions = instance.data.get("inputVersions", []) + for repre_id in inst_repre: + repre_id = ObjectId(repre_id) + version_id = representation_id_to_version_id[repre_id] + input_versions.append(version_id) + instance.data["inputVersions"] = input_versions diff --git a/openpype/plugins/publish/collect_machine_name.py b/openpype/plugins/publish/collect_machine_name.py index 72ef68f8ed..8c25966031 100644 --- a/openpype/plugins/publish/collect_machine_name.py +++ b/openpype/plugins/publish/collect_machine_name.py @@ -11,7 +11,7 @@ import pyblish.api class CollectMachineName(pyblish.api.ContextPlugin): label = "Local Machine Name" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.5 hosts = ["*"] def process(self, context): diff --git a/openpype/plugins/publish/collect_modules.py b/openpype/plugins/publish/collect_modules.py index 2f6cb1ef0e..d76096bcd9 100644 --- a/openpype/plugins/publish/collect_modules.py 
+++ b/openpype/plugins/publish/collect_modules.py @@ -7,7 +7,7 @@ import pyblish.api class CollectModules(pyblish.api.ContextPlugin): """Collect OpenPype modules.""" - order = pyblish.api.CollectorOrder - 0.45 + order = pyblish.api.CollectorOrder - 0.5 label = "OpenPype Modules" def process(self, context): diff --git a/openpype/plugins/publish/collect_otio_frame_ranges.py b/openpype/plugins/publish/collect_otio_frame_ranges.py index ee7b7957ad..9a68b6e43d 100644 --- a/openpype/plugins/publish/collect_otio_frame_ranges.py +++ b/openpype/plugins/publish/collect_otio_frame_ranges.py @@ -8,8 +8,12 @@ Requires: # import os import opentimelineio as otio import pyblish.api -import openpype.lib from pprint import pformat +from openpype.pipeline.editorial import ( + get_media_range_with_retimes, + otio_range_to_frame_range, + otio_range_with_handles +) class CollectOtioFrameRanges(pyblish.api.InstancePlugin): @@ -20,20 +24,21 @@ label = "Collect OTIO Frame Ranges" order = pyblish.api.CollectorOrder - 0.08 families = ["shot", "clip"] - hosts = ["resolve", "hiero", "flame"] + hosts = ["resolve", "hiero", "flame", "traypublisher"] def process(self, instance): # get basic variables otio_clip = instance.data["otioClip"] workfile_start = instance.data["workfileFrameStart"] + workfile_source_duration = instance.data.get("shotDurationFromSource") # get ranges otio_tl_range = otio_clip.range_in_parent() otio_src_range = otio_clip.source_range otio_avalable_range = otio_clip.available_range() - otio_tl_range_handles = openpype.lib.otio_range_with_handles( + otio_tl_range_handles = otio_range_with_handles( otio_tl_range, instance) - otio_src_range_handles = openpype.lib.otio_range_with_handles( + otio_src_range_handles = otio_range_with_handles( otio_src_range, instance) # get source avalable start frame @@ -42,7 +47,7 @@ otio_avalable_range.start_time.rate) # convert to frames - range_convert = openpype.lib.otio_range_to_frame_range + range_convert = otio_range_to_frame_range tl_start, tl_end = range_convert(otio_tl_range) tl_start_h, tl_end_h = range_convert(otio_tl_range_handles) src_start, src_end = range_convert(otio_src_range) @@ -51,17 +56,29 @@ frame_end = frame_start + otio.opentime.to_frames( otio_tl_range.duration, otio_tl_range.duration.rate) - 1 + # in case the clip is retimed and the frame range should not be retimed + if workfile_source_duration: + # get available range trimmed with processed retimes + retimed_attributes = get_media_range_with_retimes( + otio_clip, 0, 0) + self.log.debug( + ">> retimed_attributes: {}".format(retimed_attributes)) + media_in = int(retimed_attributes["mediaIn"]) + media_out = int(retimed_attributes["mediaOut"]) + frame_end = frame_start + (media_out - media_in) + 1 + self.log.debug(frame_end) + data = { "frameStart": frame_start, "frameEnd": frame_end, "clipIn": tl_start, - "clipOut": tl_end, + "clipOut": tl_end - 1, "clipInH": tl_start_h, - "clipOutH": tl_end_h, + "clipOutH": tl_end_h - 1, "sourceStart": src_starting_from + src_start, - "sourceEnd": src_starting_from + src_end, + "sourceEnd": src_starting_from + src_end - 1, "sourceStartH": src_starting_from + src_start_h, - "sourceEndH": src_starting_from + src_end_h, + "sourceEndH": src_starting_from + src_end_h - 1, } instance.data.update(data) self.log.debug( diff --git a/openpype/plugins/publish/collect_otio_subset_resources.py
b/openpype/plugins/publish/collect_otio_subset_resources.py index 7c11462ef0..e72c12d9a9 100644 --- a/openpype/plugins/publish/collect_otio_subset_resources.py +++ b/openpype/plugins/publish/collect_otio_subset_resources.py @@ -1,4 +1,3 @@ -# TODO: this head doc string """ Requires: instance -> otio_clip @@ -10,8 +9,11 @@ import os import clique import opentimelineio as otio import pyblish.api -import openpype -from openpype.lib import editorial +from openpype.pipeline.editorial import ( + get_media_range_with_retimes, + range_from_frames, + make_sequence_collection +) class CollectOtioSubsetResources(pyblish.api.InstancePlugin): @@ -43,7 +45,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): available_duration = otio_avalable_range.duration.value # get available range trimmed with processed retimes - retimed_attributes = editorial.get_media_range_with_retimes( + retimed_attributes = get_media_range_with_retimes( otio_clip, handle_start, handle_end) self.log.debug( ">> retimed_attributes: {}".format(retimed_attributes)) @@ -65,8 +67,8 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): a_frame_end_h = media_out + handle_end # create trimmed otio time range - trimmed_media_range_h = editorial.range_from_frames( - a_frame_start_h, (a_frame_end_h - a_frame_start_h + 1), + trimmed_media_range_h = range_from_frames( + a_frame_start_h, (a_frame_end_h - a_frame_start_h) + 1, media_fps ) trimmed_duration = trimmed_media_range_h.duration.value @@ -113,13 +115,13 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): # check in two way if it is sequence if hasattr(otio.schema, "ImageSequenceReference"): # for OpenTimelineIO 0.13 and newer - if isinstance(media_ref, - otio.schema.ImageSequenceReference): - is_sequence = True - else: - # for OpenTimelineIO 0.12 and older - if metadata.get("padding"): + if isinstance( + media_ref, + otio.schema.ImageSequenceReference + ): is_sequence = True + elif metadata.get("padding"): + is_sequence = True self.log.info( "frame_start-frame_end: {}-{}".format(frame_start, frame_end)) @@ -136,22 +138,22 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): padding=media_ref.frame_zero_padding ) collection.indexes.update( - [i for i in range(a_frame_start_h, (a_frame_end_h + 1))]) + list(range(a_frame_start_h, (a_frame_end_h + 1))) + ) - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) else: # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` path = media_ref.target_url - collection_data = openpype.lib.make_sequence_collection( + collection_data = make_sequence_collection( path, trimmed_media_range_h, metadata) self.staging_dir, collection = collection_data - self.log.debug(collection) - repre = self._create_representation( - frame_start, frame_end, collection=collection) + self.log.debug(collection) + repre = self._create_representation( + frame_start, frame_end, collection=collection) + + instance.data["originalBasename"] = collection.format("{head}") else: _trim = False dirname, filename = os.path.split(media_ref.target_url) @@ -166,6 +168,10 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): repre = self._create_representation( frame_start, frame_end, file=filename, trim=_trim) + instance.data["originalBasename"] = os.path.splitext(filename)[0] + + instance.data["originalDirname"] = self.staging_dir + if repre: # add representation to instance data instance.data["representations"].append(repre) @@ -195,7 
+201,7 @@ class CollectOtioSubsetResources(pyblish.api.InstancePlugin): if kwargs.get("collection"): collection = kwargs.get("collection") - files = [f for f in collection] + files = list(collection) ext = collection.format("{tail}") representation_data.update({ "name": ext[1:], @@ -217,7 +223,5 @@ }) if kwargs.get("trim") is True: - representation_data.update({ - "tags": ["trim"] - }) + representation_data["tags"] = ["trim"] return representation_data diff --git a/openpype/plugins/publish/collect_rendered_files.py b/openpype/plugins/publish/collect_rendered_files.py index 1005c38b9d..8f8d0a5eeb 100644 --- a/openpype/plugins/publish/collect_rendered_files.py +++ b/openpype/plugins/publish/collect_rendered_files.py @@ -1,7 +1,7 @@ """Loads publishing context from json and continues in publish process. Requires: - anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) + anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.4) Provides: context, instances -> All data from previous publishing process. @@ -11,7 +11,8 @@ import os import json import pyblish.api -from avalon import api + +from openpype.pipeline import legacy_io, KnownPublishError class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -19,7 +20,12 @@ This collector will try to find json files in provided `OPENPYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + Note: + We should split this collector and move the part which handles + reading of the file and its context from session data before anatomy + is collected, so instance creation dependent on anatomy can be done here. """ + order = pyblish.api.CollectorOrder - 0.2 # Keep "filesequence" for backwards compatibility of older jobs targets = ["filesequence", "farm"] @@ -117,23 +123,20 @@ def process(self, context): self._context = context - assert os.environ.get("OPENPYPE_PUBLISH_DATA"), ( - "Missing `OPENPYPE_PUBLISH_DATA`") + if not os.environ.get("OPENPYPE_PUBLISH_DATA"): + raise KnownPublishError("Missing `OPENPYPE_PUBLISH_DATA`") + + # QUESTION + # Do we support (or want to support) multiple files in the variable? + # - what if they have different context? paths = os.environ["OPENPYPE_PUBLISH_DATA"].split(os.pathsep) - project_name = os.environ.get("AVALON_PROJECT") - if project_name is None: - raise AssertionError( - "Environment `AVALON_PROJECT` was not found." - "Could not set project `root` which may cause issues."
- ) - - # TODO root filling should happen after collect Anatomy + # Using already collected Anatomy + anatomy = context.data["anatomy"] self.log.info("Getting root setting for project \"{}\"".format( - project_name + anatomy.project_name )) - anatomy = context.data["anatomy"] self.log.info("anatomy: {}".format(anatomy.roots)) try: session_is_set = False @@ -150,7 +153,7 @@ session_data["AVALON_WORKDIR"] = remapped self.log.info("Setting session using data from file") - api.Session.update(session_data) + legacy_io.Session.update(session_data) os.environ.update(session_data) session_is_set = True self._process_path(data, anatomy) diff --git a/openpype/plugins/publish/collect_resources_path.py b/openpype/plugins/publish/collect_resources_path.py index fa181301ee..4a5f9f1cc2 100644 --- a/openpype/plugins/publish/collect_resources_path.py +++ b/openpype/plugins/publish/collect_resources_path.py @@ -12,16 +12,20 @@ import os import copy import pyblish.api -from avalon import api class CollectResourcesPath(pyblish.api.InstancePlugin): - """Generate directory path where the files and resources will be stored""" + """Generate directory path where the files and resources will be stored. + + Collects folder name and file name from files, if they exist, for in-situ + publishing. + """ label = "Collect Resources Path" order = pyblish.api.CollectorOrder + 0.495 families = ["workfile", "pointcache", + "proxyAbc", "camera", "animation", "model", @@ -40,6 +44,7 @@ "rig", "plate", "look", + "mvLook", "yetiRig", "yeticache", "nukenodes", @@ -50,10 +55,14 @@ "source", "assembly", "fbx", + "gltf", "textures", "action", "background", - "effect" + "effect", + "staticMesh", + "skeletalMesh", + "xgen" ] def process(self, instance): @@ -81,11 +90,10 @@ else: # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)."
- ).format(project_name)) + ).format(anatomy.project_name)) file_path = anatomy_filled["publish"]["path"] # Directory diff --git a/openpype/plugins/publish/collect_scene_loaded_versions.py b/openpype/plugins/publish/collect_scene_loaded_versions.py index 6746757e5f..627d451f58 100644 --- a/openpype/plugins/publish/collect_scene_loaded_versions.py +++ b/openpype/plugins/publish/collect_scene_loaded_versions.py @@ -1,7 +1,7 @@ -from bson.objectid import ObjectId - import pyblish.api -from avalon import api, io + +from openpype.client import get_representations +from openpype.pipeline import registered_host class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): @@ -24,7 +24,7 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): ] def process(self, context): - host = api.registered_host() + host = registered_host() if host is None: self.log.warn("No registered host.") return @@ -35,20 +35,41 @@ class CollectSceneLoadedVersions(pyblish.api.ContextPlugin): return loaded_versions = [] - _containers = list(host.ls()) - _repr_ids = [ObjectId(c["representation"]) for c in _containers] - version_by_repr = { - str(doc["_id"]): doc["parent"] for doc in - io.find({"_id": {"$in": _repr_ids}}, projection={"parent": 1}) + containers = list(host.ls()) + repre_ids = { + container["representation"] + for container in containers } - for con in _containers: + project_name = context.data["projectName"] + repre_docs = get_representations( + project_name, + representation_ids=repre_ids, + fields=["_id", "parent"] + ) + repre_doc_by_str_id = { + str(doc["_id"]): doc + for doc in repre_docs + } + + # QUESTION should we add same representation id when loaded multiple + # times? + for con in containers: + repre_id = con["representation"] + repre_doc = repre_doc_by_str_id.get(repre_id) + if repre_doc is None: + self.log.warning(( + "Skipping container," + " did not find representation document. 
{}" + ).format(str(con))) + continue + # NOTE: # may have more then one representation that are same version version = { "subsetName": con["name"], - "representation": ObjectId(con["representation"]), - "version": version_by_repr[con["representation"]], # _id + "representation": repre_doc["_id"], + "version": repre_doc["parent"], } loaded_versions.append(version) diff --git a/openpype/plugins/publish/collect_scene_version.py b/openpype/plugins/publish/collect_scene_version.py index 917647c61a..fdbcb3cb9d 100644 --- a/openpype/plugins/publish/collect_scene_version.py +++ b/openpype/plugins/publish/collect_scene_version.py @@ -1,6 +1,8 @@ import os import pyblish.api -import openpype.api as pype + +from openpype.lib import get_version_from_path +from openpype.tests.lib import is_in_tests class CollectSceneVersion(pyblish.api.ContextPlugin): @@ -35,7 +37,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): # tests should be close to regular publish as possible if ( os.environ.get("HEADLESS_PUBLISH") - and not os.environ.get("IS_TEST") + and not is_in_tests() and context.data["hostName"] in self.skip_hosts_headless_publish): self.log.debug("Skipping for headless publishing") return @@ -46,7 +48,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): if '' in filename: return - version = pype.get_version_from_path(filename) + version = get_version_from_path(filename) assert version, "Cannot determine version" rootVersion = int(version) diff --git a/openpype/plugins/publish/collect_settings.py b/openpype/plugins/publish/collect_settings.py index d56eabd1b5..a418a6400c 100644 --- a/openpype/plugins/publish/collect_settings.py +++ b/openpype/plugins/publish/collect_settings.py @@ -1,5 +1,8 @@ from pyblish import api -from openpype.api import get_current_project_settings, get_system_settings +from openpype.settings import ( + get_current_project_settings, + get_system_settings, +) class CollectSettings(api.ContextPlugin): diff --git a/openpype/plugins/publish/collect_source_for_source.py b/openpype/plugins/publish/collect_source_for_source.py new file mode 100644 index 0000000000..aa94238b4f --- /dev/null +++ b/openpype/plugins/publish/collect_source_for_source.py @@ -0,0 +1,42 @@ +""" +Requires: + instance -> currentFile + instance -> source + +Provides: + instance -> originalBasename + instance -> originalDirname +""" + +import os + +import pyblish.api + + +class CollectSourceForSource(pyblish.api.InstancePlugin): + """Collects source location of file for instance. + + Used for 'source' template name which handles in place publishing. + For this kind of publishing files are present with correct file name + pattern and correct location. 
+ """ + + label = "Collect Source" + order = pyblish.api.CollectorOrder + 0.495 + + def process(self, instance): + # parse folder name and file name for online and source templates + # currentFile comes from hosts workfiles + # source comes from Publisher + current_file = instance.data.get("currentFile") + source = instance.data.get("source") + source_file = current_file or source + if source_file: + self.log.debug("Parsing paths for {}".format(source_file)) + if not instance.data.get("originalBasename"): + instance.data["originalBasename"] = \ + os.path.basename(source_file) + + if not instance.data.get("originalDirname"): + instance.data["originalDirname"] = \ + os.path.dirname(source_file) diff --git a/openpype/plugins/publish/extract_burnin.py b/openpype/plugins/publish/extract_burnin.py index b2ca8850b6..f113e61bb0 100644 --- a/openpype/plugins/publish/extract_burnin.py +++ b/openpype/plugins/publish/extract_burnin.py @@ -1,5 +1,4 @@ import os -import re import json import copy import tempfile @@ -8,22 +7,23 @@ import shutil import clique import six -import pyblish +import pyblish.api -import openpype -import openpype.api +from openpype import resources, PACKAGE_DIR +from openpype.pipeline import publish from openpype.lib import ( run_openpype_process, get_transcode_temp_directory, - convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, should_convert_for_ffmpeg, CREATE_NO_WINDOW ) +from openpype.lib.profiles_filtering import filter_profiles -class ExtractBurnin(openpype.api.Extractor): +class ExtractBurnin(publish.Extractor): """ Extractor to create video with pre-defined burnins from existing extracted video representation. @@ -34,6 +34,7 @@ class ExtractBurnin(openpype.api.Extractor): label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 + families = ["review", "burnin"] hosts = [ "nuke", @@ -41,6 +42,7 @@ class ExtractBurnin(openpype.api.Extractor): "shell", "hiero", "premiere", + "traypublisher", "standalonepublisher", "harmony", "fusion", @@ -52,6 +54,7 @@ class ExtractBurnin(openpype.api.Extractor): "flame" # "resolve" ] + optional = True positions = [ @@ -68,11 +71,15 @@ class ExtractBurnin(openpype.api.Extractor): "y_offset": 5 } - # Preset attributes + # Configurable by Settings profiles = None options = None def process(self, instance): + if not self.profiles: + self.log.warning("No profiles present for create burnin") + return + # QUESTION what is this for and should we raise an exception? if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") @@ -136,18 +143,29 @@ class ExtractBurnin(openpype.api.Extractor): return filtered_repres def main_process(self, instance): - # TODO get these data from context host_name = instance.context.data["hostName"] - task_name = os.environ["AVALON_TASK"] - family = self.main_family_from_instance(instance) + family = instance.data["family"] + task_data = instance.data["anatomyData"].get("task", {}) + task_name = task_data.get("name") + task_type = task_data.get("type") + subset = instance.data["subset"] + + filtering_criteria = { + "hosts": host_name, + "families": family, + "task_names": task_name, + "task_types": task_type, + "subset": subset + } + profile = filter_profiles(self.profiles, filtering_criteria, + logger=self.log) - # Find profile most matching current host, task and instance family - profile = self.find_matching_profile(host_name, task_name, family) if not profile: self.log.info(( "Skipped instance. 
None of profiles in presets are for" - " Host: \"{}\" | Family: \"{}\" | Task \"{}\"" - ).format(host_name, family, task_name)) + " Host: \"{}\" | Families: \"{}\" | Task \"{}\"" + " | Task type \"{}\" | Subset \"{}\" " + ).format(host_name, family, task_name, task_type, subset)) return self.log.debug("profile: {}".format(profile)) @@ -157,7 +175,8 @@ class ExtractBurnin(openpype.api.Extractor): if not burnin_defs: self.log.info(( "Skipped instance. Burnin definitions are not set for profile" - " Host: \"{}\" | Family: \"{}\" | Task \"{}\" | Profile \"{}\"" + " Host: \"{}\" | Families: \"{}\" | Task \"{}\"" + " | Profile \"{}\"" ).format(host_name, family, task_name, profile)) return @@ -187,8 +206,13 @@ class ExtractBurnin(openpype.api.Extractor): repre_files = repre["files"] if isinstance(repre_files, (tuple, list)): filename = repre_files[0] + src_filepaths = [ + os.path.join(src_repre_staging_dir, filename) + for filename in repre_files + ] else: filename = repre_files + src_filepaths = [os.path.join(src_repre_staging_dir, filename)] first_input_path = os.path.join(src_repre_staging_dir, filename) # Determine if representation requires pre conversion for ffmpeg @@ -209,11 +233,9 @@ class ExtractBurnin(openpype.api.Extractor): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + src_filepaths, new_staging_dir, - _temp_data["frameStart"], - _temp_data["frameEnd"], self.log ) @@ -221,11 +243,17 @@ class ExtractBurnin(openpype.api.Extractor): filled_anatomy = anatomy.format_all(burnin_data) burnin_data["anatomy"] = filled_anatomy.get_solved() - # Add context data burnin_data. - burnin_data["custom"] = ( + custom_data = copy.deepcopy( + instance.data.get("customData") or {} + ) + # Backwards compatibility (since 2022/04/07) + custom_data.update( instance.data.get("custom_burnin_data") or {} ) + # Add context data burnin_data. + burnin_data["custom"] = custom_data + # Add source camera name to burnin data camera_name = repre.get("camera_name") if camera_name: @@ -390,7 +418,7 @@ class ExtractBurnin(openpype.api.Extractor): # Use OpenPype default font if not font_filepath: - font_filepath = openpype.api.resources.get_liberation_font_path() + font_filepath = resources.get_liberation_font_path() burnin_options["font"] = font_filepath @@ -458,7 +486,7 @@ class ExtractBurnin(openpype.api.Extractor): burnin_data.update({ "version": int(version), - "comment": context.data.get("comment") or "" + "comment": instance.data["comment"] }) intent_label = context.data.get("intent") or "" @@ -478,12 +506,6 @@ class ExtractBurnin(openpype.api.Extractor): "frame_end_handle": frame_end_handle } - # use explicit username for webpublishes as rewriting - # OPENPYPE_USERNAME might have side effects - webpublish_user_name = os.environ.get("WEBPUBLISH_OPENPYPE_USERNAME") - if webpublish_user_name: - burnin_data["username"] = webpublish_user_name - self.log.debug( "Basic burnin_data: {}".format(json.dumps(burnin_data, indent=4)) ) @@ -689,130 +711,6 @@ class ExtractBurnin(openpype.api.Extractor): ) }) - def find_matching_profile(self, host_name, task_name, family): - """ Filter profiles by Host name, Task name and main Family. - - Filtering keys are "hosts" (list), "tasks" (list), "families" (list). - If key is not find or is empty than it's expected to match. - - Args: - profiles (list): Profiles definition from presets. - host_name (str): Current running host name. 
- task_name (str): Current context task name. - family (str): Main family of current Instance. - - Returns: - dict/None: Return most matching profile or None if none of profiles - match at least one criteria. - """ - - matching_profiles = None - highest_points = -1 - for profile in self.profiles or tuple(): - profile_points = 0 - profile_value = [] - - # Host filtering - host_names = profile.get("hosts") - match = self.validate_value_by_regexes(host_name, host_names) - if match == -1: - continue - profile_points += match - profile_value.append(bool(match)) - - # Task filtering - task_names = profile.get("tasks") - match = self.validate_value_by_regexes(task_name, task_names) - if match == -1: - continue - profile_points += match - profile_value.append(bool(match)) - - # Family filtering - families = profile.get("families") - match = self.validate_value_by_regexes(family, families) - if match == -1: - continue - profile_points += match - profile_value.append(bool(match)) - - if profile_points > highest_points: - matching_profiles = [] - highest_points = profile_points - - if profile_points == highest_points: - profile["__value__"] = profile_value - matching_profiles.append(profile) - - if not matching_profiles: - return - - if len(matching_profiles) == 1: - return matching_profiles[0] - - return self.profile_exclusion(matching_profiles) - - def profile_exclusion(self, matching_profiles): - """Find out most matching profile by host, task and family match. - - Profiles are selectivelly filtered. Each profile should have - "__value__" key with list of booleans. Each boolean represents - existence of filter for specific key (host, taks, family). - Profiles are looped in sequence. In each sequence are split into - true_list and false_list. For next sequence loop are used profiles in - true_list if there are any profiles else false_list is used. - - Filtering ends when only one profile left in true_list. Or when all - existence booleans loops passed, in that case first profile from left - profiles is returned. - - Args: - matching_profiles (list): Profiles with same values. - - Returns: - dict: Most matching profile. - """ - self.log.info( - "Search for first most matching profile in match order:" - " Host name -> Task name -> Family." - ) - # Filter all profiles with highest points value. First filter profiles - # with matching host if there are any then filter profiles by task - # name if there are any and lastly filter by family. Else use first in - # list. - idx = 0 - final_profile = None - while True: - profiles_true = [] - profiles_false = [] - for profile in matching_profiles: - value = profile["__value__"] - # Just use first profile when idx is greater than values. - if not idx < len(value): - final_profile = profile - break - - if value[idx]: - profiles_true.append(profile) - else: - profiles_false.append(profile) - - if final_profile is not None: - break - - if profiles_true: - matching_profiles = profiles_true - else: - matching_profiles = profiles_false - - if len(matching_profiles) == 1: - final_profile = matching_profiles[0] - break - idx += 1 - - final_profile.pop("__value__") - return final_profile - def filter_burnins_defs(self, profile, instance): """Filter outputs by their values from settings. 
@@ -905,56 +803,6 @@ class ExtractBurnin(openpype.api.Extractor): return True return False - def compile_list_of_regexes(self, in_list): - """Convert strings in entered list to compiled regex objects.""" - regexes = [] - if not in_list: - return regexes - - for item in in_list: - if not item: - continue - - try: - regexes.append(re.compile(item)) - except TypeError: - self.log.warning(( - "Invalid type \"{}\" value \"{}\"." - " Expected string based object. Skipping." - ).format(str(type(item)), str(item))) - - return regexes - - def validate_value_by_regexes(self, value, in_list): - """Validate in any regexe from list match entered value. - - Args: - in_list (list): List with regexes. - value (str): String where regexes is checked. - - Returns: - int: Returns `0` when list is not set or is empty. Returns `1` when - any regex match value and returns `-1` when none of regexes - match value entered. - """ - if not in_list: - return 0 - - output = -1 - regexes = self.compile_list_of_regexes(in_list) - for regex in regexes: - if re.match(regex, value): - output = 1 - break - return output - - def main_family_from_instance(self, instance): - """Return main family of entered instance.""" - family = instance.data.get("family") - if not family: - family = instance.data["families"][0] - return family - def families_from_instance(self, instance): """Return all families of entered instance.""" families = [] @@ -971,7 +819,7 @@ class ExtractBurnin(openpype.api.Extractor): """Return path to python script for burnin processing.""" scriptpath = os.path.normpath( os.path.join( - openpype.PACKAGE_DIR, + PACKAGE_DIR, "scripts", "otio_burnin.py" ) diff --git a/openpype/plugins/publish/extract_colorspace_data.py b/openpype/plugins/publish/extract_colorspace_data.py new file mode 100644 index 0000000000..611fb91cbb --- /dev/null +++ b/openpype/plugins/publish/extract_colorspace_data.py @@ -0,0 +1,47 @@ +import pyblish.api +from openpype.pipeline import publish + + +class ExtractColorspaceData(publish.ExtractorColormanaged): + """ Inject Colorspace data to available representations. 
+ + Input data: + - context.data[colorspace_config_path]: + for anatomy formatting of possible template tokens in config path + - context.data[colorspace_config_path]: + for resolving project and host related config.ocio + - context.data[colorspace_file_rules]: + for resolving matched file rule from representation file name + and adding it to representation + + Output data: + representation[colorspaceData] = { + "colorspace": "linear", + "config": { + "path": "/abs/path/to/config.ocio", + "template": "{project[root]}/path/to/config.ocio" + } + } + """ + label = "Extract Colorspace data" + order = pyblish.api.ExtractorOrder + 0.49 + + def process(self, instance): + representations = instance.data.get("representations") + if not representations: + self.log.info("No representations at instance : `{}`".format( + instance)) + return + + # get colorspace settings + context = instance.context + + # loop representations + for representation in representations: + # skip if colorspaceData is already at representation + if representation.get("colorspaceData"): + continue + + self.set_representation_colorspace( + representation, context + ) diff --git a/openpype/plugins/publish/extract_hierarchy_avalon.py b/openpype/plugins/publish/extract_hierarchy_avalon.py index b062a9c4b5..b2a6adc210 100644 --- a/openpype/plugins/publish/extract_hierarchy_avalon.py +++ b/openpype/plugins/publish/extract_hierarchy_avalon.py @@ -1,6 +1,12 @@ -import pyblish.api -from avalon import io +import collections from copy import deepcopy +import pyblish.api +from openpype.client import ( + get_assets, + get_archived_assets +) +from openpype.pipeline import legacy_io + class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): """Create entities in Avalon based on collected data.""" @@ -10,17 +16,305 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): families = ["clip", "shot"] def process(self, context): - # processing starts here if "hierarchyContext" not in context.data: self.log.info("skipping IntegrateHierarchyToAvalon") return - hierarchy_context = deepcopy(context.data["hierarchyContext"]) - if not io.Session: - io.install() + if not legacy_io.Session: + legacy_io.install() + + hierarchy_context = self._get_active_assets(context) + self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) + + project_name = context.data["projectName"] + asset_names = self.extract_asset_names(hierarchy_context) + + asset_docs_by_name = {} + for asset_doc in get_assets(project_name, asset_names=asset_names): + name = asset_doc["name"] + asset_docs_by_name[name] = asset_doc + + archived_asset_docs_by_name = collections.defaultdict(list) + for asset_doc in get_archived_assets( + project_name, asset_names=asset_names + ): + name = asset_doc["name"] + archived_asset_docs_by_name[name].append(asset_doc) + + project_doc = None + hierarchy_queue = collections.deque() + for name, data in hierarchy_context.items(): + hierarchy_queue.append((name, data, None)) + + while hierarchy_queue: + item = hierarchy_queue.popleft() + name, entity_data, parent = item + + entity_type = entity_data["entity_type"] + if entity_type.lower() == "project": + new_parent = project_doc = self.sync_project( + context, + entity_data + ) + + else: + new_parent = self.sync_asset( + name, + entity_data, + parent, + project_doc, + asset_docs_by_name, + archived_asset_docs_by_name + ) + # make sure all relative instances have correct avalon data + self._set_avalon_data_to_relative_instances( + context, + project_name, + new_parent + ) + + children = 
entity_data.get("childs") + if not children: + continue + + for child_name, child_data in children.items(): + hierarchy_queue.append((child_name, child_data, new_parent)) + + def extract_asset_names(self, hierarchy_context): + """Extract all possible asset names from hierarchy context. + + Args: + hierarchy_context (Dict[str, Any]): Nested hierarchy structure. + + Returns: + Set[str]: All asset names from the hierarchy structure. + """ + + hierarchy_queue = collections.deque() + for name, data in hierarchy_context.items(): + hierarchy_queue.append((name, data)) + + asset_names = set() + while hierarchy_queue: + item = hierarchy_queue.popleft() + name, data = item + if data["entity_type"].lower() != "project": + asset_names.add(name) + + children = data.get("childs") + if children: + for child_name, child_data in children.items(): + hierarchy_queue.append((child_name, child_data)) + return asset_names + + def sync_project(self, context, entity_data): + project_doc = context.data["projectEntity"] + + if "data" not in project_doc: + project_doc["data"] = {} + current_data = project_doc["data"] + + changes = {} + entity_type = entity_data["entity_type"] + if current_data.get("entityType") != entity_type: + changes["entityType"] = entity_type + + # Custom attributes. + attributes = entity_data.get("custom_attributes") or {} + for key, value in attributes.items(): + if key not in current_data or current_data[key] != value: + update_key = "data.{}".format(key) + changes[update_key] = value + current_data[key] = value + + if changes: + # Update entity data with input data + legacy_io.update_one( + {"_id": project_doc["_id"]}, + {"$set": changes} + ) + return project_doc + + def sync_asset( + self, + asset_name, + entity_data, + parent, + project, + asset_docs_by_name, + archived_asset_docs_by_name + ): + # Prepare data for new asset or for update comparison + data = { + "entityType": entity_data["entity_type"] + } + + # Custom attributes. 
+ attributes = entity_data.get("custom_attributes") or {} + for key, value in attributes.items(): + data[key] = value + + data["inputs"] = entity_data.get("inputs") or [] + + # Parents and visual parent are empty if parent is project + parents = [] + parent_id = None + if project["_id"] != parent["_id"]: + parent_id = parent["_id"] + # Use parent's parents as source value + parents.extend(parent["data"]["parents"]) + # Add parent's name to parents + parents.append(parent["name"]) + + data["visualParent"] = parent_id + data["parents"] = parents + + asset_doc = asset_docs_by_name.get(asset_name) + # --- Create/Unarchive asset and end --- + if not asset_doc: + # Just use tasks from entity data as they are + # - this is different from the case when tasks are updated + data["tasks"] = entity_data.get("tasks") or {} + archived_asset_doc = None + for archived_entity in archived_asset_docs_by_name[asset_name]: + archived_parents = ( + archived_entity + .get("data", {}) + .get("parents") + ) + if data["parents"] == archived_parents: + archived_asset_doc = archived_entity + break + + # Create entity if doesn't exist + if archived_asset_doc is None: + return self.create_avalon_asset( + asset_name, data, project + ) + + return self.unarchive_entity( + archived_asset_doc, data, project + ) + + # --- Update existing asset --- + # Make sure current entity has "data" key + if "data" not in asset_doc: + asset_doc["data"] = {} + cur_entity_data = asset_doc["data"] + cur_entity_tasks = cur_entity_data.get("tasks") or {} + + # Tasks + data["tasks"] = {} + new_tasks = entity_data.get("tasks") or {} + for task_name, task_info in new_tasks.items(): + task_info = deepcopy(task_info) + if task_name in cur_entity_tasks: + src_task_info = deepcopy(cur_entity_tasks[task_name]) + src_task_info.update(task_info) + task_info = src_task_info + + data["tasks"][task_name] = task_info + + changes = {} + for key, value in data.items(): + if key not in cur_entity_data or value != cur_entity_data[key]: + update_key = "data.{}".format(key) + changes[update_key] = value + cur_entity_data[key] = value + + # Update asset in database if necessary + if changes: + # Update entity data with input data + legacy_io.update_one( + {"_id": asset_doc["_id"]}, + {"$set": changes} + ) + return asset_doc + + def unarchive_entity(self, archived_doc, data, project): + # Unarchived asset should not use same data + asset_doc = { + "_id": archived_doc["_id"], + "schema": "openpype:asset-3.0", + "name": archived_doc["name"], + "parent": project["_id"], + "type": "asset", + "data": data + } + legacy_io.replace_one( + {"_id": archived_doc["_id"]}, + asset_doc + ) + + return asset_doc + + def create_avalon_asset(self, name, data, project): + asset_doc = { + "schema": "openpype:asset-3.0", + "name": name, + "parent": project["_id"], + "type": "asset", + "data": data + } + self.log.debug("Creating asset: {}".format(asset_doc)) + asset_doc["_id"] = legacy_io.insert_one(asset_doc).inserted_id + + return asset_doc + + def _set_avalon_data_to_relative_instances( + self, + context, + project_name, + asset_doc + ): + asset_name = asset_doc["name"] + new_parents = asset_doc["data"]["parents"] + hierarchy = "/".join(new_parents) + parent_name = project_name + if new_parents: + parent_name = new_parents[-1] + + for instance in context: + # Skip if instance asset does not match + instance_asset_name = instance.data.get("asset") + if asset_name != instance_asset_name: + continue + + instance_asset_doc = instance.data.get("assetEntity") + # Update asset entity with 
new possible changes of asset document + instance.data["assetEntity"] = asset_doc + + # Update anatomy data if asset was not set on instance + if not instance_asset_doc: + instance.data["anatomyData"].update({ + "hierarchy": hierarchy, + "task": {}, + "parent": parent_name + }) + + def _get_active_assets(self, context): + """ Returns only asset dictionary. + Usually the last part of deep dictionary which + is not having any children + """ + def get_pure_hierarchy_data(input_dict): + input_dict_copy = deepcopy(input_dict) + for key in input_dict.keys(): + self.log.debug("__ key: {}".format(key)) + # check if child key is available + if input_dict[key].get("childs"): + # loop deeper + input_dict_copy[ + key]["childs"] = get_pure_hierarchy_data( + input_dict[key]["childs"]) + elif key not in active_assets: + input_dict_copy.pop(key, None) + return input_dict_copy + + hierarchy_context = context.data["hierarchyContext"] active_assets = [] - # filter only the active publishing insatnces + # filter only the active publishing instances for instance in context: if instance.data.get("publish") is False: continue @@ -31,172 +325,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): active_assets.append(instance.data["asset"]) # remove duplicity in list - self.active_assets = list(set(active_assets)) - self.log.debug("__ self.active_assets: {}".format(self.active_assets)) + active_assets = list(set(active_assets)) + self.log.debug("__ active_assets: {}".format(active_assets)) - hierarchy_context = self._get_assets(hierarchy_context) - - self.log.debug("__ hierarchy_context: {}".format(hierarchy_context)) - input_data = context.data["hierarchyContext"] = hierarchy_context - - self.project = None - self.import_to_avalon(input_data) - - def import_to_avalon(self, input_data, parent=None): - for name in input_data: - self.log.info("input_data[name]: {}".format(input_data[name])) - entity_data = input_data[name] - entity_type = entity_data["entity_type"] - - data = {} - data["entityType"] = entity_type - - # Custom attributes. - for k, val in entity_data.get("custom_attributes", {}).items(): - data[k] = val - - if entity_type.lower() != "project": - data["inputs"] = entity_data.get("inputs", []) - - # Tasks. - tasks = entity_data.get("tasks", {}) - if tasks is not None or len(tasks) > 0: - data["tasks"] = tasks - parents = [] - visualParent = None - # do not store project"s id as visualParent - if self.project is not None: - if self.project["_id"] != parent["_id"]: - visualParent = parent["_id"] - parents.extend( - parent.get("data", {}).get("parents", []) - ) - parents.append(parent["name"]) - data["visualParent"] = visualParent - data["parents"] = parents - - update_data = True - # Process project - if entity_type.lower() == "project": - entity = io.find_one({"type": "project"}) - # TODO: should be in validator? - assert (entity is not None), "Did not find project in DB" - - # get data from already existing project - cur_entity_data = entity.get("data") or {} - cur_entity_data.update(data) - data = cur_entity_data - - self.project = entity - # Raise error if project or parent are not set - elif self.project is None or parent is None: - raise AssertionError( - "Collected items are not in right order!" 
- ) - # Else process assset - else: - entity = io.find_one({"type": "asset", "name": name}) - if entity: - # Do not override data, only update - cur_entity_data = entity.get("data") or {} - entity_tasks = cur_entity_data["tasks"] or {} - - # create tasks as dict by default - if not entity_tasks: - cur_entity_data["tasks"] = entity_tasks - - new_tasks = data.pop("tasks", {}) - if "tasks" not in cur_entity_data and not new_tasks: - continue - for task_name in new_tasks: - if task_name in entity_tasks.keys(): - continue - cur_entity_data["tasks"][task_name] = new_tasks[ - task_name] - cur_entity_data.update(data) - data = cur_entity_data - else: - # Skip updating data - update_data = False - - archived_entities = io.find({ - "type": "archived_asset", - "name": name - }) - unarchive_entity = None - for archived_entity in archived_entities: - archived_parents = ( - archived_entity - .get("data", {}) - .get("parents") - ) - if data["parents"] == archived_parents: - unarchive_entity = archived_entity - break - - if unarchive_entity is None: - # Create entity if doesn"t exist - entity = self.create_avalon_asset(name, data) - else: - # Unarchive if entity was archived - entity = self.unarchive_entity(unarchive_entity, data) - - if update_data: - # Update entity data with input data - io.update_many( - {"_id": entity["_id"]}, - {"$set": {"data": data}} - ) - - if "childs" in entity_data: - self.import_to_avalon(entity_data["childs"], entity) - - def unarchive_entity(self, entity, data): - # Unarchived asset should not use same data - new_entity = { - "_id": entity["_id"], - "schema": "openpype:asset-3.0", - "name": entity["name"], - "parent": self.project["_id"], - "type": "asset", - "data": data - } - io.replace_one( - {"_id": entity["_id"]}, - new_entity - ) - return new_entity - - def create_avalon_asset(self, name, data): - item = { - "schema": "openpype:asset-3.0", - "name": name, - "parent": self.project["_id"], - "type": "asset", - "data": data - } - self.log.debug("Creating asset: {}".format(item)) - entity_id = io.insert_one(item).inserted_id - - return io.find_one({"_id": entity_id}) - - def _get_assets(self, input_dict): - """ Returns only asset dictionary. 
- Usually the last part of deep dictionary which - is not having any children - """ - input_dict_copy = deepcopy(input_dict) - - for key in input_dict.keys(): - self.log.debug("__ key: {}".format(key)) - # check if child key is available - if input_dict[key].get("childs"): - # loop deeper - input_dict_copy[key]["childs"] = self._get_assets( - input_dict[key]["childs"]) - else: - # filter out unwanted assets - if key not in self.active_assets: - input_dict_copy.pop(key, None) - - return input_dict_copy + return get_pure_hierarchy_data(hierarchy_context) diff --git a/openpype/plugins/publish/extract_jpeg_exr.py b/openpype/plugins/publish/extract_jpeg_exr.py deleted file mode 100644 index 468ed96199..0000000000 --- a/openpype/plugins/publish/extract_jpeg_exr.py +++ /dev/null @@ -1,173 +0,0 @@ -import os - -import pyblish.api -from openpype.lib import ( - get_ffmpeg_tool_path, - - run_subprocess, - path_to_subprocess_arg, - - get_transcode_temp_directory, - convert_for_ffmpeg, - should_convert_for_ffmpeg -) - -import shutil - - -class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Create jpg thumbnail from sequence using ffmpeg""" - - label = "Extract Jpeg EXR" - order = pyblish.api.ExtractorOrder - families = [ - "imagesequence", "render", "render2d", - "source", "plate", "take" - ] - hosts = ["shell", "fusion", "resolve"] - enabled = False - - # presetable attribute - ffmpeg_args = None - - def process(self, instance): - self.log.info("subset {}".format(instance.data['subset'])) - - # skip crypto passes. - # TODO: This is just a quick fix and has its own side-effects - it is - # affecting every subset name with `crypto` in its name. - # This must be solved properly, maybe using tags on - # representation that can be determined much earlier and - # with better precision. - if 'crypto' in instance.data['subset'].lower(): - self.log.info("Skipping crypto passes.") - return - - # Skip if review not set. - if not instance.data.get("review", True): - self.log.info("Skipping - no review set on instance.") - return - - filtered_repres = self._get_filtered_repres(instance) - for repre in filtered_repres: - repre_files = repre["files"] - if not isinstance(repre_files, (list, tuple)): - input_file = repre_files - else: - file_index = int(float(len(repre_files)) * 0.5) - input_file = repre_files[file_index] - - stagingdir = os.path.normpath(repre["stagingDir"]) - - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("input {}".format(full_input_path)) - - do_convert = should_convert_for_ffmpeg(full_input_path) - # If result is None the requirement of conversion can't be - # determined - if do_convert is None: - self.log.info(( - "Can't determine if representation requires conversion." - " Skipped." - )) - continue - - # Do conversion if needed - # - change staging dir of source representation - # - must be set back after output definitions processing - convert_dir = None - if do_convert: - convert_dir = get_transcode_temp_directory() - filename = os.path.basename(full_input_path) - convert_for_ffmpeg( - full_input_path, - convert_dir, - None, - None, - self.log - ) - full_input_path = os.path.join(convert_dir, filename) - - filename = os.path.splitext(input_file)[0] - if not filename.endswith('.'): - filename += "." 
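 
The plugin being removed here assembled its ffmpeg call as a single shell string. For reference, a hedged sketch of an equivalent single-frame JPEG extraction using plain list arguments; the file paths are placeholders and OpenPype would normally resolve the binary via get_ffmpeg_tool_path():

    import subprocess

    ffmpeg_path = "ffmpeg"  # placeholder; OpenPype resolves its own binary
    cmd = [
        ffmpeg_path,
        "-y",                     # overwrite the output if it already exists
        "-i", "render.0050.exr",  # roughly the middle frame of the sequence
        "-vframes", "1",          # emit a single frame
        "render.0050.jpg",
    ]
    subprocess.run(cmd, check=True)
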
- jpeg_file = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpeg_file) - - self.log.info("output {}".format(full_output_path)) - - ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - ffmpeg_args = self.ffmpeg_args or {} - - jpeg_items = [] - jpeg_items.append(path_to_subprocess_arg(ffmpeg_path)) - # override file if already exists - jpeg_items.append("-y") - # use same input args like with mov - jpeg_items.extend(ffmpeg_args.get("input") or []) - # input file - jpeg_items.append("-i {}".format( - path_to_subprocess_arg(full_input_path) - )) - # output arguments from presets - jpeg_items.extend(ffmpeg_args.get("output") or []) - - # If its a movie file, we just want one frame. - if repre["ext"] == "mov": - jpeg_items.append("-vframes 1") - - # output file - jpeg_items.append(path_to_subprocess_arg(full_output_path)) - - subprocess_command = " ".join(jpeg_items) - - # run subprocess - self.log.debug("{}".format(subprocess_command)) - try: # temporary until oiiotool is supported cross platform - run_subprocess( - subprocess_command, shell=True, logger=self.log - ) - except RuntimeError as exp: - if "Compression" in str(exp): - self.log.debug( - "Unsupported compression on input files. Skipping!!!" - ) - return - self.log.warning("Conversion crashed", exc_info=True) - raise - - new_repre = { - "name": "thumbnail", - "ext": "jpg", - "files": jpeg_file, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ["thumbnail"] - } - - # adding representation - self.log.debug("Adding: {}".format(new_repre)) - instance.data["representations"].append(new_repre) - - # Cleanup temp folder - if convert_dir is not None and os.path.exists(convert_dir): - shutil.rmtree(convert_dir) - - def _get_filtered_repres(self, instance): - filtered_repres = [] - src_repres = instance.data.get("representations") or [] - for repre in src_repres: - self.log.debug(repre) - tags = repre.get("tags") or [] - valid = "review" in tags or "thumb-nuke" in tags - if not valid: - continue - - if not repre.get("files"): - self.log.info(( - "Representation \"{}\" don't have files. 
Skipping" - ).format(repre["name"])) - continue - - filtered_repres.append(repre) - return filtered_repres diff --git a/openpype/plugins/publish/extract_otio_audio_tracks.py b/openpype/plugins/publish/extract_otio_audio_tracks.py index 00c1748cdc..e19b7eeb13 100644 --- a/openpype/plugins/publish/extract_otio_audio_tracks.py +++ b/openpype/plugins/publish/extract_otio_audio_tracks.py @@ -1,9 +1,8 @@ import os import pyblish -import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, - path_to_subprocess_arg + run_subprocess ) import tempfile import opentimelineio as otio @@ -57,15 +56,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): audio_inputs.insert(0, empty) # create cmd - cmd = path_to_subprocess_arg(self.ffmpeg_path) + " " - cmd += self.create_cmd(audio_inputs) - cmd += path_to_subprocess_arg(audio_temp_fpath) - - # run subprocess - self.log.debug("Executing: {}".format(cmd)) - openpype.api.run_subprocess( - cmd, shell=True, logger=self.log - ) + self.mix_audio(audio_inputs, audio_temp_fpath) # remove empty os.remove(empty["mediaPath"]) @@ -110,9 +101,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): # run subprocess self.log.debug("Executing: {}".format(" ".join(cmd))) - openpype.api.run_subprocess( - cmd, logger=self.log - ) + run_subprocess(cmd, logger=self.log) else: audio_fpath = recycling_file.pop() @@ -233,7 +222,7 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): # run subprocess self.log.debug("Executing: {}".format(" ".join(cmd))) - openpype.api.run_subprocess( + run_subprocess( cmd, logger=self.log ) @@ -245,46 +234,80 @@ class ExtractOtioAudioTracks(pyblish.api.ContextPlugin): "durationSec": max_duration_sec } - def create_cmd(self, inputs): + def mix_audio(self, audio_inputs, audio_temp_fpath): """Creating multiple input cmd string Args: - inputs (list): list of input dicts. Order mater. + audio_inputs (list): list of input dicts. Order mater. 
        Returns:
            str: the command body
-        """
+
+        longest_input = 0
+        for audio_input in audio_inputs:
+            audio_len = audio_input["durationSec"]
+            if audio_len > longest_input:
+                longest_input = audio_len
+
         # create cmd segments
-        _inputs = ""
-        _filters = "-filter_complex \""
-        _channels = ""
-        for index, input in enumerate(inputs):
-            input_format = input.copy()
-            input_format.update({"i": index})
-            input_format["mediaPath"] = path_to_subprocess_arg(
-                input_format["mediaPath"]
+        input_args = []
+        filters = []
+        tag_names = []
+        for index, audio_input in enumerate(audio_inputs):
+            input_args.extend([
+                "-ss", str(audio_input["startSec"]),
+                "-t", str(audio_input["durationSec"]),
+                "-i", audio_input["mediaPath"]
+            ])
+
+            # Output tag of a filtered audio input
+            tag_name = "[r{}]".format(index)
+            tag_names.append(tag_name)
+            # Delay the audio by the item's delay
+            filters.append("[{}]adelay={}:all=1{}".format(
+                index, audio_input["delayMilSec"], tag_name
+            ))
+
+        # Mixing filter
+        # - dropout transition (when the remaining audio gets louder) is set
+        #   higher than any input audio item
+        # - volume is set to the number of inputs - each mix adds 1/n volume
+        #   where n is the input index (for more info read the ffmpeg docs
+        #   and send a giftcard to the contributor)
+        filters.append(
+            (
+                "{}amix=inputs={}:duration=first:"
+                "dropout_transition={},volume={}[a]"
+            ).format(
+                "".join(tag_names),
+                len(audio_inputs),
+                (longest_input * 1000) + 1000,
+                len(audio_inputs),
             )
+        )
 
-            _inputs += (
-                "-ss {startSec} "
-                "-t {durationSec} "
-                "-i {mediaPath} "
-            ).format(**input_format)
+        # Store filters in a file (separated by ',')
+        # - this avoids the "command too long" issue in ffmpeg
+        with tempfile.NamedTemporaryFile(
+            delete=False, mode="w", suffix=".txt"
+        ) as tmp_file:
+            filters_tmp_filepath = tmp_file.name
+            tmp_file.write(",".join(filters))
 
-            _filters += "[{i}]adelay={delayMilSec}:all=1[r{i}]; ".format(
-                **input_format)
-            _channels += "[r{}]".format(index)
+        args = [self.ffmpeg_path]
+        args.extend(input_args)
+        args.extend([
+            "-filter_complex_script", filters_tmp_filepath,
+            "-map", "[a]"
+        ])
+        args.append(audio_temp_fpath)
 
-        # merge all cmd segments together
-        cmd = _inputs + _filters + _channels
-        cmd += str(
-            "amix=inputs={inputs}:duration=first:"
-            "dropout_transition=1000,volume={inputs}[a]\" "
-        ).format(inputs=len(inputs))
-        cmd += "-map \"[a]\" "
+        # run subprocess
+        self.log.debug("Executing: {}".format(args))
+        run_subprocess(args, logger=self.log)
 
-        return cmd
+        os.remove(filters_tmp_filepath)
 
     def create_temp_file(self, name):
         """Create temp wav file
diff --git a/openpype/plugins/publish/extract_otio_file.py b/openpype/plugins/publish/extract_otio_file.py
index 3bd217d5d4..1a6a82117d 100644
--- a/openpype/plugins/publish/extract_otio_file.py
+++ b/openpype/plugins/publish/extract_otio_file.py
@@ -1,10 +1,11 @@
 import os
 import pyblish.api
-import openpype.api
 import opentimelineio as otio
+from openpype.pipeline import publish
 
-class ExtractOTIOFile(openpype.api.Extractor):
+
+class ExtractOTIOFile(publish.Extractor):
     """
     Extractor that exports an OTIO file
     """
@@ -12,9 +13,11 @@ class ExtractOTIOFile(openpype.api.Extractor):
     label = "Extract OTIO file"
     order = pyblish.api.ExtractorOrder - 0.45
     families = ["workfile"]
-    hosts = ["resolve", "hiero"]
+    hosts = ["resolve", "hiero", "traypublisher"]
 
     def process(self, instance):
+        if not instance.context.data.get("otioTimeline"):
+            return
         # create representation data
         if "representations" not in instance.data:
             instance.data["representations"] = []
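
The rewritten mix_audio above writes its filter graph to a script file instead of one long -filter_complex string. A hedged, standalone sketch of the adelay/amix graph it produces, with two illustrative inputs:

    audio_inputs = [
        {"delayMilSec": 0, "durationSec": 3},
        {"delayMilSec": 1500, "durationSec": 2},
    ]
    longest_input = max(item["durationSec"] for item in audio_inputs)

    filters = []
    tag_names = []
    for index, audio_input in enumerate(audio_inputs):
        tag_name = "[r{}]".format(index)
        tag_names.append(tag_name)
        # Shift each input by its own delay
        filters.append("[{}]adelay={}:all=1{}".format(
            index, audio_input["delayMilSec"], tag_name))

    # Mix all delayed inputs into a single stream tagged [a]
    filters.append(
        "{}amix=inputs={}:duration=first:"
        "dropout_transition={},volume={}[a]".format(
            "".join(tag_names),
            len(audio_inputs),
            (longest_input * 1000) + 1000,
            len(audio_inputs),
        )
    )

    print(",".join(filters))
    # [0]adelay=0:all=1[r0],[1]adelay=1500:all=1[r1],
    # [r0][r1]amix=inputs=2:duration=first:dropout_transition=4000,volume=2[a]

diff --git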
a/openpype/plugins/publish/extract_otio_review.py b/openpype/plugins/publish/extract_otio_review.py index 35adc97442..9ebcad2af1 100644 --- a/openpype/plugins/publish/extract_otio_review.py +++ b/openpype/plugins/publish/extract_otio_review.py @@ -18,10 +18,22 @@ import os import clique import opentimelineio as otio from pyblish import api -import openpype + +from openpype.lib import ( + get_ffmpeg_tool_path, + run_subprocess, +) +from openpype.pipeline import publish +from openpype.pipeline.editorial import ( + otio_range_to_frame_range, + trim_media_range, + range_from_frames, + frames_to_seconds, + make_sequence_collection +) -class ExtractOTIOReview(openpype.api.Extractor): +class ExtractOTIOReview(publish.Extractor): """ Extract OTIO timeline into one concuted image sequence file. @@ -161,7 +173,7 @@ class ExtractOTIOReview(openpype.api.Extractor): dirname = media_ref.target_url_base head = media_ref.name_prefix tail = media_ref.name_suffix - first, last = openpype.lib.otio_range_to_frame_range( + first, last = otio_range_to_frame_range( available_range) collection = clique.Collection( head=head, @@ -180,7 +192,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # in case it is file sequence but not new OTIO schema # `ImageSequenceReference` path = media_ref.target_url - collection_data = openpype.lib.make_sequence_collection( + collection_data = make_sequence_collection( path, available_range, metadata) dir_path, collection = collection_data @@ -305,8 +317,8 @@ class ExtractOTIOReview(openpype.api.Extractor): duration = avl_durtation # return correct trimmed range - return openpype.lib.trim_media_range( - avl_range, openpype.lib.range_from_frames(start, duration, fps) + return trim_media_range( + avl_range, range_from_frames(start, duration, fps) ) def _render_seqment(self, sequence=None, @@ -327,7 +339,7 @@ class ExtractOTIOReview(openpype.api.Extractor): otio.time.TimeRange: trimmed available range """ # get rendering app path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # create path and frame start to destination output_path, out_frame_start = self._get_ffmpeg_output() @@ -338,6 +350,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # start command list command = [ffmpeg_path] + input_extension = None if sequence: input_dir, collection = sequence in_frame_start = min(collection.indexes) @@ -345,6 +358,7 @@ class ExtractOTIOReview(openpype.api.Extractor): # converting image sequence to image sequence input_file = collection.format("{head}{padding}{tail}") input_path = os.path.join(input_dir, input_file) + input_extension = os.path.splitext(input_path)[-1] # form command for rendering gap files command.extend([ @@ -357,10 +371,11 @@ class ExtractOTIOReview(openpype.api.Extractor): frame_start = otio_range.start_time.value input_fps = otio_range.start_time.rate frame_duration = otio_range.duration.value - sec_start = openpype.lib.frames_to_secons(frame_start, input_fps) - sec_duration = openpype.lib.frames_to_secons( + sec_start = frames_to_seconds(frame_start, input_fps) + sec_duration = frames_to_seconds( frame_duration, input_fps ) + input_extension = os.path.splitext(video_path)[-1] # form command for rendering gap files command.extend([ @@ -370,8 +385,7 @@ class ExtractOTIOReview(openpype.api.Extractor): ]) elif gap: - sec_duration = openpype.lib.frames_to_secons( - gap, self.actual_fps) + sec_duration = frames_to_seconds(gap, self.actual_fps) # form command for rendering gap files command.extend([ @@ -386,12 
+400,24 @@ class ExtractOTIOReview(openpype.api.Extractor): # add output attributes command.extend([ - "-start_number", str(out_frame_start), - output_path + "-start_number", str(out_frame_start) ]) + + # add copying if extensions are matching + if ( + input_extension + and self.output_ext == input_extension + ): + command.extend([ + "-c", "copy" + ]) + + # add output path at the end + command.append(output_path) + # execute self.log.debug("Executing: {}".format(" ".join(command))) - output = openpype.api.run_subprocess( + output = run_subprocess( command, logger=self.log ) self.log.debug("Output: {}".format(output)) diff --git a/openpype/plugins/publish/extract_otio_trimming_video.py b/openpype/plugins/publish/extract_otio_trimming_video.py index 30b57e2c69..70726338aa 100644 --- a/openpype/plugins/publish/extract_otio_trimming_video.py +++ b/openpype/plugins/publish/extract_otio_trimming_video.py @@ -6,17 +6,24 @@ Requires: """ import os -from pyblish import api -import openpype from copy import deepcopy +import pyblish.api -class ExtractOTIOTrimmingVideo(openpype.api.Extractor): +from openpype.lib import ( + get_ffmpeg_tool_path, + run_subprocess, +) +from openpype.pipeline import publish +from openpype.pipeline.editorial import frames_to_seconds + + +class ExtractOTIOTrimmingVideo(publish.Extractor): """ Trimming video file longer then required lenght """ - order = api.ExtractorOrder + order = pyblish.api.ExtractorOrder label = "Extract OTIO trim longer video" families = ["trim"] hosts = ["resolve", "hiero", "flame"] @@ -69,7 +76,7 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): """ # get rendering app path - ffmpeg_path = openpype.lib.get_ffmpeg_tool_path("ffmpeg") + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") # create path to destination output_path = self._get_ffmpeg_output(input_file_path) @@ -80,9 +87,9 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): video_path = input_file_path frame_start = otio_range.start_time.value input_fps = otio_range.start_time.rate - frame_duration = (otio_range.duration.value + 1) - sec_start = openpype.lib.frames_to_secons(frame_start, input_fps) - sec_duration = openpype.lib.frames_to_secons(frame_duration, input_fps) + frame_duration = otio_range.duration.value - 1 + sec_start = frames_to_seconds(frame_start, input_fps) + sec_duration = frames_to_seconds(frame_duration, input_fps) # form command for rendering gap files command.extend([ @@ -95,7 +102,7 @@ class ExtractOTIOTrimmingVideo(openpype.api.Extractor): # execute self.log.debug("Executing: {}".format(" ".join(command))) - output = openpype.api.run_subprocess( + output = run_subprocess( command, logger=self.log ) self.log.debug("Output: {}".format(output)) diff --git a/openpype/plugins/publish/extract_review.py b/openpype/plugins/publish/extract_review.py index 3ecea1f8bd..dcb43d7fa2 100644 --- a/openpype/plugins/publish/extract_review.py +++ b/openpype/plugins/publish/extract_review.py @@ -3,25 +3,26 @@ import re import copy import json import shutil - from abc import ABCMeta, abstractmethod + import six - import clique - +import speedcopy import pyblish.api -import openpype.api + from openpype.lib import ( get_ffmpeg_tool_path, - get_ffprobe_streams, path_to_subprocess_arg, - - should_convert_for_ffmpeg, - convert_for_ffmpeg, - get_transcode_temp_directory + run_subprocess, +) +from openpype.lib.transcoding import ( + IMAGE_EXTENSIONS, + get_ffprobe_streams, + should_convert_for_ffmpeg, + convert_input_paths_for_ffmpeg, + get_transcode_temp_directory, ) -import speedcopy 
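
Both OTIO extractors above turn frame counts into seconds for ffmpeg's -ss/-t/-to arguments. A simplified sketch of what frames_to_seconds(frames, fps) computes; the real helper in openpype.pipeline.editorial goes through OTIO time objects, but the arithmetic is the same:

    def frames_to_seconds(frames, fps):
        # Seconds covered by `frames` at the given frame rate
        return float(frames) / float(fps)

    # Trimming a 24 fps clip: start at frame 36, keep 48 frames
    print(frames_to_seconds(36, 24.0))  # 1.5 -> "-ss 1.5"
    print(frames_to_seconds(48, 24.0))  # 2.0 -> "-t 2.0"
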
class ExtractReview(pyblish.api.InstancePlugin): @@ -45,13 +46,15 @@ class ExtractReview(pyblish.api.InstancePlugin): "hiero", "premiere", "harmony", + "traypublisher", "standalonepublisher", "fusion", "tvpaint", "resolve", "webpublisher", "aftereffects", - "flame" + "flame", + "unreal" ] # Supported extensions @@ -126,6 +129,7 @@ class ExtractReview(pyblish.api.InstancePlugin): for repre in instance.data["representations"]: repre_name = str(repre.get("name")) tags = repre.get("tags") or [] + custom_tags = repre.get("custom_tags") if "review" not in tags: self.log.debug(( "Repre: {} - Didn't found \"review\" in tags. Skipping" @@ -156,18 +160,41 @@ class ExtractReview(pyblish.api.InstancePlugin): ) continue - # Filter output definition by representation tags (optional) - outputs = self.filter_outputs_by_tags(profile_outputs, tags) + # Filter output definition by representation's + # custom tags (optional) + outputs = self.filter_outputs_by_custom_tags( + profile_outputs, custom_tags) if not outputs: self.log.info(( "Skipped representation. All output definitions from" " selected profile does not match to representation's" - " tags. \"{}\"" + " custom tags. \"{}\"" ).format(str(tags))) continue + outputs_per_representations.append((repre, outputs)) return outputs_per_representations + def _single_frame_filter(self, input_filepaths, output_defs): + single_frame_image = False + if len(input_filepaths) == 1: + ext = os.path.splitext(input_filepaths[0])[-1] + single_frame_image = ext.lower() in IMAGE_EXTENSIONS + + filtered_defs = [] + for output_def in output_defs: + output_filters = output_def.get("filter") or {} + frame_filter = output_filters.get("single_frame_filter") + if ( + (not single_frame_image and frame_filter == "single_frame") + or (single_frame_image and frame_filter == "multi_frame") + ): + continue + + filtered_defs.append(output_def) + + return filtered_defs + @staticmethod def get_instance_label(instance): return ( @@ -188,23 +215,36 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs_per_repres = self._get_outputs_per_representations( instance, profile_outputs ) - fill_data = copy.deepcopy(instance.data["anatomyData"]) - for repre, outputs in outputs_per_repres: + for repre, output_defs in outputs_per_repres: # Check if input should be preconverted before processing # Store original staging dir (it's value may change) src_repre_staging_dir = repre["stagingDir"] # Receive filepath to first file in representation first_input_path = None + input_filepaths = [] if not self.input_is_sequence(repre): first_input_path = os.path.join( src_repre_staging_dir, repre["files"] ) + input_filepaths.append(first_input_path) else: for filename in repre["files"]: - first_input_path = os.path.join( + filepath = os.path.join( src_repre_staging_dir, filename ) - break + input_filepaths.append(filepath) + if first_input_path is None: + first_input_path = filepath + + filtered_output_defs = self._single_frame_filter( + input_filepaths, output_defs + ) + if not filtered_output_defs: + self.log.debug(( + "Repre: {} - All output definitions were filtered" + " out by single frame filter. 
Skipping" + ).format(repre["name"])) + continue # Skip if file is not set if first_input_path is None: @@ -231,136 +271,151 @@ class ExtractReview(pyblish.api.InstancePlugin): new_staging_dir = get_transcode_temp_directory() repre["stagingDir"] = new_staging_dir - frame_start = instance.data["frameStart"] - frame_end = instance.data["frameEnd"] - convert_for_ffmpeg( - first_input_path, + convert_input_paths_for_ffmpeg( + input_filepaths, new_staging_dir, - frame_start, - frame_end, self.log ) - for _output_def in outputs: - output_def = copy.deepcopy(_output_def) - # Make sure output definition has "tags" key - if "tags" not in output_def: - output_def["tags"] = [] - - if "burnins" not in output_def: - output_def["burnins"] = [] - - # Create copy of representation - new_repre = copy.deepcopy(repre) - # Make sure new representation has origin staging dir - # - this is because source representation may change - # it's staging dir because of ffmpeg conversion - new_repre["stagingDir"] = src_repre_staging_dir - - # Remove "delete" tag from new repre if there is - if "delete" in new_repre["tags"]: - new_repre["tags"].remove("delete") - - # Add additional tags from output definition to representation - for tag in output_def["tags"]: - if tag not in new_repre["tags"]: - new_repre["tags"].append(tag) - - # Add burnin link from output definition to representation - for burnin in output_def["burnins"]: - if burnin not in new_repre.get("burnins", []): - if not new_repre.get("burnins"): - new_repre["burnins"] = [] - new_repre["burnins"].append(str(burnin)) - - self.log.debug( - "Linked burnins: `{}`".format(new_repre.get("burnins")) + try: + self._render_output_definitions( + instance, + repre, + src_repre_staging_dir, + filtered_output_defs ) - self.log.debug( - "New representation tags: `{}`".format( - new_repre.get("tags")) + finally: + # Make sure temporary staging is cleaned up and representation + # has set origin stagingDir + if do_convert: + # Set staging dir of source representation back to previous + # value + repre["stagingDir"] = src_repre_staging_dir + if os.path.exists(new_staging_dir): + shutil.rmtree(new_staging_dir) + + def _render_output_definitions( + self, instance, repre, src_repre_staging_dir, output_defs + ): + fill_data = copy.deepcopy(instance.data["anatomyData"]) + for _output_def in output_defs: + output_def = copy.deepcopy(_output_def) + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + + if "burnins" not in output_def: + output_def["burnins"] = [] + + # Create copy of representation + new_repre = copy.deepcopy(repre) + # Make sure new representation has origin staging dir + # - this is because source representation may change + # it's staging dir because of ffmpeg conversion + new_repre["stagingDir"] = src_repre_staging_dir + + # Remove "delete" tag from new repre if there is + if "delete" in new_repre["tags"]: + new_repre["tags"].remove("delete") + + # Add additional tags from output definition to representation + for tag in output_def["tags"]: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + # Add burnin link from output definition to representation + for burnin in output_def["burnins"]: + if burnin not in new_repre.get("burnins", []): + if not new_repre.get("burnins"): + new_repre["burnins"] = [] + new_repre["burnins"].append(str(burnin)) + + self.log.debug( + "Linked burnins: `{}`".format(new_repre.get("burnins")) + ) + + self.log.debug( + "New representation tags: `{}`".format( + 
new_repre.get("tags")) + ) + + temp_data = self.prepare_temp_data(instance, repre, output_def) + files_to_clean = [] + if temp_data["input_is_sequence"]: + self.log.info("Filling gaps in sequence.") + files_to_clean = self.fill_sequence_gaps( + temp_data["origin_repre"]["files"], + new_repre["stagingDir"], + temp_data["frame_start"], + temp_data["frame_end"]) + + # create or update outputName + output_name = new_repre.get("outputName", "") + output_ext = new_repre["ext"] + if output_name: + output_name += "_" + output_name += output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + # add outputName to anatomy format fill_data + fill_data.update({ + "output": output_name, + "ext": output_ext + }) + + try: # temporary until oiiotool is supported cross platform + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, new_repre, temp_data, fill_data ) - - temp_data = self.prepare_temp_data( - instance, repre, output_def) - files_to_clean = [] - if temp_data["input_is_sequence"]: - self.log.info("Filling gaps in sequence.") - files_to_clean = self.fill_sequence_gaps( - temp_data["origin_repre"]["files"], - new_repre["stagingDir"], - temp_data["frame_start"], - temp_data["frame_end"]) - - # create or update outputName - output_name = new_repre.get("outputName", "") - output_ext = new_repre["ext"] - if output_name: - output_name += "_" - output_name += output_def["filename_suffix"] - if temp_data["without_handles"]: - output_name += "_noHandles" - - # add outputName to anatomy format fill_data - fill_data.update({ - "output": output_name, - "ext": output_ext - }) - - try: # temporary until oiiotool is supported cross platform - ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, new_repre, temp_data, fill_data + except ZeroDivisionError: + # TODO recalculate width and height using OIIO before + # conversion + if 'exr' in temp_data["origin_repre"]["ext"]: + self.log.warning( + ( + "Unsupported compression on input files." + " Skipping!!!" + ), + exc_info=True ) - except ZeroDivisionError: - if 'exr' in temp_data["origin_repre"]["ext"]: - self.log.debug("Unsupported compression on input " + - "files. 
Skipping!!!") - return - raise NotImplementedError + return + raise NotImplementedError - subprcs_cmd = " ".join(ffmpeg_args) + subprcs_cmd = " ".join(ffmpeg_args) - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) - openpype.api.run_subprocess( - subprcs_cmd, shell=True, logger=self.log - ) + run_subprocess(subprcs_cmd, shell=True, logger=self.log) - # delete files added to fill gaps - if files_to_clean: - for f in files_to_clean: - os.unlink(f) + # delete files added to fill gaps + if files_to_clean: + for f in files_to_clean: + os.unlink(f) - new_repre.update({ - "name": "{}_{}".format(output_name, output_ext), - "outputName": output_name, - "outputDef": output_def, - "frameStartFtrack": temp_data["output_frame_start"], - "frameEndFtrack": temp_data["output_frame_end"], - "ffmpeg_cmd": subprcs_cmd - }) + new_repre.update({ + "fps": temp_data["fps"], + "name": "{}_{}".format(output_name, output_ext), + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"], + "ffmpeg_cmd": subprcs_cmd + }) - # Force to pop these key if are in new repre - new_repre.pop("preview", None) - new_repre.pop("thumbnail", None) - if "clean_name" in new_repre.get("tags", []): - new_repre.pop("outputName") + # Force to pop these key if are in new repre + new_repre.pop("preview", None) + new_repre.pop("thumbnail", None) + if "clean_name" in new_repre.get("tags", []): + new_repre.pop("outputName") - # adding representation - self.log.debug( - "Adding new representation: {}".format(new_repre) - ) - instance.data["representations"].append(new_repre) - - # Cleanup temp staging dir after procesisng of output definitions - if do_convert: - temp_dir = repre["stagingDir"] - shutil.rmtree(temp_dir) - # Set staging dir of source representation back to previous - # value - repre["stagingDir"] = src_repre_staging_dir + # adding representation + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) def input_is_sequence(self, repre): """Deduce from representation data if input is sequence.""" @@ -429,9 +484,24 @@ class ExtractReview(pyblish.api.InstancePlugin): input_is_sequence = self.input_is_sequence(repre) input_allow_bg = False + first_sequence_frame = None if input_is_sequence and repre["files"]: + # Calculate first frame that should be used + cols, _ = clique.assemble(repre["files"]) + input_frames = list(sorted(cols[0].indexes)) + first_sequence_frame = input_frames[0] + # WARNING: This is an issue as we don't know if first frame + # is with or without handles! 
+ # - handle start is added but how do not know if we should + output_duration = (output_frame_end - output_frame_start) + 1 + if ( + without_handles + and len(input_frames) - handle_start >= output_duration + ): + first_sequence_frame += handle_start + ext = os.path.splitext(repre["files"][0])[1].replace(".", "") - if ext in self.alpha_exts: + if ext.lower() in self.alpha_exts: input_allow_bg = True return { @@ -449,6 +519,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "resolution_height": instance.data.get("resolutionHeight"), "origin_repre": repre, "input_is_sequence": input_is_sequence, + "first_sequence_frame": first_sequence_frame, "input_allow_bg": input_allow_bg, "with_audio": with_audio, "without_handles": without_handles, @@ -527,9 +598,13 @@ class ExtractReview(pyblish.api.InstancePlugin): if temp_data["input_is_sequence"]: # Set start frame of input sequence (just frame in filename) # - definition of input filepath - ffmpeg_input_args.append( - "-start_number {}".format(temp_data["output_frame_start"]) - ) + # - add handle start if output should be without handles + start_number = temp_data["first_sequence_frame"] + if temp_data["without_handles"] and temp_data["handles_are_set"]: + start_number += temp_data["handle_start"] + ffmpeg_input_args.extend([ + "-start_number", str(start_number) + ]) # TODO add fps mapping `{fps: fraction}` ? # - e.g.: { @@ -538,49 +613,50 @@ class ExtractReview(pyblish.api.InstancePlugin): # "23.976": "24000/1001" # } # Add framerate to input when input is sequence - ffmpeg_input_args.append( - "-framerate {}".format(temp_data["fps"]) - ) + ffmpeg_input_args.extend([ + "-framerate", str(temp_data["fps"]) + ]) + # Add duration of an input sequence if output is video + if not temp_data["output_is_sequence"]: + ffmpeg_input_args.extend([ + "-to", "{:0.10f}".format(duration_seconds) + ]) if temp_data["output_is_sequence"]: # Set start frame of output sequence (just frame in filename) # - this is definition of an output - ffmpeg_output_args.append( - "-start_number {}".format(temp_data["output_frame_start"]) - ) + ffmpeg_output_args.extend([ + "-start_number", str(temp_data["output_frame_start"]) + ]) # Change output's duration and start point if should not contain # handles - start_sec = 0 if temp_data["without_handles"] and temp_data["handles_are_set"]: - # Set start time without handles - # - check if handle_start is bigger than 0 to avoid zero division - if temp_data["handle_start"] > 0: - start_sec = float(temp_data["handle_start"]) / temp_data["fps"] - ffmpeg_input_args.append("-ss {:0.10f}".format(start_sec)) + # Set output duration in seconds + ffmpeg_output_args.extend([ + "-t", "{:0.10}".format(duration_seconds) + ]) - # Set output duration inn seconds - ffmpeg_output_args.append("-t {:0.10}".format(duration_seconds)) + # Add -ss (start offset in seconds) if input is not sequence + if not temp_data["input_is_sequence"]: + start_sec = float(temp_data["handle_start"]) / temp_data["fps"] + # Set start time without handles + # - Skip if start sec is 0.0 + if start_sec > 0.0: + ffmpeg_input_args.extend([ + "-ss", "{:0.10f}".format(start_sec) + ]) # Set frame range of output when input or output is sequence elif temp_data["output_is_sequence"]: - ffmpeg_output_args.append("-frames:v {}".format(output_frames_len)) - - # Add duration of an input sequence if output is video - if ( - temp_data["input_is_sequence"] - and not temp_data["output_is_sequence"] - ): - ffmpeg_input_args.append("-to {:0.10f}".format( - duration_seconds + start_sec - )) + 
ffmpeg_output_args.extend([ + "-frames:v", str(output_frames_len) + ]) # Add video/image input path - ffmpeg_input_args.append( - "-i {}".format( - path_to_subprocess_arg(temp_data["full_input_path"]) - ) - ) + ffmpeg_input_args.extend([ + "-i", path_to_subprocess_arg(temp_data["full_input_path"]) + ]) # Add audio arguments if there are any. Skipped when output are images. if not temp_data["output_ext_is_image"] and temp_data["with_audio"]: @@ -745,7 +821,8 @@ class ExtractReview(pyblish.api.InstancePlugin): start_frame = int(start_frame) end_frame = int(end_frame) collections = clique.assemble(files)[0] - assert len(collections) == 1, "Multiple collections found." + msg = "Multiple collections {} found.".format(collections) + assert len(collections) == 1, msg col = collections[0] # do nothing if no gap is found in input range @@ -862,6 +939,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if output_ext.startswith("."): output_ext = output_ext[1:] + output_ext = output_ext.lower() + # Store extension to representation new_repre["ext"] = output_ext @@ -959,6 +1038,9 @@ class ExtractReview(pyblish.api.InstancePlugin): # Set audio duration audio_in_args.append("-to {:0.10f}".format(audio_duration)) + # Ignore video data from audio input + audio_in_args.append("-vn") + # Add audio input path audio_in_args.append("-i {}".format( path_to_subprocess_arg(audio["filename"]) @@ -1174,7 +1256,6 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get instance data pixel_aspect = temp_data["pixel_aspect"] - if reformat_in_baking: self.log.debug(( "Using resolution from input. It is already " @@ -1194,6 +1275,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # - settings value can't have None but has value of 0 output_width = output_def.get("width") or output_width or None output_height = output_def.get("height") or output_height or None + # Force to use input resolution if output resolution was not defined + # in settings. Resolution from instance is not used when + # 'use_input_res' is set to 'True'. 
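
The assert in fill_sequence_gaps above was extended to report which collections were found. For illustration, a sketch of how clique.assemble() returns more than one collection when file names mix patterns; the names are made up:

    import clique

    files = [
        "shotA.1001.exr", "shotA.1002.exr",
        "shotB.1001.exr", "shotB.1002.exr",
    ]
    collections, remainder = clique.assemble(files)
    print(len(collections))  # 2 -> fill_sequence_gaps would refuse this input
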
+ use_input_res = False # Overscal color overscan_color_value = "black" @@ -1205,6 +1290,17 @@ class ExtractReview(pyblish.api.InstancePlugin): ) self.log.debug("Overscan color: `{}`".format(overscan_color_value)) + # Scale input to have proper pixel aspect ratio + # - scale width by the pixel aspect ratio + scale_pixel_aspect = output_def.get("scale_pixel_aspect", True) + if scale_pixel_aspect and pixel_aspect != 1: + # Change input width after pixel aspect + input_width = int(input_width * pixel_aspect) + use_input_res = True + filters.append(( + "scale={}x{}:flags=lanczos".format(input_width, input_height) + )) + # Convert overscan value video filters overscan_crop = output_def.get("overscan_crop") overscan = OverscanCrop( @@ -1215,13 +1311,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # resolution by it's values if overscan_crop_filters: filters.extend(overscan_crop_filters) + # Change input resolution after overscan crop input_width = overscan.width() input_height = overscan.height() - # Use output resolution as inputs after cropping to skip usage of - # instance data resolution - if output_width is None or output_height is None: - output_width = input_width - output_height = input_height + use_input_res = True # Make sure input width and height is not an odd number input_width_is_odd = bool(input_width % 2 != 0) @@ -1247,8 +1340,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("input_height: `{}`".format(input_height)) - # Use instance resolution if output definition has not set it. - if output_width is None or output_height is None: + # Use instance resolution if output definition has not set it + # - use instance resolution only if there were not scale changes + # that may massivelly affect output 'use_input_res' + if not use_input_res and output_width is None or output_height is None: output_width = temp_data["resolution_width"] output_height = temp_data["resolution_height"] @@ -1290,7 +1385,6 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width == input_width and output_height == input_height and not letter_box_enabled - and pixel_aspect == 1 ): self.log.debug( "Output resolution is same as input's" @@ -1300,66 +1394,16 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre["resolutionHeight"] = input_height return filters - # defining image ratios - input_res_ratio = ( - (float(input_width) * pixel_aspect) / input_height - ) - output_res_ratio = float(output_width) / float(output_height) - self.log.debug("input_res_ratio: `{}`".format(input_res_ratio)) - self.log.debug("output_res_ratio: `{}`".format(output_res_ratio)) - - # Round ratios to 2 decimal places for comparing - input_res_ratio = round(input_res_ratio, 2) - output_res_ratio = round(output_res_ratio, 2) - - # get scale factor - scale_factor_by_width = ( - float(output_width) / (input_width * pixel_aspect) - ) - scale_factor_by_height = ( - float(output_height) / input_height - ) - - self.log.debug( - "scale_factor_by_with: `{}`".format(scale_factor_by_width) - ) - self.log.debug( - "scale_factor_by_height: `{}`".format(scale_factor_by_height) - ) - # scaling none square pixels and 1920 width - if ( - input_height != output_height - or input_width != output_width - or pixel_aspect != 1 - ): - if input_res_ratio < output_res_ratio: - self.log.debug( - "Input's resolution ratio is lower then output's" - ) - width_scale = int(input_width * scale_factor_by_height) - width_half_pad = int((output_width - width_scale) / 2) 
- height_scale = output_height - height_half_pad = 0 - else: - self.log.debug("Input is heigher then output") - width_scale = output_width - width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) - height_half_pad = int((output_height - height_scale) / 2) - - self.log.debug("width_scale: `{}`".format(width_scale)) - self.log.debug("width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("height_scale: `{}`".format(height_scale)) - self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - + if input_height != output_height or input_width != output_width: filters.extend([ - "scale={}x{}:flags=lanczos".format( - width_scale, height_scale - ), - "pad={}:{}:{}:{}:{}".format( + ( + "scale={}x{}" + ":flags=lanczos" + ":force_original_aspect_ratio=decrease" + ).format(output_width, output_height), + "pad={}:{}:(ow-iw)/2:(oh-ih)/2:{}".format( output_width, output_height, - width_half_pad, height_half_pad, overscan_color_value ), "setsar=1" @@ -1461,6 +1505,8 @@ class ExtractReview(pyblish.api.InstancePlugin): output = -1 regexes = self.compile_list_of_regexes(in_list) for regex in regexes: + if not value: + continue if re.match(regex, value): output = 1 break @@ -1664,6 +1710,7 @@ class ExtractReview(pyblish.api.InstancePlugin): Args: profile (dict): Profile from presets matching current context. families (list): All families of current instance. + subset_name (str): name of subset Returns: list: Containg all output definitions matching entered families. @@ -1711,40 +1758,51 @@ class ExtractReview(pyblish.api.InstancePlugin): return filtered_outputs - def filter_outputs_by_tags(self, outputs, tags): - """Filter output definitions by entered representation tags. + def filter_outputs_by_custom_tags(self, outputs, custom_tags): + """Filter output definitions by entered representation custom_tags. - Output definitions without tags filter are marked as valid. + Output definitions without custom_tags filter are marked as invalid, + only in case representation is having any custom_tags defined. Args: outputs (list): Contain list of output definitions from presets. - tags (list): Tags of processed representation. + custom_tags (list): Custom Tags of processed representation. Returns: list: Containg all output definitions matching entered tags. 
""" - filtered_outputs = [] - repre_tags_low = [tag.lower() for tag in tags] - for output_def in outputs: - valid = True - output_filters = output_def.get("filter") - if output_filters: - # Check tag filters - tag_filters = output_filters.get("tags") - if tag_filters: - tag_filters_low = [tag.lower() for tag in tag_filters] - valid = False - for tag in repre_tags_low: - if tag in tag_filters_low: - valid = True - break - if not valid: - continue + filtered_outputs = [] + repre_c_tags_low = [tag.lower() for tag in (custom_tags or [])] + for output_def in outputs: + tag_filters = output_def.get("filter", {}).get("custom_tags") + + if not custom_tags and not tag_filters: + # Definition is valid if both tags are empty + valid = True + + elif not custom_tags or not tag_filters: + # Invalid if one is empty + valid = False + + else: + # Check if output definition tags are in representation tags + valid = False + # lower all filter tags + tag_filters_low = [tag.lower() for tag in tag_filters] + # check if any repre tag is not in filter tags + for tag in repre_c_tags_low: + if tag in tag_filters_low: + valid = True + break if valid: filtered_outputs.append(output_def) + self.log.debug("__ filtered_outputs: {}".format( + [_o["filename_suffix"] for _o in filtered_outputs] + )) + return filtered_outputs def add_video_filter_args(self, args, inserting_arg): diff --git a/openpype/plugins/publish/extract_review_slate.py b/openpype/plugins/publish/extract_review_slate.py index 505ae75169..fca3d96ca6 100644 --- a/openpype/plugins/publish/extract_review_slate.py +++ b/openpype/plugins/publish/extract_review_slate.py @@ -1,17 +1,22 @@ import os -import openpype.api -import pyblish +import re +from pprint import pformat + +import pyblish.api + from openpype.lib import ( path_to_subprocess_arg, + run_subprocess, get_ffmpeg_tool_path, get_ffprobe_data, get_ffprobe_streams, get_ffmpeg_codec_args, get_ffmpeg_format_args, ) +from openpype.pipeline import publish -class ExtractReviewSlate(openpype.api.Extractor): +class ExtractReviewSlate(publish.Extractor): """ Will add slate frame at the start of the video files """ @@ -21,6 +26,8 @@ class ExtractReviewSlate(openpype.api.Extractor): families = ["slate", "review"] match = pyblish.api.Subset + SUFFIX = "_slate" + hosts = ["nuke", "shell"] optional = True @@ -29,28 +36,19 @@ class ExtractReviewSlate(openpype.api.Extractor): if "representations" not in inst_data: raise RuntimeError("Burnin needs already created mov to work on.") - suffix = "_slate" - slate_path = inst_data.get("slateFrame") + # get slates frame from upstream + slates_data = inst_data.get("slateFrames") + if not slates_data: + # make it backward compatible and open for slates generator + # premium plugin + slates_data = { + "*": inst_data["slateFrame"] + } + + self.log.info("_ slates_data: {}".format(pformat(slates_data))) + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") - slate_streams = get_ffprobe_streams(slate_path, self.log) - # Try to find first stream with defined 'width' and 'height' - # - this is to avoid order of streams where audio can be as first - # - there may be a better way (checking `codec_type`?)+ - slate_width = None - slate_height = None - for slate_stream in slate_streams: - if "width" in slate_stream and "height" in slate_stream: - slate_width = int(slate_stream["width"]) - slate_height = int(slate_stream["height"]) - break - - # Raise exception of any stream didn't define input resolution - if slate_width is None: - raise AssertionError(( - "FFprobe couldn't read resolution from 
input file: \"{}\"" - ).format(slate_path)) - if "reviewToWidth" in inst_data: use_legacy_code = True else: @@ -58,6 +56,7 @@ class ExtractReviewSlate(openpype.api.Extractor): pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") + self.log.debug("fps {} ".format(fps)) for idx, repre in enumerate(inst_data["representations"]): self.log.debug("repre ({}): `{}`".format(idx + 1, repre)) @@ -73,20 +72,22 @@ class ExtractReviewSlate(openpype.api.Extractor): os.path.normpath(stagingdir), repre["files"]) self.log.debug("__ input_path: {}".format(input_path)) - video_streams = get_ffprobe_streams( + streams = get_ffprobe_streams( input_path, self.log ) + # get slate data + slate_path = self._get_slate_path(input_file, slates_data) + self.log.info("_ slate_path: {}".format(slate_path)) - # Try to find first stream with defined 'width' and 'height' - # - this is to avoid order of streams where audio can be as first - # - there may be a better way (checking `codec_type`?) - input_width = None - input_height = None - for stream in video_streams: - if "width" in stream and "height" in stream: - input_width = int(stream["width"]) - input_height = int(stream["height"]) - break + slate_width, slate_height = self._get_slates_resolution(slate_path) + + # Get video metadata + ( + input_width, + input_height, + input_timecode, + input_frame_rate + ) = self._get_video_metadata(streams) # Raise exception of any stream didn't define input resolution if input_width is None: @@ -94,6 +95,14 @@ class ExtractReviewSlate(openpype.api.Extractor): "FFprobe couldn't read resolution from input file: \"{}\"" ).format(input_path)) + ( + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_audio + ) = self._get_audio_metadata(streams) + # values are set in ExtractReview if use_legacy_code: to_width = inst_data["reviewToWidth"] @@ -133,7 +142,7 @@ class ExtractReviewSlate(openpype.api.Extractor): _remove_at_end = [] ext = os.path.splitext(input_file)[1] - output_file = input_file.replace(ext, "") + suffix + ext + output_file = input_file.replace(ext, "") + self.SUFFIX + ext _remove_at_end.append(input_path) @@ -149,30 +158,43 @@ class ExtractReviewSlate(openpype.api.Extractor): input_args.extend(repre["_profile"].get('input', [])) else: input_args.extend(repre["outputDef"].get('input', [])) - input_args.append("-loop 1 -i {}".format( - path_to_subprocess_arg(slate_path) - )) + input_args.extend([ - "-r {}".format(fps), - "-t 0.04" + "-loop", "1", + "-i", path_to_subprocess_arg(slate_path), + "-r", str(input_frame_rate), + "-frames:v", "1", ]) + # add timecode from source to the slate, substract one frame + offset_timecode = "" + if input_timecode: + offset_timecode = self._tc_offset( + str(input_timecode), + framerate=fps, + frame_offset=-1 + ) + self.log.debug("Slate Timecode: `{}`".format( + offset_timecode + )) + if use_legacy_code: + format_args = [] codec_args = repre["_profile"].get('codec', []) output_args.extend(codec_args) # preset's output data output_args.extend(repre["_profile"].get('output', [])) else: # Codecs are copied from source for whole input - codec_args = self._get_codec_args(repre) + format_args, codec_args = self._get_format_codec_args(repre) + output_args.extend(format_args) output_args.extend(codec_args) # make sure colors are correct output_args.extend([ - "-vf scale=out_color_matrix=bt709", - "-color_primaries bt709", - "-color_trc bt709", - "-colorspace bt709" + "-color_primaries", "bt709", + "-color_trc", "bt709", + "-colorspace", "bt709", ]) # 
scaling none square pixels and 1920 width @@ -208,15 +230,25 @@ class ExtractReviewSlate(openpype.api.Extractor): "__ height_half_pad: `{}`".format(height_half_pad) ) - scaling_arg = ("scale={0}x{1}:flags=lanczos," - "pad={2}:{3}:{4}:{5}:black,setsar=1").format( - width_scale, height_scale, to_width, to_height, - width_half_pad, height_half_pad + scaling_arg = ( + "scale={0}x{1}:flags=lanczos" + ":out_color_matrix=bt709" + ",pad={2}:{3}:{4}:{5}:black" + ",setsar=1" + ",fps={6}" + ).format( + width_scale, + height_scale, + to_width, + to_height, + width_half_pad, + height_half_pad, + input_frame_rate ) - vf_back = self.add_video_filter_args(output_args, scaling_arg) - # add it to output_args - output_args.insert(0, vf_back) + vf_back = self.add_video_filter_args(output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) # overrides output file output_args.append("-y") @@ -238,42 +270,88 @@ class ExtractReviewSlate(openpype.api.Extractor): self.log.debug( "Slate Executing: {}".format(slate_subprocess_cmd) ) - openpype.api.run_subprocess( + run_subprocess( slate_subprocess_cmd, shell=True, logger=self.log ) - # create ffmpeg concat text file path - conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt" - conc_text_path = os.path.join( - os.path.normpath(stagingdir), conc_text_file) - _remove_at_end.append(conc_text_path) - self.log.debug("__ conc_text_path: {}".format(conc_text_path)) + # Create slate with silent audio track + if input_audio: + # silent slate output path + slate_silent_path = "_silent".join( + os.path.splitext(slate_v_path)) + _remove_at_end.append(slate_silent_path) + self._create_silent_slate( + ffmpeg_path, + slate_v_path, + slate_silent_path, + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_frame_rate + ) - new_line = "\n" - with open(conc_text_path, "w") as conc_text_f: - conc_text_f.writelines([ - "file {}".format( - slate_v_path.replace("\\", "/")), - new_line, - "file {}".format(input_path.replace("\\", "/")) - ]) + # replace slate with silent slate for concat + slate_v_path = slate_silent_path - # concat slate and videos together + # concat slate and videos together with concat filter + # this will reencode the output + if input_audio: + fmap = [ + "-filter_complex", + "[0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]", + "-map", '[v]', + "-map", '[a]' + ] + else: + fmap = [ + "-filter_complex", + "[0:v] [1:v] concat=n=2:v=1:a=0 [v]", + "-map", '[v]' + ] concat_args = [ ffmpeg_path, "-y", - "-f", "concat", - "-safe", "0", - "-i", conc_text_path, - "-c", "copy", - output_path + "-i", slate_v_path, + "-i", input_path, ] + concat_args.extend(fmap) + if offset_timecode: + concat_args.extend(["-timecode", offset_timecode]) + # NOTE: Added because of OP Atom demuxers + # Add format arguments if there are any + # - keep format of output + if format_args: + concat_args.extend(format_args) + + if codec_args: + concat_args.extend(codec_args) + + # Use arguments from ffmpeg preset + source_ffmpeg_cmd = repre.get("ffmpeg_cmd") + if source_ffmpeg_cmd: + copy_args = ( + "-metadata", + "-metadata:s:v:0", + "-b:v", + "-b:a", + ) + args = source_ffmpeg_cmd.split(" ") + for indx, arg in enumerate(args): + if arg in copy_args: + concat_args.append(arg) + # assumes arg has one parameter + concat_args.append(args[indx + 1]) + + # add final output path + concat_args.append(output_path) # ffmpeg concat subprocess self.log.debug( - "Executing concat: {}".format(" ".join(concat_args)) + "Executing concat filter: 
{}".format + (" ".join(concat_args)) ) - openpype.api.run_subprocess( + run_subprocess( concat_args, logger=self.log ) @@ -301,6 +379,167 @@ class ExtractReviewSlate(openpype.api.Extractor): self.log.debug(inst_data["representations"]) + def _get_slate_path(self, input_file, slates_data): + slate_path = None + for sl_n, _slate_path in slates_data.items(): + if "*" in sl_n: + slate_path = _slate_path + break + elif re.search(sl_n, input_file): + slate_path = _slate_path + break + + if not slate_path: + raise AttributeError( + "Missing slates paths: {}".format(slates_data)) + + return slate_path + + def _get_slates_resolution(self, slate_path): + slate_streams = get_ffprobe_streams(slate_path, self.log) + # Try to find first stream with defined 'width' and 'height' + # - this is to avoid order of streams where audio can be as first + # - there may be a better way (checking `codec_type`?)+ + slate_width = None + slate_height = None + for slate_stream in slate_streams: + if "width" in slate_stream and "height" in slate_stream: + slate_width = int(slate_stream["width"]) + slate_height = int(slate_stream["height"]) + break + + # Raise exception of any stream didn't define input resolution + if slate_width is None: + raise AssertionError(( + "FFprobe couldn't read resolution from input file: \"{}\"" + ).format(slate_path)) + + return (slate_width, slate_height) + + def _get_video_metadata(self, streams): + input_timecode = "" + input_width = None + input_height = None + input_frame_rate = None + for stream in streams: + if stream.get("codec_type") != "video": + continue + self.log.debug("FFprobe Video: {}".format(stream)) + + if "width" not in stream or "height" not in stream: + continue + width = int(stream["width"]) + height = int(stream["height"]) + if not width or not height: + continue + + # Make sure that width and height are captured even if frame rate + # is not available + input_width = width + input_height = height + + tags = stream.get("tags") or {} + input_timecode = tags.get("timecode") or "" + + input_frame_rate = stream.get("r_frame_rate") + if input_frame_rate is not None: + break + return ( + input_width, + input_height, + input_timecode, + input_frame_rate + ) + + def _get_audio_metadata(self, streams): + # Get audio metadata + audio_codec = None + audio_channels = None + audio_sample_rate = None + audio_channel_layout = None + input_audio = False + + for stream in streams: + if stream.get("codec_type") != "audio": + continue + self.log.debug("__Ffprobe Audio: {}".format(stream)) + + if all( + stream.get(key) + for key in ( + "codec_name", + "channels", + "sample_rate", + "channel_layout", + ) + ): + audio_codec = stream["codec_name"] + audio_channels = stream["channels"] + audio_sample_rate = stream["sample_rate"] + audio_channel_layout = stream["channel_layout"] + input_audio = True + break + + return ( + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_audio, + ) + + def _create_silent_slate( + self, + ffmpeg_path, + src_path, + dst_path, + audio_codec, + audio_channels, + audio_sample_rate, + audio_channel_layout, + input_frame_rate + ): + # Get duration of one frame in micro seconds + items = input_frame_rate.split("/") + if len(items) == 1: + one_frame_duration = 1.0 / float(items[0]) + elif len(items) == 2: + one_frame_duration = float(items[1]) / float(items[0]) + else: + one_frame_duration = None + + if one_frame_duration is None: + one_frame_duration = "40000us" + else: + one_frame_duration *= 1000000 + one_frame_duration = 
str(int(one_frame_duration)) + "us" + self.log.debug("One frame duration is {}".format(one_frame_duration)) + + slate_silent_args = [ + ffmpeg_path, + "-i", src_path, + "-f", "lavfi", "-i", + "anullsrc=r={}:cl={}:d={}".format( + audio_sample_rate, + audio_channel_layout, + one_frame_duration + ), + "-c:v", "copy", + "-c:a", audio_codec, + "-map", "0:v", + "-map", "1:a", + "-shortest", + "-y", + dst_path + ] + # run slate generation subprocess + self.log.debug("Silent Slate Executing: {}".format( + " ".join(slate_silent_args) + )) + run_subprocess( + slate_silent_args, logger=self.log + ) + def add_video_filter_args(self, args, inserting_arg): """ Fixing video filter argumets to be one long string @@ -338,7 +577,7 @@ class ExtractReviewSlate(openpype.api.Extractor): return vf_back - def _get_codec_args(self, repre): + def _get_format_codec_args(self, repre): """Detect possible codec arguments from representation.""" codec_args = [] @@ -361,13 +600,47 @@ class ExtractReviewSlate(openpype.api.Extractor): return codec_args source_ffmpeg_cmd = repre.get("ffmpeg_cmd") - codec_args.extend( - get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) - ) - codec_args.extend( - get_ffmpeg_codec_args( - ffprobe_data, source_ffmpeg_cmd, logger=self.log - ) + format_args = get_ffmpeg_format_args(ffprobe_data, source_ffmpeg_cmd) + codec_args = get_ffmpeg_codec_args( + ffprobe_data, source_ffmpeg_cmd, logger=self.log ) - return codec_args + return format_args, codec_args + + def _tc_offset(self, timecode, framerate=24.0, frame_offset=-1): + """Offsets timecode by frame""" + def _seconds(value, framerate): + if isinstance(value, str): + _zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':')) + _s = sum(f * float(t) for f, t in _zip_ft) + elif isinstance(value, (int, float)): + _s = value / framerate + else: + _s = 0 + return _s + + def _frames(seconds, framerate, frame_offset): + _f = seconds * framerate + frame_offset + if _f < 0: + _f = framerate * 60 * 60 * 24 + _f + return _f + + def _timecode(seconds, framerate): + return '{h:02d}:{m:02d}:{s:02d}:{f:02d}'.format( + h=int(seconds / 3600), + m=int(seconds / 60 % 60), + s=int(seconds % 60), + f=int(round((seconds - int(seconds)) * framerate))) + drop = False + if ';' in timecode: + timecode = timecode.replace(';', ':') + drop = True + frames = _frames( + _seconds(timecode, framerate), + framerate, + frame_offset + ) + tc = _timecode(_seconds(frames, framerate), framerate) + if drop: + tc = ';'.join(tc.rsplit(':', 1)) + return tc diff --git a/openpype/plugins/publish/extract_scanline_exr.py b/openpype/plugins/publish/extract_scanline_exr.py index a7f7de5188..0e4c0ca65f 100644 --- a/openpype/plugins/publish/extract_scanline_exr.py +++ b/openpype/plugins/publish/extract_scanline_exr.py @@ -4,8 +4,8 @@ import os import shutil import pyblish.api -import openpype.api -import openpype.lib + +from openpype.lib import run_subprocess, get_oiio_tools_path class ExtractScanlineExr(pyblish.api.InstancePlugin): @@ -45,7 +45,7 @@ class ExtractScanlineExr(pyblish.api.InstancePlugin): stagingdir = os.path.normpath(repre.get("stagingDir")) - oiio_tool_path = openpype.lib.get_oiio_tools_path() + oiio_tool_path = get_oiio_tools_path() if not os.path.exists(oiio_tool_path): self.log.error( "OIIO tool not found in {}".format(oiio_tool_path)) @@ -65,7 +65,7 @@ class ExtractScanlineExr(pyblish.api.InstancePlugin): subprocess_exr = " ".join(oiio_cmd) self.log.info(f"running: {subprocess_exr}") - openpype.api.run_subprocess(subprocess_exr, logger=self.log) + 
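# --- editor's note -------------------------------------------------------
# A runnable sketch (standalone helper, names illustrative) of the frame
# duration math used by '_create_silent_slate' above: ffprobe reports
# 'r_frame_rate' as a fraction string, and ffmpeg's 'anullsrc' source needs
# a duration, here expressed in microseconds.

def one_frame_duration_us(r_frame_rate):
    """Convert a rate like '24000/1001' to a microsecond duration string."""
    numerator, _, denominator = r_frame_rate.partition("/")
    seconds = float(denominator or 1) / float(numerator)
    return "{}us".format(int(seconds * 1000000))

print(one_frame_duration_us("25"))          # 40000us
print(one_frame_duration_us("24000/1001"))  # 41708us
# -------------------------------------------------------------------------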
run_subprocess(subprocess_exr, logger=self.log) # raise error if there is no ouptput if not os.path.exists(os.path.join(stagingdir, original_name)): diff --git a/openpype/plugins/publish/extract_thumbnail.py b/openpype/plugins/publish/extract_thumbnail.py new file mode 100644 index 0000000000..aa5497a99f --- /dev/null +++ b/openpype/plugins/publish/extract_thumbnail.py @@ -0,0 +1,231 @@ +import os +import tempfile + +import pyblish.api +from openpype.lib import ( + get_ffmpeg_tool_path, + get_oiio_tools_path, + is_oiio_supported, + + run_subprocess, + path_to_subprocess_arg, +) + + +class ExtractThumbnail(pyblish.api.InstancePlugin): + """Create jpg thumbnail from sequence using ffmpeg""" + + label = "Extract Thumbnail" + order = pyblish.api.ExtractorOrder + families = [ + "imagesequence", "render", "render2d", "prerender", + "source", "clip", "take", "online" + ] + hosts = ["shell", "fusion", "resolve", "traypublisher"] + enabled = False + + # presetable attribute + ffmpeg_args = None + + def process(self, instance): + subset_name = instance.data["subset"] + instance_repres = instance.data.get("representations") + if not instance_repres: + self.log.debug(( + "Instance {} does not have representations. Skipping" + ).format(subset_name)) + return + + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + + # Skip if instance have 'review' key in data set to 'False' + if not self._is_review_instance(instance): + self.log.info("Skipping - no review set on instance.") + return + + # Check if already has thumbnail created + if self._already_has_thumbnail(instance_repres): + self.log.info("Thumbnail representation already present.") + return + + # skip crypto passes. + # TODO: This is just a quick fix and has its own side-effects - it is + # affecting every subset name with `crypto` in its name. + # This must be solved properly, maybe using tags on + # representation that can be determined much earlier and + # with better precision. + if "crypto" in subset_name.lower(): + self.log.info("Skipping crypto passes.") + return + + filtered_repres = self._get_filtered_repres(instance) + if not filtered_repres: + self.log.info(( + "Instance don't have representations" + " that can be used as source for thumbnail. 
Skipping" + )) + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + instance.context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + for repre in filtered_repres: + repre_files = repre["files"] + if not isinstance(repre_files, (list, tuple)): + input_file = repre_files + else: + file_index = int(float(len(repre_files)) * 0.5) + input_file = repre_files[file_index] + + src_staging = os.path.normpath(repre["stagingDir"]) + full_input_path = os.path.join(src_staging, input_file) + self.log.info("input {}".format(full_input_path)) + filename = os.path.splitext(input_file)[0] + jpeg_file = filename + "_thumb.jpg" + full_output_path = os.path.join(dst_staging, jpeg_file) + + if oiio_supported: + self.log.info("Trying to convert with OIIO") + # If the input can read by OIIO then use OIIO method for + # conversion otherwise use ffmpeg + thumbnail_created = self.create_thumbnail_oiio( + full_input_path, full_output_path + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available + if not thumbnail_created: + if oiio_supported: + self.log.info(( + "Converting with FFMPEG because input" + " can't be read by OIIO." + )) + + thumbnail_created = self.create_thumbnail_ffmpeg( + full_input_path, full_output_path + ) + + # Skip representation and try next one if wasn't created + if not thumbnail_created: + continue + + new_repre = { + "name": "thumbnail", + "ext": "jpg", + "files": jpeg_file, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": ["thumbnail"] + } + + # adding representation + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + # There is no need to create more then one thumbnail + break + + if not thumbnail_created: + self.log.warning("Thumbanil has not been created.") + + def _is_review_instance(self, instance): + # TODO: We should probably handle "not creating" of thumbnail + # other way then checking for "review" key on instance data? + if instance.data.get("review", True): + return True + return False + + def _already_has_thumbnail(self, repres): + for repre in repres: + self.log.info("repre {}".format(repre)) + if repre["name"] == "thumbnail": + return True + return False + + def _get_filtered_repres(self, instance): + filtered_repres = [] + src_repres = instance.data.get("representations") or [] + for repre in src_repres: + self.log.debug(repre) + tags = repre.get("tags") or [] + valid = "review" in tags or "thumb-nuke" in tags + if not valid: + continue + + if not repre.get("files"): + self.log.info(( + "Representation \"{}\" don't have files. 
Skipping" + ).format(repre["name"])) + continue + + filtered_repres.append(repre) + return filtered_repres + + def create_thumbnail_oiio(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + oiio_tool_path = get_oiio_tools_path() + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("running: {}".format(" ".join(oiio_cmd))) + try: + run_subprocess(oiio_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using oiiotool", + exc_info=True + ) + return False + + def create_thumbnail_ffmpeg(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + ffmpeg_args = self.ffmpeg_args or {} + + jpeg_items = [] + jpeg_items.append(path_to_subprocess_arg(ffmpeg_path)) + # override file if already exists + jpeg_items.append("-y") + # flag for large file sizes + max_int = 2147483647 + jpeg_items.append("-analyzeduration {}".format(max_int)) + jpeg_items.append("-probesize {}".format(max_int)) + # use same input args like with mov + jpeg_items.extend(ffmpeg_args.get("input") or []) + # input file + jpeg_items.append("-i {}".format( + path_to_subprocess_arg(src_path) + )) + # output arguments from presets + jpeg_items.extend(ffmpeg_args.get("output") or []) + # we just want one frame from movie files + jpeg_items.append("-vframes 1") + # output file + jpeg_items.append(path_to_subprocess_arg(dst_path)) + subprocess_command = " ".join(jpeg_items) + try: + run_subprocess( + subprocess_command, shell=True, logger=self.log + ) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using ffmpeg", + exc_info=True + ) + return False diff --git a/openpype/plugins/publish/extract_thumbnail_from_source.py b/openpype/plugins/publish/extract_thumbnail_from_source.py new file mode 100644 index 0000000000..a92f762cde --- /dev/null +++ b/openpype/plugins/publish/extract_thumbnail_from_source.py @@ -0,0 +1,195 @@ +"""Create instance thumbnail from "thumbnailSource" on 'instance.data'. + +Output is new representation with "thumbnail" name on instance. If instance +already have such representation the process is skipped. + +This way a collector can point to a file from which should be thumbnail +generated. This is different approach then what global plugin for thumbnails +does. The global plugin has specific logic which does not support + +Todos: + No size handling. Size of input is used for output thumbnail which can + cause issues. +""" + +import os +import tempfile + +import pyblish.api +from openpype.lib import ( + get_ffmpeg_tool_path, + get_oiio_tools_path, + is_oiio_supported, + + run_subprocess, +) + + +class ExtractThumbnailFromSource(pyblish.api.InstancePlugin): + """Create jpg thumbnail for instance based on 'thumbnailSource'. + + Thumbnail source must be a single image or video filepath. + """ + + label = "Extract Thumbnail (from source)" + # Before 'ExtractThumbnail' in global plugins + order = pyblish.api.ExtractorOrder - 0.00001 + + def process(self, instance): + self._create_context_thumbnail(instance.context) + + subset_name = instance.data["subset"] + self.log.info( + "Processing instance with subset name {}".format(subset_name) + ) + thumbnail_source = instance.data.get("thumbnailSource") + if not thumbnail_source: + self.log.debug("Thumbnail source not filled. 
Skipping.") + return + + # Check if already has thumbnail created + if self._instance_has_thumbnail(instance): + self.log.info("Thumbnail representation already present.") + return + + dst_filepath = self._create_thumbnail( + instance.context, thumbnail_source + ) + if not dst_filepath: + return + + dst_staging, dst_filename = os.path.split(dst_filepath) + new_repre = { + "name": "thumbnail", + "ext": "jpg", + "files": dst_filename, + "stagingDir": dst_staging, + "thumbnail": True, + "tags": ["thumbnail"] + } + + # adding representation + self.log.debug( + "Adding thumbnail representation: {}".format(new_repre) + ) + instance.data["representations"].append(new_repre) + instance.data["thumbnailPath"] = dst_filepath + + def _create_thumbnail(self, context, thumbnail_source): + if not thumbnail_source: + self.log.debug("Thumbnail source not filled. Skipping.") + return + + if not os.path.exists(thumbnail_source): + self.log.debug(( + "Thumbnail source is set but file was not found {}. Skipping." + ).format(thumbnail_source)) + return + + # Create temp directory for thumbnail + # - this is to avoid "override" of source file + dst_staging = tempfile.mkdtemp(prefix="pyblish_tmp_") + self.log.debug( + "Create temp directory {} for thumbnail".format(dst_staging) + ) + # Store new staging to cleanup paths + context.data["cleanupFullPaths"].append(dst_staging) + + thumbnail_created = False + oiio_supported = is_oiio_supported() + + self.log.info("Thumbnail source: {}".format(thumbnail_source)) + src_basename = os.path.basename(thumbnail_source) + dst_filename = os.path.splitext(src_basename)[0] + "_thumb.jpg" + full_output_path = os.path.join(dst_staging, dst_filename) + + if oiio_supported: + self.log.info("Trying to convert with OIIO") + # If the input can read by OIIO then use OIIO method for + # conversion otherwise use ffmpeg + thumbnail_created = self.create_thumbnail_oiio( + thumbnail_source, full_output_path + ) + + # Try to use FFMPEG if OIIO is not supported or for cases when + # oiiotool isn't available + if not thumbnail_created: + if oiio_supported: + self.log.info(( + "Converting with FFMPEG because input" + " can't be read by OIIO." 
+ )) + + thumbnail_created = self.create_thumbnail_ffmpeg( + thumbnail_source, full_output_path + ) + + # Skip representation and try next one if wasn't created + if thumbnail_created: + return full_output_path + + self.log.warning("Thumbanil has not been created.") + + def _instance_has_thumbnail(self, instance): + if "representations" not in instance.data: + self.log.warning( + "Instance does not have 'representations' key filled" + ) + instance.data["representations"] = [] + + for repre in instance.data["representations"]: + if repre["name"] == "thumbnail": + return True + return False + + def create_thumbnail_oiio(self, src_path, dst_path): + self.log.info("outputting {}".format(dst_path)) + oiio_tool_path = get_oiio_tools_path() + oiio_cmd = [ + oiio_tool_path, + "-a", src_path, + "-o", dst_path + ] + self.log.info("Running: {}".format(" ".join(oiio_cmd))) + try: + run_subprocess(oiio_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using oiiotool", + exc_info=True + ) + return False + + def create_thumbnail_ffmpeg(self, src_path, dst_path): + ffmpeg_path = get_ffmpeg_tool_path("ffmpeg") + + max_int = str(2147483647) + ffmpeg_cmd = [ + ffmpeg_path, + "-y", + "-analyzeduration", max_int, + "-probesize", max_int, + "-i", src_path, + "-vframes", "1", + dst_path + ] + + self.log.info("Running: {}".format(" ".join(ffmpeg_cmd))) + try: + run_subprocess(ffmpeg_cmd, logger=self.log) + return True + except Exception: + self.log.warning( + "Failed to create thubmnail using ffmpeg", + exc_info=True + ) + return False + + def _create_context_thumbnail(self, context): + if "thumbnailPath" in context.data: + return + + thumbnail_source = context.data.get("thumbnailSource") + thumbnail_path = self._create_thumbnail(context, thumbnail_source) + context.data["thumbnailPath"] = thumbnail_path diff --git a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py b/openpype/plugins/publish/extract_trim_video_audio.py similarity index 67% rename from openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py rename to openpype/plugins/publish/extract_trim_video_audio.py index f327895b83..b951136391 100644 --- a/openpype/hosts/standalonepublisher/plugins/publish/extract_trim_video_audio.py +++ b/openpype/plugins/publish/extract_trim_video_audio.py @@ -1,20 +1,22 @@ import os +from pprint import pformat + import pyblish.api -import openpype.api from openpype.lib import ( get_ffmpeg_tool_path, + run_subprocess, ) -from pprint import pformat +from openpype.pipeline import publish -class ExtractTrimVideoAudio(openpype.api.Extractor): +class ExtractTrimVideoAudio(publish.Extractor): """Trim with ffmpeg "mov" and "wav" files.""" # must be before `ExtractThumbnailSP` order = pyblish.api.ExtractorOrder - 0.01 label = "Extract Trim Video/Audio" - hosts = ["standalonepublisher"] + hosts = ["standalonepublisher", "traypublisher"] families = ["clip", "trimming"] # make sure it is enabled only if at least both families are available @@ -39,23 +41,35 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): # Generate mov file. 
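# --- editor's note -------------------------------------------------------
# A hedged sketch (hypothetical helper, not this plugin's API) of the trim
# command assembled in this hunk. Placing '-ss' before '-i' makes ffmpeg
# seek on the input; both the in-point and the duration are converted from
# frames to seconds before being passed to the subprocess.

def build_trim_cmd(ffmpeg_path, src, dst, start_frames, dur_frames, fps):
    return [
        ffmpeg_path,
        "-ss", str(start_frames / fps),  # in-point in seconds
        "-i", src,
        "-t", str(dur_frames / fps),     # duration in seconds
        dst,
    ]

# Trim a 120 frame clip starting 48 frames into a 24 fps plate
print(build_trim_cmd("ffmpeg", "plate.mov", "clip.mov", 48.0, 120.0, 24.0))
# ['ffmpeg', '-ss', '2.0', '-i', 'plate.mov', '-t', '5.0', 'clip.mov']
# -------------------------------------------------------------------------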
fps = instance.data["fps"] video_file_path = instance.data["editorialSourcePath"] - extensions = instance.data.get("extensions", [".mov"]) + extensions = instance.data.get("extensions", ["mov"]) + output_file_type = instance.data.get("outputFileType") + reviewable = "review" in instance.data["families"] + + frame_start = int(instance.data["frameStart"]) + frame_end = int(instance.data["frameEnd"]) + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + clip_start_h = float(instance.data["clipInH"]) + _dur = instance.data["clipDuration"] + handle_dur = (handle_start + handle_end) + clip_dur_h = float(_dur + handle_dur) + + if output_file_type: + extensions = [output_file_type] for ext in extensions: self.log.info("Processing ext: `{}`".format(ext)) + if not ext.startswith("."): + ext = "." + ext + clip_trimed_path = os.path.join( staging_dir, instance.data["name"] + ext) - # # check video file metadata - # input_data = plib.get_ffprobe_streams(video_file_path)[0] - # self.log.debug(f"__ input_data: `{input_data}`") - - start = float(instance.data["clipInH"]) - dur = float(instance.data["clipDurationH"]) if ext == ".wav": # offset time as ffmpeg is having bug - start += 0.5 + clip_start_h += 0.5 # remove "review" from families instance.data["families"] = [ fml for fml in instance.data["families"] @@ -64,9 +78,9 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): ffmpeg_args = [ ffmpeg_path, - "-ss", str(start / fps), + "-ss", str(clip_start_h / fps), "-i", video_file_path, - "-t", str(dur / fps) + "-t", str(clip_dur_h / fps) ] if ext in [".mov", ".mp4"]: ffmpeg_args.extend([ @@ -86,7 +100,7 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): joined_args = " ".join(ffmpeg_args) self.log.info(f"Processing: {joined_args}") - openpype.api.run_subprocess( + run_subprocess( ffmpeg_args, logger=self.log ) @@ -95,14 +109,15 @@ class ExtractTrimVideoAudio(openpype.api.Extractor): "ext": ext[1:], "files": os.path.basename(clip_trimed_path), "stagingDir": staging_dir, - "frameStart": int(instance.data["frameStart"]), - "frameEnd": int(instance.data["frameEnd"]), - "frameStartFtrack": int(instance.data["frameStartH"]), - "frameEndFtrack": int(instance.data["frameEndH"]), + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start - handle_start, + "frameEndFtrack": frame_end + handle_end, "fps": fps, + "tags": [] } - if ext in [".mov", ".mp4"]: + if ext in [".mov", ".mp4"] and reviewable: repre.update({ "thumbnail": True, "tags": ["review", "ftrackreview", "delete"]}) diff --git a/openpype/plugins/publish/help/validate_containers.xml b/openpype/plugins/publish/help/validate_containers.xml new file mode 100644 index 0000000000..5d18bb4c19 --- /dev/null +++ b/openpype/plugins/publish/help/validate_containers.xml @@ -0,0 +1,23 @@ + + + +Not up-to-date assets + +## Outdated containers found + +Scene contains one or more outdated loaded containers, eg. versions loaded into scene by Loader are not latest. + +### How to repair? + +Use 'Scene Inventory' and update all highlighted old container to latest OR + refresh Publish and switch 'Validate Containers' toggle on 'Options' tab. + + WARNING: Skipping this validator will result in publishing (and probably rendering) old version of loaded assets. + + +### __Detailed Info__ (optional) + +This validates whether you're working with the latest versions of published content loaded into your scene. This protects you from using outdated versions of an asset. 
+ + + \ No newline at end of file diff --git a/openpype/plugins/publish/help/validate_publish_dir.xml b/openpype/plugins/publish/help/validate_publish_dir.xml new file mode 100644 index 0000000000..9f62b264bf --- /dev/null +++ b/openpype/plugins/publish/help/validate_publish_dir.xml @@ -0,0 +1,31 @@ + + + +Source directory not collected + +## Source directory not collected + +Instance is marked for in place publishing. Its 'originalDirname' must be collected. Contact OP developer to modify collector. + + + +### __Detailed Info__ (optional) + +In place publishing uses source directory and file name in resulting path and file name of published item. For this instance + all required metadata weren't filled. This is not recoverable error, unless instance itself is removed. + Collector for this instance must be updated for instance to be published. + + + +Source file not in project dir + +## Source file not in project dir + +Path '{original_dirname}' not in project folder. Please publish from inside of project folder. + +### How to repair? + +Restart publish after you moved source file into project directory. + + + \ No newline at end of file diff --git a/openpype/plugins/publish/help/validate_unique_subsets.xml b/openpype/plugins/publish/help/validate_unique_subsets.xml new file mode 100644 index 0000000000..b18f046f84 --- /dev/null +++ b/openpype/plugins/publish/help/validate_unique_subsets.xml @@ -0,0 +1,17 @@ + + + +Subset not unique + +## Clashing subset names found + +Multiples instances from your scene are set to publish into the same asset > subset. + + Non unique subset names: '{non_unique}' + +### How to repair? + +Remove the offending instances or rename to have a unique name. + + + \ No newline at end of file diff --git a/openpype/plugins/publish/integrate.py b/openpype/plugins/publish/integrate.py new file mode 100644 index 0000000000..b117006871 --- /dev/null +++ b/openpype/plugins/publish/integrate.py @@ -0,0 +1,1021 @@ +import os +import logging +import sys +import copy +import clique +import six + +from bson.objectid import ObjectId +import pyblish.api + +from openpype.client.operations import ( + OperationsSession, + new_subset_document, + new_version_doc, + new_representation_doc, + prepare_subset_update_data, + prepare_version_update_data, + prepare_representation_update_data, +) + +from openpype.client import ( + get_representations, + get_subset_by_name, + get_version_by_name, +) +from openpype.lib import source_hash +from openpype.lib.file_transaction import FileTransaction +from openpype.pipeline.publish import ( + KnownPublishError, + get_publish_template_name, +) + +log = logging.getLogger(__name__) + + +def get_instance_families(instance): + """Get all families of the instance""" + # todo: move this to lib? + family = instance.data.get("family") + families = [] + if family: + families.append(family) + + for _family in (instance.data.get("families") or []): + if _family not in families: + families.append(_family) + + return families + + +def get_frame_padded(frame, padding): + """Return frame number as string with `padding` amount of padded zeros""" + return "{frame:0{padding}d}".format(padding=padding, frame=frame) + + +class IntegrateAsset(pyblish.api.InstancePlugin): + """Register publish in the database and transfer files to destinations. 
+ + Steps: + 1) Register the subset and version + 2) Transfer the representation files to the destination + 3) Register the representation + + Requires: + instance.data['representations'] - must be a list and each member + must be a dictionary with following data: + 'files': list of filenames for sequence, string for single file. + Only the filename is allowed, without the folder path. + 'stagingDir': "path/to/folder/with/files" + 'name': representation name (usually the same as extension) + 'ext': file extension + optional data + "frameStart" + "frameEnd" + 'fps' + "data": additional metadata for each representation. + """ + + label = "Integrate Asset" + order = pyblish.api.IntegratorOrder + families = ["workfile", + "pointcache", + "proxyAbc", + "camera", + "animation", + "model", + "mayaAscii", + "mayaScene", + "setdress", + "layout", + "ass", + "vdbcache", + "scene", + "vrayproxy", + "vrayscene_layer", + "render", + "prerender", + "imagesequence", + "review", + "rendersetup", + "rig", + "plate", + "look", + "audio", + "yetiRig", + "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image", + "assembly", + "fbx", + "gltf", + "textures", + "action", + "harmony.template", + "harmony.palette", + "editorial", + "background", + "camerarig", + "redshiftproxy", + "effect", + "xgen", + "hda", + "usd", + "staticMesh", + "skeletalMesh", + "mvLook", + "mvUsd", + "mvUsdComposition", + "mvUsdOverride", + "simpleUnrealTexture", + "online", + "uasset" + ] + + default_template_name = "publish" + + # Representation context keys that should always be written to + # the database even if not used by the destination template + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "username", "user", "output" + ] + skip_host_families = [] + + def process(self, instance): + if self._temp_skip_instance_by_settings(instance): + return + + # Mark instance as processed for legacy integrator + instance.data["processedWithNewIntegrator"] = True + + # Instance should be integrated on a farm + if instance.data.get("farm"): + self.log.info( + "Instance is marked to be processed on farm. Skipping") + return + + filtered_repres = self.filter_representations(instance) + # Skip instance if there are not representations to integrate + # all representations should not be integrated + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + + file_transactions = FileTransaction(log=self.log) + try: + self.register(instance, file_transactions, filtered_repres) + except Exception: + # clean destination + # todo: preferably we'd also rollback *any* changes to the database + file_transactions.rollback() + self.log.critical("Error when registering", exc_info=True) + six.reraise(*sys.exc_info()) + + # Finalizing can't rollback safely so no use for moving it to + # the try, except. + file_transactions.finalize() + + def _temp_skip_instance_by_settings(self, instance): + """Decide if instance will be processed with new or legacy integrator. + + This is temporary solution until we test all usecases with new (this) + integrator plugin. 
+ """ + + host_name = instance.context.data["hostName"] + instance_family = instance.data["family"] + instance_families = set(instance.data.get("families") or []) + + skip = False + for item in self.skip_host_families: + if host_name not in item["host"]: + continue + + families = set(item["families"]) + if instance_family in families: + skip = True + break + + for family in instance_families: + if family in families: + skip = True + break + + if skip: + break + + if skip: + self.log.debug("Instance is marked to be skipped by settings.") + return skip + + def filter_representations(self, instance): + # Prepare repsentations that should be integrated + repres = instance.data.get("representations") + # Raise error if instance don't have any representations + if not repres: + raise KnownPublishError( + "Instance {} has no representations to integrate".format( + instance.data["family"] + ) + ) + + # Validate type of stored representations + if not isinstance(repres, (list, tuple)): + raise TypeError( + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) + ) + + # Filter representations + filtered_repres = [] + for repre in repres: + if "delete" in repre.get("tags", []): + continue + filtered_repres.append(repre) + + return filtered_repres + + def register(self, instance, file_transactions, filtered_repres): + project_name = instance.context.data["projectName"] + + instance_stagingdir = instance.data.get("stagingDir") + if not instance_stagingdir: + self.log.info(( + "{0} is missing reference to staging directory." + " Will try to get it from representation." + ).format(instance)) + + else: + self.log.debug( + "Establishing staging directory " + "@ {0}".format(instance_stagingdir) + ) + + template_name = self.get_template_name(instance) + + op_session = OperationsSession() + subset = self.prepare_subset( + instance, op_session, project_name + ) + version = self.prepare_version( + instance, op_session, subset, project_name + ) + instance.data["versionEntity"] = version + + anatomy = instance.context.data["anatomy"] + + # Get existing representations (if any) + existing_repres_by_name = { + repre_doc["name"].lower(): repre_doc + for repre_doc in get_representations( + project_name, + version_ids=[version["_id"]], + fields=["_id", "name"] + ) + } + + # Prepare all representations + prepared_representations = [] + for repre in filtered_repres: + # todo: reduce/simplify what is returned from this function + prepared = self.prepare_representation( + repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance) + + for src, dst in prepared["transfers"]: + # todo: add support for hardlink transfers + file_transactions.add(src, dst) + + prepared_representations.append(prepared) + + # Each instance can also have pre-defined transfers not explicitly + # part of a representation - like texture resources used by a + # .ma representation. Those destination paths are pre-defined, etc. + # todo: should we move or simplify this logic? 
+ resource_destinations = set() + + file_copy_modes = [ + ("transfers", FileTransaction.MODE_COPY), + ("hardlinks", FileTransaction.MODE_HARDLINK) + ] + for files_type, copy_mode in file_copy_modes: + for src, dst in instance.data.get(files_type, []): + self._validate_path_in_project_roots(anatomy, dst) + + file_transactions.add(src, dst, mode=copy_mode) + resource_destinations.add(os.path.abspath(dst)) + + # Bulk write to the database + # We write the subset and version to the database before the File + # Transaction to reduce the chances of another publish trying to + # publish to the same version number since that chance can greatly + # increase if the file transaction takes a long time. + op_session.commit() + + self.log.info("Subset {subset[name]} and Version {version[name]} " + "written to database..".format(subset=subset, + version=version)) + + # Process all file transfers of all integrations now + self.log.debug("Integrating source files to destination ...") + file_transactions.process() + self.log.debug( + "Backed up existing files: {}".format(file_transactions.backups)) + self.log.debug( + "Transferred files: {}".format(file_transactions.transferred)) + self.log.debug("Retrieving Representation Site Sync information ...") + + # Get the accessible sites for Site Sync + modules_by_name = instance.context.data["openPypeModules"] + sync_server_module = modules_by_name["sync_server"] + sites = sync_server_module.compute_resource_sync_sites( + project_name=instance.data["projectEntity"]["name"] + ) + self.log.debug("Sync Server Sites: {}".format(sites)) + + # Compute the resource file infos once (files belonging to the + # version instance instead of an individual representation) so + # we can re-use those file infos per representation + resource_file_infos = self.get_files_info(resource_destinations, + sites=sites, + anatomy=anatomy) + + # Finalize the representations now the published files are integrated + # Get 'files' info for representations and its attached resources + new_repre_names_low = set() + for prepared in prepared_representations: + repre_doc = prepared["representation"] + repre_update_data = prepared["repre_doc_update_data"] + transfers = prepared["transfers"] + destinations = [dst for src, dst in transfers] + repre_doc["files"] = self.get_files_info( + destinations, sites=sites, anatomy=anatomy + ) + + # Add the version resource file infos to each representation + repre_doc["files"] += resource_file_infos + + # Set up representation for writing to the database. 
Since + # we *might* be overwriting an existing entry if the version + # already existed we'll use ReplaceOnce with `upsert=True` + if repre_update_data is None: + op_session.create_entity( + project_name, repre_doc["type"], repre_doc + ) + else: + op_session.update_entity( + project_name, + repre_doc["type"], + repre_doc["_id"], + repre_update_data + ) + + new_repre_names_low.add(repre_doc["name"].lower()) + + # Delete any existing representations that didn't get any new data + # if the instance is not set to append mode + if not instance.data.get("append", False): + for name, existing_repres in existing_repres_by_name.items(): + if name not in new_repre_names_low: + # We add the exact representation name because `name` is + # lowercase for name matching only and not in the database + op_session.delete_entity( + project_name, "representation", existing_repres["_id"] + ) + + self.log.debug("{}".format(op_session.to_data())) + op_session.commit() + + # Backwards compatibility + # todo: can we avoid the need to store this? + instance.data["published_representations"] = { + p["representation"]["_id"]: p for p in prepared_representations + } + + self.log.info("Registered {} representations" + "".format(len(prepared_representations))) + + def prepare_subset(self, instance, op_session, project_name): + asset_doc = instance.data["assetEntity"] + subset_name = instance.data["subset"] + family = instance.data["family"] + self.log.debug("Subset: {}".format(subset_name)) + + # Get existing subset if it exists + existing_subset_doc = get_subset_by_name( + project_name, subset_name, asset_doc["_id"] + ) + + # Define subset data + data = { + "families": get_instance_families(instance) + } + + subset_group = instance.data.get("subsetGroup") + if subset_group: + data["subsetGroup"] = subset_group + elif existing_subset_doc: + # Preserve previous subset group if new version does not set it + if "subsetGroup" in existing_subset_doc.get("data", {}): + subset_group = existing_subset_doc["data"]["subsetGroup"] + data["subsetGroup"] = subset_group + + subset_id = None + if existing_subset_doc: + subset_id = existing_subset_doc["_id"] + subset_doc = new_subset_document( + subset_name, family, asset_doc["_id"], data, subset_id + ) + + if existing_subset_doc is None: + # Create a new subset + self.log.info("Subset '%s' not found, creating ..." % subset_name) + op_session.create_entity( + project_name, subset_doc["type"], subset_doc + ) + + else: + # Update existing subset data with new data and set in database. 
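# --- editor's note -------------------------------------------------------
# A minimal sketch of the 'preserve subset group' rule in 'prepare_subset'
# above (the function name is illustrative): a new publish only overrides
# 'subsetGroup' when it explicitly sets one; otherwise the value from the
# existing subset document is kept.

def resolve_subset_group(instance_group, existing_subset_doc):
    if instance_group:
        return instance_group
    existing_data = (existing_subset_doc or {}).get("data", {})
    return existing_data.get("subsetGroup")

print(resolve_subset_group(None, {"data": {"subsetGroup": "Characters"}}))
# Characters
# -------------------------------------------------------------------------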
+ # We also change the found subset in-place so we don't need to + # re-query the subset afterwards + subset_doc["data"].update(data) + update_data = prepare_subset_update_data( + existing_subset_doc, subset_doc + ) + op_session.update_entity( + project_name, + subset_doc["type"], + subset_doc["_id"], + update_data + ) + + self.log.info("Prepared subset: {}".format(subset_name)) + return subset_doc + + def prepare_version(self, instance, op_session, subset_doc, project_name): + version_number = instance.data["version"] + + existing_version = get_version_by_name( + project_name, + version_number, + subset_doc["_id"], + fields=["_id"] + ) + version_id = None + if existing_version: + version_id = existing_version["_id"] + + version_data = self.create_version_data(instance) + version_doc = new_version_doc( + version_number, + subset_doc["_id"], + version_data, + version_id + ) + + if existing_version: + self.log.debug("Updating existing version ...") + update_data = prepare_version_update_data( + existing_version, version_doc + ) + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + update_data + ) + else: + self.log.debug("Creating new version ...") + op_session.create_entity( + project_name, version_doc["type"], version_doc + ) + + self.log.info("Prepared version: v{0:03d}".format(version_doc["name"])) + + return version_doc + + def _validate_repre_files(self, files, is_sequence_representation): + """Validate representation files before transfer preparation. + + Check if files contain only filenames instead of full paths and check + if sequence don't contain more than one sequence or has remainders. + + Args: + files (Union[str, List[str]]): Files from representation. + is_sequence_representation (bool): Files are for sequence. + + Raises: + KnownPublishError: If validations don't pass. + """ + + if not files: + return + + if not is_sequence_representation: + files = [files] + + if any(os.path.isabs(fname) for fname in files): + raise KnownPublishError("Given file names contain full paths") + + if not is_sequence_representation: + return + + src_collections, remainders = clique.assemble(files) + if len(files) < 2 or len(src_collections) != 1 or remainders: + raise KnownPublishError(( + "Files of representation does not contain proper" + " sequence files.\nCollected collections: {}" + "\nCollected remainders: {}" + ).format( + ", ".join([str(col) for col in src_collections]), + ", ".join([str(rem) for rem in remainders]) + )) + + def prepare_representation(self, repre, + template_name, + existing_repres_by_name, + version, + instance_stagingdir, + instance): + + # pre-flight validations + if repre["ext"].startswith("."): + raise KnownPublishError(( + "Extension must not start with a dot '.': {}" + ).format(repre["ext"])) + + if repre.get("transfers"): + raise KnownPublishError(( + "Representation is not allowed to have transfers" + "data before integration. They are computed in " + "the integrator. 
Got: {}" + ).format(repre["transfers"])) + + # create template data for Anatomy + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # required representation keys + files = repre["files"] + template_data["representation"] = repre["name"] + template_data["ext"] = repre["ext"] + + # allow overwriting existing version + template_data["version"] = version["name"] + + # add template data for colorspaceData + if repre.get("colorspaceData"): + colorspace = repre["colorspaceData"]["colorspace"] + # replace spaces with underscores + # pipeline.colorspace.parse_colorspace_from_filepath + # is checking it with underscores too + colorspace = colorspace.replace(" ", "_") + template_data["colorspace"] = colorspace + + stagingdir = repre.get("stagingDir") + if not stagingdir: + # Fall back to instance staging dir if not explicitly + # set for representation in the instance + self.log.debug(( + "Representation uses instance staging dir: {}" + ).format(instance_stagingdir)) + stagingdir = instance_stagingdir + + if not stagingdir: + raise KnownPublishError( + "No staging directory set for representation: {}".format(repre) + ) + + # optionals + # retrieve additional anatomy data from representation if exists + for key, anatomy_key in { + # Representation Key: Anatomy data key + "resolutionWidth": "resolution_width", + "resolutionHeight": "resolution_height", + "fps": "fps", + "outputName": "output", + "originalBasename": "originalBasename" + }.items(): + # Allow to take value from representation + # if not found also consider instance.data + value = repre.get(key) + if value is None: + value = instance.data.get(key) + + if value is not None: + template_data[anatomy_key] = value + + self.log.debug("Anatomy template name: {}".format(template_name)) + anatomy = instance.context.data["anatomy"] + publish_template_category = anatomy.templates[template_name] + template = os.path.normpath(publish_template_category["path"]) + + is_udim = bool(repre.get("udim")) + + # handle publish in place + if "{originalDirname}" in template: + # store as originalDirname only original value without project root + # if instance collected originalDirname is present, it should be + # used for all represe + # from temp to final + original_directory = ( + instance.data.get("originalDirname") or instance_stagingdir) + + _rootless = self.get_rootless_path(anatomy, original_directory) + if _rootless == original_directory: + raise KnownPublishError(( + "Destination path '{}' ".format(original_directory) + + "must be in project dir" + )) + relative_path_start = _rootless.rfind('}') + 2 + without_root = _rootless[relative_path_start:] + template_data["originalDirname"] = without_root + + is_sequence_representation = isinstance(files, (list, tuple)) + self._validate_repre_files(files, is_sequence_representation) + + # Output variables of conditions below: + # - transfers (List[Tuple[str, str]]): src -> dst filepaths to copy + # - repre_context (Dict[str, Any]): context data used to fill template + # - template_data (Dict[str, Any]): source data used to fill template + # - to add required data to 'repre_context' not used for + # formatting + # - anatomy_filled (Dict[str, Any]): filled anatomy of last file + # - to fill 'publishDir' on instance.data -> not ideal + + # Treat template with 'orignalBasename' in special way + if "{originalBasename}" in template: + # Remove 'frame' from template data + template_data.pop("frame", None) + + # Find out first frame string value + first_index_padded = None + if not is_udim and 
is_sequence_representation: + col = clique.assemble(files)[0][0] + sorted_frames = tuple(sorted(col.indexes)) + # First frame is used for the resulting 'frame' value + first_frame = sorted_frames[0] + # Get last frame for padding + last_frame = sorted_frames[-1] + # Use the collection padding or the string length of the last + # frame, whichever is larger + padding = max(col.padding, len(str(last_frame))) + first_index_padded = get_frame_padded( + frame=first_frame, + padding=padding + ) + + # Convert a single file to a list as the remaining part only + # creates transfers (iteration over files) + if not is_sequence_representation: + files = [files] + + repre_context = None + transfers = [] + for src_file_name in files: + template_data["originalBasename"], _ = os.path.splitext( + src_file_name) + + anatomy_filled = anatomy.format(template_data) + dst = anatomy_filled[template_name]["path"] + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + if repre_context is None: + repre_context = dst.used_values + + if not is_udim and first_index_padded is not None: + repre_context["frame"] = first_index_padded + + elif is_sequence_representation: + # Collection of files (sequence) + src_collections, remainders = clique.assemble(files) + + src_collection = src_collections[0] + destination_indexes = list(src_collection.indexes) + # Use last frame for minimum padding + # - that should cover both 'udim' and 'frame' minimum padding + destination_padding = len(str(destination_indexes[-1])) + if not is_udim: + # Change padding for frames if template has defined higher + # padding. + template_padding = int( + publish_template_category["frame_padding"] + ) + if template_padding > destination_padding: + destination_padding = template_padding + + # If the representation has `frameStart` set it renumbers the + # frame indices of the published collection. It will start from + # that `frameStart` index instead. Thus if that frame start + # differs from the collection we want to shift the destination + # frame indices from the source collection. + # In case sources are published in place we need to + # skip renumbering + repre_frame_start = repre.get("frameStart") + if repre_frame_start is not None: + index_frame_start = int(repre_frame_start) + # Shift destination sequence to the start frame + destination_indexes = [ + index_frame_start + idx + for idx in range(len(destination_indexes)) + ] + + # To construct the destination template with anatomy we require + # a Frame or UDIM tile set for the template data. We use the first + # index of the destination for that because that could've shifted + # from the source indexes, etc. + first_index_padded = get_frame_padded( + frame=destination_indexes[0], + padding=destination_padding + ) + + # Construct destination collection from template + repre_context = None + dst_filepaths = [] + for index in destination_indexes: + if is_udim: + template_data["udim"] = index + else: + template_data["frame"] = index + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + dst_filepaths.append(template_filled) + if repre_context is None: + self.log.debug( + "Template filled: {}".format(str(template_filled)) + ) + repre_context = template_filled.used_values + + # Make sure context contains frame + # NOTE: Frame would not be available only if the template does not + # contain '{frame}' -> Do we want to support that? 
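For illustration, a minimal standalone sketch of the renumbering and padding logic above (hypothetical values, not part of this patch; `get_frame_padded` is assumed to zero-pad a frame number to a given width):

    # Sketch: shift destination frame indexes and pad the first frame
    src_indexes = [1001, 1002, 1003]   # frames found in staging
    repre_frame_start = 1              # repre.get("frameStart")
    template_padding = 4               # template "frame_padding"

    destination_indexes = [
        repre_frame_start + idx for idx in range(len(src_indexes))
    ]  # -> [1, 2, 3]
    destination_padding = max(
        template_padding, len(str(destination_indexes[-1]))
    )
    first_index_padded = "{:0{}d}".format(
        destination_indexes[0], destination_padding
    )  # -> "0001", used as the "frame" value when filling the template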
+ if not is_udim: + repre_context["frame"] = first_index_padded + + # Update the destination indexes and padding + dst_collection = clique.assemble(dst_filepaths)[0][0] + dst_collection.padding = destination_padding + if len(src_collection.indexes) != len(dst_collection.indexes): + raise KnownPublishError(( + "This is a bug. Source sequence frame count" + " does not match destination frame count" + )) + + # Multiple file transfers + transfers = [] + for src_file_name, dst in zip(src_collection, dst_collection): + src = os.path.join(stagingdir, src_file_name) + transfers.append((src, dst)) + + else: + # Single file + # Manage anatomy template data + template_data.pop("frame", None) + if is_udim: + template_data["udim"] = repre["udim"][0] + # Construct destination filepath from template + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled) + + # Single file transfer + src = os.path.join(stagingdir, files) + transfers = [(src, dst)] + + # todo: Are we sure the assumption that each representation + # ends up in the same folder is valid? + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) + + for key in self.db_representation_context_keys: + # Also add these values to the context even if not used by the + # destination template + value = template_data.get(key) + if value is not None: + repre_context[key] = value + + # Explicitly store the full list even though template data might + # have a different value because it uses just a single udim tile + if repre.get("udim"): + repre_context["udim"] = repre.get("udim") # store list + + # Use previous representation's id if there is a name match + existing = existing_repres_by_name.get(repre["name"].lower()) + repre_id = None + if existing: + repre_id = existing["_id"] + + # Store first transferred destination as published path data + # - used primarily for reviews that are integrated to custom modules + # TODO we should probably store all integrated files + # related to the representation? 
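As a rough sketch of how the source and destination sequences above are paired into transfers (illustrative inputs, not part of this patch; iterating a clique collection yields its formatted member names):

    import os
    import clique

    stagingdir = "/tmp/staging"  # hypothetical
    src_collection = clique.assemble(
        ["render.1001.exr", "render.1002.exr"]
    )[0][0]
    dst_collection = clique.assemble([
        "/proj/asset/publish/render.0001.exr",
        "/proj/asset/publish/render.0002.exr",
    ])[0][0]

    # Pair each staged source file with its publish destination
    transfers = [
        (os.path.join(stagingdir, src_name), dst_name)
        for src_name, dst_name in zip(src_collection, dst_collection)
    ]
    # -> [("/tmp/staging/render.1001.exr",
    #      "/proj/asset/publish/render.0001.exr"), ...]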
+ published_path = transfers[0][1] + repre["published_path"] = published_path + + # todo: `repre` is not the actual `representation` entity; + # we should simplify/clarify the difference between the data above + # and the actual representation entity for the database + data = repre.get("data", {}) + data.update({"path": published_path, "template": template}) + + # add colorspace data if any exists on representation + if repre.get("colorspaceData"): + data["colorspaceData"] = repre["colorspaceData"] + + repre_doc = new_representation_doc( + repre["name"], version["_id"], repre_context, data, repre_id + ) + update_data = None + if repre_id is not None: + update_data = prepare_representation_update_data( + existing, repre_doc + ) + + return { + "representation": repre_doc, + "repre_doc_update_data": update_data, + "anatomy_data": template_data, + "transfers": transfers, + # todo: avoid the need for 'published_files' used by Integrate Hero + # backwards compatibility + "published_files": [transfer[1] for transfer in transfers] + } + + def create_version_data(self, instance): + """Create the data dictionary for the version + + Args: + instance: the current instance being published + + Returns: + dict: the required information for version["data"] + """ + + context = instance.context + + # create relative source path for DB + if "source" in instance.data: + source = instance.data["source"] + else: + source = context.data["currentFile"] + anatomy = instance.context.data["anatomy"] + source = self.get_rootless_path(anatomy, source) + self.log.debug("Source: {}".format(source)) + + version_data = { + "families": get_instance_families(instance), + "time": context.data["time"], + "author": context.data["user"], + "source": source, + "comment": instance.data["comment"], + "machine": context.data.get("machine"), + "fps": instance.data.get("fps", context.data.get("fps")) + } + + # todo: preferably we wouldn't need this "if dict" etc. logic and + # instead be able to rely on what the input value is if it's set. + intent_value = context.data.get("intent") + if intent_value and isinstance(intent_value, dict): + intent_value = intent_value.get("value") + + if intent_value: + version_data["intent"] = intent_value + + # Include optional data if present in instance data + optionals = [ + "frameStart", "frameEnd", "step", "handles", + "handleEnd", "handleStart", "sourceHashes" + ] + for key in optionals: + if key in instance.data: + version_data[key] = instance.data[key] + + # Include instance.data[versionData] directly + version_data_instance = instance.data.get("versionData") + if version_data_instance: + version_data.update(version_data_instance) + + return version_data + + def get_template_name(self, instance): + """Return anatomy template name to use for integration""" + + # Anatomy data is pre-filled by Collectors + context = instance.context + project_name = context.data["projectName"] + + # Task can be optional in anatomy data + host_name = context.data["hostName"] + anatomy_data = instance.data["anatomyData"] + family = anatomy_data["family"] + task_info = anatomy_data.get("task") or {} + + return get_publish_template_name( + project_name, + host_name, + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=context.data["project_settings"], + logger=self.log + ) + + def get_rootless_path(self, anatomy, path): + """Returns, if possible, path without absolute portion from root + (eg. 'c:\' or '/opt/..') + + This information is platform dependent and shouldn't be captured. 
+ Example: + 'c:/projects/MyProject1/Assets/publish...' > + '{root}/MyProject1/Assets...' + + Args: + anatomy: anatomy part from instance + path: path (absolute) + Returns: + path: modified path if possible, otherwise the unmodified path + (a warning is logged) + """ + + success, rootless_path = anatomy.find_root_template_from_path(path) + if success: + path = rootless_path + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(path)) + return path + + def get_files_info(self, destinations, sites, anatomy): + """Prepare 'files' info portion for representations. + + Arguments: + destinations (list): List of transferred file destinations + sites (list): array of published locations + anatomy: anatomy part from instance + Returns: + output_resources: array of dictionaries to be added to 'files' key + in representation + """ + + file_infos = [] + for file_path in destinations: + file_info = self.prepare_file_info(file_path, anatomy, sites=sites) + file_infos.append(file_info) + return file_infos + + def prepare_file_info(self, path, anatomy, sites): + """ Prepare information for one file (asset or resource) + + Arguments: + path: destination url of published file + anatomy: anatomy part from instance + sites: array of published locations, + [{'name': 'studio', 'created_dt': date}] by default; + expected site names: ['studio', 'site1', 'gdrive1'] + + Returns: + dict: file info dictionary + """ + + return { + "_id": ObjectId(), + "path": self.get_rootless_path(anatomy, path), + "size": os.path.getsize(path), + "hash": source_hash(path), + "sites": sites + } + + def _validate_path_in_project_roots(self, anatomy, file_path): + """Checks if 'file_path' starts with any of the roots. + + Used to check that published path belongs to project, eg. we are not + trying to publish to a local-only folder. 
+ Args: + anatomy (Anatomy) + file_path (str) + Raises + (KnownPublishError) + """ + path = self.get_rootless_path(anatomy, file_path) + if not path: + raise KnownPublishError(( + "Destination path '{}' ".format(file_path) + + "must be in project dir" + )) diff --git a/openpype/plugins/publish/integrate_hero_version.py b/openpype/plugins/publish/integrate_hero_version.py index 466606d08b..e796f7b376 100644 --- a/openpype/plugins/publish/integrate_hero_version.py +++ b/openpype/plugins/publish/integrate_hero_version.py @@ -4,11 +4,25 @@ import clique import errno import shutil -from bson.objectid import ObjectId -from pymongo import InsertOne, ReplaceOne import pyblish.api -from avalon import api, io, schema + +from openpype.client import ( + get_version_by_id, + get_hero_version_by_subset_id, + get_archived_representations, + get_representations, +) +from openpype.client.operations import ( + OperationsSession, + new_hero_version_doc, + prepare_hero_version_update_data, + prepare_representation_update_data, +) from openpype.lib import create_hard_link +from openpype.pipeline import ( + schema +) +from openpype.pipeline.publish import get_publish_template_name class IntegrateHeroVersion(pyblish.api.InstancePlugin): @@ -17,7 +31,9 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.1 optional = True + active = True + # Families are modified using settings families = [ "model", "rig", @@ -31,13 +47,15 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): ignored_representation_names = [] db_representation_context_keys = [ "project", "asset", "task", "subset", "representation", - "family", "hierarchy", "task", "username" + "family", "hierarchy", "task", "username", "user" ] - # TODO add family filtering # QUESTION/TODO this process should happen on server if crashed due to # permissions error on files (files were used or user didn't have perms) # *but all other plugins must be sucessfully completed + template_name_profiles = [] + _default_template_name = "hero" + def process(self, instance): self.log.debug( "--- Integration of Hero version for subset `{}` begins.".format( @@ -47,31 +65,44 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): published_repres = instance.data.get("published_representations") if not published_repres: self.log.debug( - "*** There are not published representations on the instance." + "*** There are no published representations on the instance." ) return - project_name = api.Session["AVALON_PROJECT"] - - # TODO raise error if Hero not set? anatomy = instance.context.data["anatomy"] - if "hero" not in anatomy.templates: - self.log.warning("!!! Anatomy does not have set `hero` key!") - return + project_name = anatomy.project_name - if "path" not in anatomy.templates["hero"]: + template_key = self._get_template_key(project_name, instance) + + if template_key not in anatomy.templates: self.log.warning(( - "!!! There is not set `path` template in `hero` anatomy" - " for project \"{}\"." - ).format(project_name)) + "!!! Anatomy of project \"{}\" does not have set" + " \"{}\" template key!" + ).format(project_name, template_key)) return - hero_template = anatomy.templates["hero"]["path"] + if "path" not in anatomy.templates[template_key]: + self.log.warning(( + "!!! There is not set \"path\" template in \"{}\" anatomy" + " for project \"{}\"." + ).format(template_key, project_name)) + return + + hero_template = anatomy.templates[template_key]["path"] self.log.debug("`hero` template check was successful. 
`{}`".format( hero_template )) - hero_publish_dir = self.get_publish_dir(instance) + self.integrate_instance( + instance, project_name, template_key, hero_template + ) + + def integrate_instance( + self, instance, project_name, template_key, hero_template + ): + anatomy = instance.context.data["anatomy"] + published_repres = instance.data["published_representations"] + hero_publish_dir = self.get_publish_dir(instance, template_key) src_version_entity = instance.data.get("versionEntity") filtered_repre_ids = [] @@ -99,8 +130,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): "Published version entity was not sent in representation data." " Querying entity from database." )) - src_version_entity = ( - self.version_from_representations(published_repres) + src_version_entity = self.version_from_representations( + project_name, published_repres ) if not src_version_entity: @@ -151,43 +182,40 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): other_file_paths_mapping.append((file_path, dst_filepath)) # Current version - old_version, old_repres = ( - self.current_hero_ents(src_version_entity) + old_version, old_repres = self.current_hero_ents( + project_name, src_version_entity ) old_repres_by_name = { repre["name"].lower(): repre for repre in old_repres } + op_session = OperationsSession() + + entity_id = None if old_version: - new_version_id = old_version["_id"] - else: - new_version_id = ObjectId() - - new_hero_version = { - "_id": new_version_id, - "version_id": src_version_entity["_id"], - "parent": src_version_entity["parent"], - "type": "hero_version", - "schema": "openpype:hero_version-1.0" - } - schema.validate(new_hero_version) - - # Don't make changes in database until everything is O.K. - bulk_writes = [] + entity_id = old_version["_id"] + new_hero_version = new_hero_version_doc( + src_version_entity["_id"], + src_version_entity["parent"], + entity_id=entity_id + ) if old_version: self.log.debug("Replacing old hero version.") - bulk_writes.append( - ReplaceOne( - {"_id": new_hero_version["_id"]}, - new_hero_version - ) + update_data = prepare_hero_version_update_data( + old_version, new_hero_version + ) + op_session.update_entity( + project_name, + new_hero_version["type"], + old_version["_id"], + update_data ) else: self.log.debug("Creating first hero version.") - bulk_writes.append( - InsertOne(new_hero_version) + op_session.create_entity( + project_name, new_hero_version["type"], new_hero_version ) # Separate old representations into `to replace` and `to delete` @@ -204,11 +232,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): if old_repres_by_name: old_repres_to_delete = old_repres_by_name - archived_repres = list(io.find({ + archived_repres = list(get_archived_representations( + project_name, # Check what is type of archived representation - "type": "archived_repsentation", - "parent": new_version_id - })) + version_ids=[new_hero_version["_id"]] + )) archived_repres_by_name = {} for repre in archived_repres: repre_name_low = repre["name"].lower() @@ -271,12 +299,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): continue # Prepare anatomy data - anatomy_data = repre_info["anatomy_data"] + anatomy_data = copy.deepcopy(repre_info["anatomy_data"]) anatomy_data.pop("version", None) # Get filled path to repre context anatomy_filled = anatomy.format(anatomy_data) - template_filled = anatomy_filled["hero"]["path"] + template_filled = anatomy_filled[template_key]["path"] repre_data = { "path": str(template_filled), @@ -284,13 +312,9 @@ class 
IntegrateHeroVersion(pyblish.api.InstancePlugin): } repre_context = template_filled.used_values for key in self.db_representation_context_keys: - if ( - key in repre_context or - key not in anatomy_data - ): - continue - - repre_context[key] = anatomy_data[key] + value = anatomy_data.get(key) + if value is not None: + repre_context[key] = value # Prepare new repre repre = copy.deepcopy(repre_info["representation"]) @@ -308,11 +332,11 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): collections, remainders = clique.assemble(published_files) if remainders or not collections or len(collections) > 1: raise Exception(( - "Integrity error. Files of published representation " - "is combination of frame collections and single files." - "Collections: `{}` Single files: `{}`" - ).format(str(collections), - str(remainders))) + "Integrity error. Files of published" + " representation is combination of frame" + " collections and single files. Collections:" + " `{}` Single files: `{}`" + ).format(str(collections), str(remainders))) src_col = collections[0] @@ -320,13 +344,10 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): frame_splitter = "_-_FRAME_SPLIT_-_" anatomy_data["frame"] = frame_splitter _anatomy_filled = anatomy.format(anatomy_data) - _template_filled = _anatomy_filled["hero"]["path"] + _template_filled = _anatomy_filled[template_key]["path"] head, tail = _template_filled.split(frame_splitter) padding = int( - anatomy.templates["render"].get( - "frame_padding", - anatomy.templates["render"].get("padding") - ) + anatomy.templates[template_key]["frame_padding"] ) dst_col = clique.Collection( @@ -361,12 +382,34 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): # Replace current representation if repre_name_low in old_repres_to_replace: old_repre = old_repres_to_replace.pop(repre_name_low) + repre["_id"] = old_repre["_id"] - bulk_writes.append( - ReplaceOne( - {"_id": old_repre["_id"]}, - repre - ) + update_data = prepare_representation_update_data( + old_repre, repre) + + # Keep previously synchronized sites up-to-date + # by comparing old and new sites and adding old sites + # if missing in new ones + old_repre_files_sites = [ + f.get("sites", []) for f in old_repre.get("files", []) + ] + for i, file in enumerate(repre.get("files", [])): + repre_sites_names = { + s["name"] for s in file.get("sites", []) + } + for site in old_repre_files_sites[i]: + if site["name"] not in repre_sites_names: + # Pop the date to tag for sync + site.pop("created_dt", None) + file["sites"].append(site) + + update_data["files"][i] = file + + op_session.update_entity( + project_name, + old_repre["type"], + old_repre["_id"], + update_data ) # Unarchive representation @@ -374,21 +417,21 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): archived_repre = archived_repres_by_name.pop( repre_name_low ) - old_id = archived_repre["old_id"] - repre["_id"] = old_id - bulk_writes.append( - ReplaceOne( - {"old_id": old_id}, - repre - ) + repre["_id"] = archived_repre["old_id"] + update_data = prepare_representation_update_data( + archived_repre, repre) + op_session.update_entity( + project_name, + old_repre["type"], + archived_repre["_id"], + update_data ) # Create representation else: - repre["_id"] = ObjectId() - bulk_writes.append( - InsertOne(repre) - ) + repre.pop("_id", None) + op_session.create_entity(project_name, "representation", + repre) self.path_checks = [] @@ -409,28 +452,22 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): archived_repre = archived_repres_by_name.pop( 
repre_name_low ) - repre["old_id"] = repre["_id"] - repre["_id"] = archived_repre["_id"] - repre["type"] = archived_repre["type"] - bulk_writes.append( - ReplaceOne( - {"_id": archived_repre["_id"]}, - repre - ) - ) + changes = {"old_id": repre["_id"], + "_id": archived_repre["_id"], + "type": archived_repre["type"]} + op_session.update_entity(project_name, + archived_repre["type"], + archived_repre["_id"], + changes) else: - repre["old_id"] = repre["_id"] - repre["_id"] = ObjectId() + repre["old_id"] = repre.pop("_id") repre["type"] = "archived_representation" - bulk_writes.append( - InsertOne(repre) - ) + op_session.create_entity(project_name, + "archived_representation", + repre) - if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( - bulk_writes - ) + op_session.commit() # Remove backuped previous hero if ( @@ -444,6 +481,8 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): backup_hero_publish_dir is not None and os.path.exists(backup_hero_publish_dir) ): + if os.path.exists(hero_publish_dir): + shutil.rmtree(hero_publish_dir) os.rename(backup_hero_publish_dir, hero_publish_dir) self.log.error(( "!!! Creating of hero version failed." @@ -466,13 +505,18 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): files.append(_path) return files - def get_publish_dir(self, instance): + def get_publish_dir(self, instance, template_key): anatomy = instance.context.data["anatomy"] template_data = copy.deepcopy(instance.data["anatomyData"]) - if "folder" in anatomy.templates["hero"]: + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + + if "folder" in anatomy.templates[template_key]: anatomy_filled = anatomy.format(template_data) - publish_folder = anatomy_filled["hero"]["folder"] + publish_folder = anatomy_filled[template_key]["folder"] else: # This is for cases of Deprecated anatomy without `folder` # TODO remove when all clients have solved this issue @@ -483,13 +527,12 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): anatomy_filled = anatomy.format(template_data) # solve deprecated situation when `folder` key is not underneath # `publish` anatomy - project_name = api.Session["AVALON_PROJECT"] self.log.warning(( "Deprecation warning: Anatomy does not have set `folder`" " key underneath `publish` (in global of for project `{}`)." - ).format(project_name)) + ).format(anatomy.project_name)) - file_path = anatomy_filled["hero"]["path"] + file_path = anatomy_filled[template_key]["path"] # Directory publish_folder = os.path.dirname(file_path) @@ -499,6 +542,32 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return publish_folder + def _get_template_key(self, project_name, instance): + anatomy_data = instance.data["anatomyData"] + task_info = anatomy_data.get("task") or {} + host_name = instance.context.data["hostName"] + + # TODO raise error if Hero not set? 
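The failure-recovery path earlier in this hunk (restoring `backup_hero_publish_dir` after a failed integration) follows a common backup-and-rollback pattern. A condensed sketch with hypothetical paths and a placeholder for the actual integration work, not part of this patch:

    import os
    import shutil

    hero_publish_dir = "/proj/asset/publish/hero"            # hypothetical
    backup_hero_publish_dir = hero_publish_dir + ".BACKUP"   # hypothetical

    try:
        integrate_hero_files(hero_publish_dir)  # placeholder for real work
    except Exception:
        # Roll back: drop the partially written hero dir, restore backup
        if os.path.exists(backup_hero_publish_dir):
            if os.path.exists(hero_publish_dir):
                shutil.rmtree(hero_publish_dir)
            os.rename(backup_hero_publish_dir, hero_publish_dir)
        raise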
+ family = self.main_family_from_instance(instance) + + return get_publish_template_name( + project_name, + host_name, + family, + task_info.get("name"), + task_info.get("type"), + project_settings=instance.context.data["project_settings"], + hero=True, + logger=self.log + ) + + def main_family_from_instance(self, instance): + """Returns main family of entered instance.""" + family = instance.data.get("family") + if not family: + family = instance.data["families"][0] + return family + def copy_file(self, src_path, dst_path): # TODO check drives if are the same to check if cas hardlink dirname = os.path.dirname(dst_path) @@ -524,31 +593,32 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): return except OSError as exc: - # re-raise exception if different than cross drive path - if exc.errno != errno.EXDEV: + # re-raise exception if different than + # EXDEV - cross drive path + # EINVAL - wrong format, must be NTFS + self.log.debug("Hardlink failed with errno:'{}'".format(exc.errno)) + if exc.errno not in [errno.EXDEV, errno.EINVAL]: raise shutil.copy(src_path, dst_path) - def version_from_representations(self, repres): + def version_from_representations(self, project_name, repres): for repre in repres: - version = io.find_one({"_id": repre["parent"]}) + version = get_version_by_id(project_name, repre["parent"]) if version: return version - def current_hero_ents(self, version): - hero_version = io.find_one({ - "parent": version["parent"], - "type": "hero_version" - }) + def current_hero_ents(self, project_name, version): + hero_version = get_hero_version_by_subset_id( + project_name, version["parent"] + ) if not hero_version: return (None, []) - hero_repres = list(io.find({ - "parent": hero_version["_id"], - "type": "representation" - })) + hero_repres = list(get_representations( + project_name, version_ids=[hero_version["_id"]] + )) return (hero_version, hero_repres) def _update_path(self, anatomy, path, src_file, dst_file): @@ -564,22 +634,16 @@ class IntegrateHeroVersion(pyblish.api.InstancePlugin): src_file (string) - original file path dst_file (string) - hero file path """ - _, rootless = anatomy.find_root_template_from_path( - dst_file - ) - _, rtls_src = anatomy.find_root_template_from_path( - src_file - ) + _, rootless = anatomy.find_root_template_from_path(dst_file) + _, rtls_src = anatomy.find_root_template_from_path(src_file) return path.replace(rtls_src, rootless) def _update_hash(self, hash, src_file_name, dst_file): """ Updates hash value with proper hero name """ - src_file_name = self._get_name_without_ext( - src_file_name) - hero_file_name = self._get_name_without_ext( - dst_file) + src_file_name = self._get_name_without_ext(src_file_name) + hero_file_name = self._get_name_without_ext(dst_file) return hash.replace(src_file_name, hero_file_name) def _get_name_without_ext(self, value): diff --git a/openpype/plugins/publish/integrate_inputlinks.py b/openpype/plugins/publish/integrate_inputlinks.py index 11cffc4638..6964f2d938 100644 --- a/openpype/plugins/publish/integrate_inputlinks.py +++ b/openpype/plugins/publish/integrate_inputlinks.py @@ -3,7 +3,7 @@ from collections import OrderedDict from bson.objectid import ObjectId import pyblish.api -from avalon import io +from openpype.pipeline import legacy_io class IntegrateInputLinks(pyblish.api.ContextPlugin): @@ -129,5 +129,7 @@ class IntegrateInputLinks(pyblish.api.ContextPlugin): if input_links is None: continue - io.update_one({"_id": version_doc["_id"]}, - {"$set": {"data.inputLinks": input_links}}) + 
legacy_io.update_one( + {"_id": version_doc["_id"]}, + {"$set": {"data.inputLinks": input_links}} + ) diff --git a/openpype/plugins/publish/integrate_new.py b/openpype/plugins/publish/integrate_legacy.py similarity index 84% rename from openpype/plugins/publish/integrate_new.py rename to openpype/plugins/publish/integrate_legacy.py index 9b2ab9c935..b93abab1d8 100644 --- a/openpype/plugins/publish/integrate_new.py +++ b/openpype/plugins/publish/integrate_legacy.py @@ -8,21 +8,33 @@ import errno import six import re import shutil +from collections import deque, defaultdict +from datetime import datetime from bson.objectid import ObjectId from pymongo import DeleteOne, InsertOne import pyblish.api -from avalon import io -import openpype.api -from datetime import datetime -# from pype.modules import ModulesManager -from openpype.lib.profiles_filtering import filter_profiles + +from openpype.client import ( + get_asset_by_name, + get_subset_by_id, + get_subset_by_name, + get_version_by_id, + get_version_by_name, + get_representations, + get_archived_representations, +) from openpype.lib import ( prepare_template_data, create_hard_link, StringTemplate, - TemplateUnsolved + TemplateUnsolved, + source_hash, + filter_profiles, + get_local_site_id, ) +from openpype.pipeline import legacy_io +from openpype.pipeline.publish import get_publish_template_name # this is needed until speedcopy for linux is fixed if sys.platform == "win32": @@ -59,10 +71,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "data": additional metadata for each representation. """ - label = "Integrate Asset New" - order = pyblish.api.IntegratorOrder + label = "Integrate Asset (legacy)" + # Make sure it happens after new integrator + order = pyblish.api.IntegratorOrder + 0.00001 families = ["workfile", "pointcache", + "proxyAbc", "camera", "animation", "model", @@ -91,9 +105,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source", "matchmove", "image", - "source", "assembly", "fbx", + "gltf", "textures", "action", "harmony.template", @@ -105,12 +119,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "effect", "xgen", "hda", - "usd" + "usd", + "staticMesh", + "skeletalMesh", + "mvLook", + "mvUsdComposition", + "mvUsdOverride", + "simpleUnrealTexture" ] - exclude_families = ["clip"] + exclude_families = ["render.farm"] db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", - "family", "hierarchy", "task", "username" + "family", "hierarchy", "task", "username", "user" ] default_template_name = "publish" @@ -121,17 +141,62 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): integrated_file_sizes = {} # Attributes set by settings - template_name_profiles = None subset_grouping_profiles = None def process(self, instance): - self.integrated_file_sizes = {} - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: + if instance.data.get("processedWithNewIntegrator"): + self.log.info("Instance was already processed with new integrator") return + for ef in self.exclude_families: + if ( + instance.data["family"] == ef or + ef in instance.data["families"]): + self.log.debug("Excluded family '{}' in '{}' or {}".format( + ef, instance.data["family"], instance.data["families"])) + return + + # Skip instance if it should be published on a farm + if instance.data.get("farm"): + return + + # Prepare representations that should be integrated + repres = instance.data.get("representations") + # Raise error if instance doesn't have any representations + if not repres: 
+ raise ValueError( + "Instance {} has no files to transfer".format( + instance.data["family"] + ) + ) + + # Validate type of stored representations + if not isinstance(repres, (list, tuple)): + raise TypeError( + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) + ) + + # Filter representations + filtered_repres = [] + for repre in repres: + if "delete" in repre.get("tags", []): + continue + filtered_repres.append(repre) + + # Skip instance if there are no representations to integrate + # - all representations were filtered out + if not filtered_repres: + self.log.warning(( + "Skipping, there are no representations" + " to integrate for instance {}" + ).format(instance.data["family"])) + return + + self.integrated_file_sizes = {} try: - self.register(instance) + self.register(instance, filtered_repres) self.log.info("Integrated Asset in to the database ...") self.log.info("instance.data: {}".format(instance.data)) self.handle_destination_files(self.integrated_file_sizes, @@ -142,15 +207,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.handle_destination_files(self.integrated_file_sizes, 'remove') six.reraise(*sys.exc_info()) - def register(self, instance): + def register(self, instance, repres): # Required environment variables anatomy_data = instance.data["anatomyData"] - io.install() + legacy_io.install() context = instance.context project_entity = instance.data["projectEntity"] + project_name = project_entity["name"] context_asset_name = None context_asset_doc = context.data.get("assetEntity") @@ -160,11 +226,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] asset_entity = instance.data.get("assetEntity") if not asset_entity or asset_entity["name"] != context_asset_name: - asset_entity = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) + asset_entity = get_asset_by_name(project_name, asset_name) assert asset_entity, ( "No asset found by the name \"{0}\" in project \"{1}\"" ).format(asset_name, project_entity["name"]) @@ -220,16 +282,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "Establishing staging directory @ {0}".format(stagingdir) ) - # Ensure at least one file is set up for transfer in staging dir. 
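The tag-based filtering added to `process` above can be illustrated with a minimal sketch (illustrative data only, not part of this patch):

    repres = [
        {"name": "exr", "files": ["a.exr"], "tags": []},
        {"name": "thumbnail", "files": ["thumb.jpg"], "tags": ["delete"]},
    ]
    filtered_repres = [
        repre for repre in repres
        if "delete" not in repre.get("tags", [])
    ]
    # Only the "exr" representation is integrated; "thumbnail" is skipped.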
- repres = instance.data.get("representations") - assert repres, "Instance has no files to transfer" - assert isinstance(repres, (list, tuple)), ( - "Instance 'files' must be a list, got: {0} {1}".format( - str(type(repres)), str(repres) - ) - ) - - subset = self.get_subset(asset_entity, instance) + subset = self.get_subset(project_name, asset_entity, instance) instance.data["subsetEntity"] = subset version_number = instance.data["version"] @@ -251,16 +304,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Creating version ...") - new_repre_names_low = [_repre["name"].lower() for _repre in repres] + new_repre_names_low = [ + _repre["name"].lower() + for _repre in repres + ] - existing_version = io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version_number - }) + existing_version = get_version_by_name( + project_name, version_number, subset["_id"] + ) if existing_version is None: - version_id = io.insert_one(version).inserted_id + version_id = legacy_io.insert_one(version).inserted_id else: # Check if instance have set `append` mode which cause that # only replicated representations are set to archive @@ -268,7 +322,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update version data # TODO query by _id and - io.update_many({ + legacy_io.update_many({ 'type': 'version', 'parent': subset["_id"], 'name': version_number @@ -278,10 +332,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] # Find representations of existing version and archive them - current_repres = list(io.find({ - "type": "representation", - "parent": version_id - })) + current_repres = list(get_representations( + project_name, version_ids=[version_id] + )) bulk_writes = [] for repre in current_repres: if append_repres: @@ -301,17 +354,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # bulk updates if bulk_writes: - io._database[io.Session["AVALON_PROJECT"]].bulk_write( + legacy_io.database[project_name].bulk_write( bulk_writes ) - version = io.find_one({"_id": version_id}) + version = get_version_by_id(project_name, version_id) instance.data["versionEntity"] = version - existing_repres = list(io.find({ - "parent": version_id, - "type": "archived_representation" - })) + existing_repres = list(get_archived_representations( + project_name, + version_ids=[version_id] + )) instance.data['version'] = version['name'] @@ -337,32 +390,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): family = self.main_family_from_instance(instance) - key_values = { - "families": family, - "tasks": task_name, - "hosts": instance.context.data["hostName"], - "task_types": task_type - } - profile = filter_profiles( - self.template_name_profiles, - key_values, + template_name = get_publish_template_name( + project_name, + instance.context.data["hostName"], + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=instance.context.data["project_settings"], logger=self.log ) - template_name = "publish" - if profile: - template_name = profile["template_name"] - published_representations = {} - for idx, repre in enumerate(instance.data["representations"]): - # reset transfers for next representation - # instance.data['transfers'] is used as a global variable - # in current codebase - instance.data['transfers'] = list(orig_transfers) - - if "delete" in repre.get("tags", []): - continue - + for idx, repre in enumerate(repres): published_files = [] # create template data for Anatomy @@ -381,6 +420,11 
@@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if resolution_width: template_data["fps"] = fps + if "originalBasename" in instance.data: + template_data.update({ + "originalBasename": instance.data.get("originalBasename") + }) + files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] @@ -552,6 +596,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre['published_path'] = dst self.log.debug("__ dst: {}".format(dst)) + if not instance.data.get("publishDir"): + instance.data["publishDir"] = ( + anatomy_filled + [template_name] + ["folder"] + ) if repre.get("udim"): repre_context["udim"] = repre.get("udim") # store list @@ -629,18 +679,22 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "published_files": published_files } self.log.debug("__ representations: {}".format(representations)) + # reset transfers for next representation + # instance.data['transfers'] is used as a global variable + # in current codebase + instance.data['transfers'] = list(orig_transfers) # Remove old representations if there are any (before insertion of new) if existing_repres: repre_ids_to_remove = [] for repre in existing_repres: repre_ids_to_remove.append(repre["_id"]) - io.delete_many({"_id": {"$in": repre_ids_to_remove}}) + legacy_io.delete_many({"_id": {"$in": repre_ids_to_remove}}) for rep in instance.data["representations"]: self.log.debug("__ rep: {}".format(rep)) - io.insert_many(representations) + legacy_io.insert_many(representations) instance.data["published_representations"] = ( published_representations ) @@ -740,13 +794,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): create_hard_link(src, dst) - def get_subset(self, asset, instance): + def get_subset(self, project_name, asset, instance): subset_name = instance.data["subset"] - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) + subset = get_subset_by_name(project_name, subset_name, asset["_id"]) if subset is None: self.log.info("Subset '%s' not found, creating ..." % subset_name) @@ -763,7 +813,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if _family not in families: families.append(_family) - _id = io.insert_one({ + _id = legacy_io.insert_one({ "schema": "openpype:subset-3.0", "type": "subset", "name": subset_name, @@ -773,7 +823,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "parent": asset["_id"] }).inserted_id - subset = io.find_one({"_id": _id}) + subset = get_subset_by_id(project_name, _id) # QUESTION Why is changing of group and updating it's # families in 'get_subset'? @@ -782,7 +832,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Update families on subset. 
families = [instance.data["family"]] families.extend(instance.data.get("families", [])) - io.update_many( + legacy_io.update_many( {"type": "subset", "_id": ObjectId(subset["_id"])}, {"$set": {"data.families": families}} ) @@ -806,7 +856,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset_group = self._get_subset_group(instance) if subset_group: - io.update_many({ + legacy_io.update_many({ 'type': 'subset', '_id': ObjectId(subset_id) }, {'$set': {'data.subsetGroup': subset_group}}) @@ -907,9 +957,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): families += current_families # create relative source path for DB - if "source" in instance.data: - source = instance.data["source"] - else: + source = instance.data.get("source") + if not source: source = context.data["currentFile"] anatomy = instance.context.data["anatomy"] source = self.get_rootless_path(anatomy, source) @@ -920,7 +969,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "time": context.data["time"], "author": context.data["user"], "source": source, - "comment": context.data.get("comment"), + "comment": instance.data["comment"], "machine": context.data.get("machine"), "fps": context.data.get( "fps", instance.data.get("fps") @@ -1005,7 +1054,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for _src, dest in resources: path = self.get_rootless_path(anatomy, dest) dest = self.get_dest_temp_url(dest) - file_hash = openpype.api.source_hash(dest) + file_hash = source_hash(dest) if self.TMP_FILE_EXT and \ ',{}'.format(self.TMP_FILE_EXT) in file_hash: file_hash = file_hash.replace(',{}'.format(self.TMP_FILE_EXT), @@ -1098,25 +1147,24 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): rec["sites"].append(meta) already_attached_sites[meta["name"]] = None + # add alternative sites + rec, already_attached_sites = self._add_alternative_sites( + system_sync_server_presets, already_attached_sites, rec) + # add skeleton for site where it should be always synced to - for always_on_site in always_accesible: + for always_on_site in set(always_accesible): if always_on_site not in already_attached_sites.keys(): meta = {"name": always_on_site.strip()} rec["sites"].append(meta) already_attached_sites[meta["name"]] = None - # add alternative sites - rec = self._add_alternative_sites(system_sync_server_presets, - already_attached_sites, - rec) - log.debug("final sites:: {}".format(rec["sites"])) return rec def _get_sites(self, sync_project_presets): """Returns tuple (local_site, remote_site)""" - local_site_id = openpype.api.get_local_site_id() + local_site_id = get_local_site_id() local_site = sync_project_presets["config"]. \ get("active_site", "studio").strip() @@ -1140,22 +1188,60 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ conf_sites = system_sync_server_presets.get("sites", {}) + alt_site_pairs = self._get_alt_site_pairs(conf_sites) + + already_attached_keys = list(already_attached_sites.keys()) + for added_site in already_attached_keys: + real_created = already_attached_sites[added_site] + for alt_site in alt_site_pairs.get(added_site, []): + if alt_site in already_attached_sites.keys(): + continue + meta = {"name": alt_site} + # alt site inherits state of 'created_dt' + if real_created: + meta["created_dt"] = real_created + rec["sites"].append(meta) + already_attached_sites[meta["name"]] = real_created + + return rec, already_attached_sites + + def _get_alt_site_pairs(self, conf_sites): + """Returns dict of site and its alternative sites. 
+ + If `site` has an alternative site, the alternative site also has + 'site' as its alternative site + Args: + conf_sites (dict) + Returns: + (dict): {'site': [alternative sites]...} + """ + alt_site_pairs = defaultdict(list) for site_name, site_info in conf_sites.items(): alt_sites = set(site_info.get("alternative_sites", [])) - already_attached_keys = list(already_attached_sites.keys()) - for added_site in already_attached_keys: - if added_site in alt_sites: - if site_name in already_attached_keys: - continue - meta = {"name": site_name} - real_created = already_attached_sites[added_site] - # alt site inherits state of 'created_dt' - if real_created: - meta["created_dt"] = real_created - rec["sites"].append(meta) - already_attached_sites[meta["name"]] = real_created + alt_site_pairs[site_name].extend(alt_sites) - return rec + + for alt_site in alt_sites: + alt_site_pairs[alt_site].append(site_name) + + for site_name, alt_sites in alt_site_pairs.items(): + sites_queue = deque(alt_sites) + while sites_queue: + alt_site = sites_queue.popleft() + + # safety against a wrong config, eg. + # {"SFTP": {"alternative_sites": ["SFTP"]}} + if alt_site == site_name or alt_site not in alt_site_pairs: + continue + + for alt_alt_site in alt_site_pairs[alt_site]: + if ( + alt_alt_site != site_name + and alt_alt_site not in alt_sites + ): + alt_sites.append(alt_alt_site) + sites_queue.append(alt_alt_site) + + return alt_site_pairs def handle_destination_files(self, integrated_file_sizes, mode): """ Clean destination files diff --git a/openpype/plugins/publish/integrate_subset_group.py b/openpype/plugins/publish/integrate_subset_group.py new file mode 100644 index 0000000000..a24ebba3a5 --- /dev/null +++ b/openpype/plugins/publish/integrate_subset_group.py @@ -0,0 +1,98 @@ +"""Produces instance.data["subsetGroup"] data used during integration. + +Requires: + dict -> context["anatomyData"] *(pyblish.api.CollectorOrder + 0.49) + +Provides: + instance -> subsetGroup (str) + +""" +import pyblish.api + +from openpype.lib.profiles_filtering import filter_profiles +from openpype.lib import ( + prepare_template_data, + StringTemplate, + TemplateUnsolved +) + + +class IntegrateSubsetGroup(pyblish.api.InstancePlugin): + """Integrate Subset Group for publish.""" + + # Run after CollectAnatomyInstanceData + order = pyblish.api.IntegratorOrder - 0.1 + label = "Subset Group" + + # Attributes set by settings + subset_grouping_profiles = None + + def process(self, instance): + """Look into subset group profiles set by settings. + + Attribute 'subset_grouping_profiles' is defined by OpenPype settings. 
+ """ + + # Skip if 'subset_grouping_profiles' is empty + if not self.subset_grouping_profiles: + return + + if instance.data.get("subsetGroup"): + # If subsetGroup is already set then allow that value to remain + self.log.debug(( + "Skipping collect subset group due to existing value: {}" + ).format(instance.data["subsetGroup"])) + return + + # Skip if there is no matching profile + filter_criteria = self.get_profile_filter_criteria(instance) + profile = filter_profiles( + self.subset_grouping_profiles, + filter_criteria, + logger=self.log + ) + + if not profile: + return + + template = profile["template"] + + fill_pairs = prepare_template_data({ + "family": filter_criteria["families"], + "task": filter_criteria["tasks"], + "host": filter_criteria["hosts"], + "subset": instance.data["subset"], + "renderlayer": instance.data.get("renderlayer") + }) + + filled_template = None + try: + filled_template = StringTemplate.format_strict_template( + template, fill_pairs + ) + except (KeyError, TemplateUnsolved): + keys = fill_pairs.keys() + self.log.warning(( + "Subset grouping failed. Only {} are expected in Settings" + ).format(','.join(keys))) + + if filled_template: + instance.data["subsetGroup"] = filled_template + + def get_profile_filter_criteria(self, instance): + """Return filter criteria for `filter_profiles`""" + # TODO: This logic is used in much more plug-ins in one way or another + # Maybe better suited for lib? + # Anatomy data is pre-filled by Collectors + anatomy_data = instance.data["anatomyData"] + + # Task can be optional in anatomy data + task = anatomy_data.get("task", {}) + + # Return filter criteria + return { + "families": anatomy_data["family"], + "tasks": task.get("name"), + "hosts": instance.context.data["hostName"], + "task_types": task.get("type") + } diff --git a/openpype/plugins/publish/integrate_thumbnail.py b/openpype/plugins/publish/integrate_thumbnail.py index 28a93efb9a..809a1782e0 100644 --- a/openpype/plugins/publish/integrate_thumbnail.py +++ b/openpype/plugins/publish/integrate_thumbnail.py @@ -1,160 +1,348 @@ +""" Integrate Thumbnails for Openpype use in Loaders. + + This thumbnail is different from 'thumbnail' representation which could + be uploaded to Ftrack, or used as any other representation in Loaders to + pull into a scene. + + This one is used only as image describing content of published item and + shows up only in Loader in right column section. +""" + import os import sys import errno import shutil import copy +import collections import six import pyblish.api -from bson.objectid import ObjectId -from avalon import api, io +from openpype.client import get_versions +from openpype.client.operations import OperationsSession, new_thumbnail_doc + +InstanceFilterResult = collections.namedtuple( + "InstanceFilterResult", + ["instance", "thumbnail_path", "version_id"] +) -class IntegrateThumbnails(pyblish.api.InstancePlugin): - """Integrate Thumbnails.""" +class IntegrateThumbnails(pyblish.api.ContextPlugin): + """Integrate Thumbnails for Openpype use in Loaders.""" label = "Integrate Thumbnails" order = pyblish.api.IntegratorOrder + 0.01 - families = ["review"] required_context_keys = [ "project", "asset", "task", "subset", "version" ] - def process(self, instance): - - if not os.environ.get("AVALON_THUMBNAIL_ROOT"): - self.log.warning( - "AVALON_THUMBNAIL_ROOT is not set." - " Skipping thumbnail integration." 
+ def process(self, context): + # Filter instances which can be used for integration + filtered_instance_items = self._prepare_instances(context) + if not filtered_instance_items: + self.log.info( + "All instances were filtered. Thumbnail integration skipped." ) return - published_repres = instance.data.get("published_representations") - if not published_repres: - self.log.debug( - "There are no published representations on the instance." - ) - return + # Initial validation of available templates and required keys + env_key = "AVALON_THUMBNAIL_ROOT" + thumbnail_root_format_key = "{thumbnail_root}" + thumbnail_root = os.environ.get(env_key) or "" - project_name = api.Session["AVALON_PROJECT"] - - anatomy = instance.context.data["anatomy"] + anatomy = context.data["anatomy"] + project_name = anatomy.project_name if "publish" not in anatomy.templates: - self.log.warning("Anatomy is missing the \"publish\" key!") + self.log.warning( + "Anatomy is missing the \"publish\" key. Skipping." + ) return if "thumbnail" not in anatomy.templates["publish"]: self.log.warning(( - "There is no \"thumbnail\" template set for the project \"{}\"" + "There is no \"thumbnail\" template set for the project" + " \"{}\". Skipping." ).format(project_name)) return - thumb_repre = None - thumb_repre_anatomy_data = None - for repre_info in published_repres.values(): - repre = repre_info["representation"] - if repre["name"].lower() == "thumbnail": - thumb_repre = repre - thumb_repre_anatomy_data = repre_info["anatomy_data"] + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + if not thumbnail_template: + self.log.info("Thumbnail template is not filled. Skipping.") + return + + if ( + not thumbnail_root + and thumbnail_root_format_key in thumbnail_template + ): + self.log.warning(("{} is not set. Skipping.").format(env_key)) + return + + # Collect version ids from all filtered instances + version_ids = { + instance_items.version_id + for instance_items in filtered_instance_items + } + # Query versions + version_docs = get_versions( + project_name, + version_ids=version_ids, + hero=True, + fields=["_id", "type", "name"] + ) + # Store versions by their id (converted to string) + version_docs_by_str_id = { + str(version_doc["_id"]): version_doc + for version_doc in version_docs + } + self._integrate_thumbnails( + filtered_instance_items, + version_docs_by_str_id, + anatomy, + thumbnail_root + ) + + def _get_thumbnail_from_instance(self, instance): + # 1. Look for thumbnail in published representations + published_repres = instance.data.get("published_representations") + path = self._get_thumbnail_path_from_published(published_repres) + if path and os.path.exists(path): + return path + + if path: + self.log.warning( + "Could not find published thumbnail path {}".format(path) + ) + + # 2. Look for thumbnail in "not published" representations + thumbnail_path = self._get_thumbnail_path_from_unpublished(instance) + if thumbnail_path and os.path.exists(thumbnail_path): + return thumbnail_path + + # 3. 
Look for thumbnail path on instance in 'thumbnailPath' + thumbnail_path = instance.data.get("thumbnailPath") + if thumbnail_path and os.path.exists(thumbnail_path): + return thumbnail_path + return None + + def _prepare_instances(self, context): + context_thumbnail_path = context.data.get("thumbnailPath") + valid_context_thumbnail = False + if context_thumbnail_path and os.path.exists(context_thumbnail_path): + valid_context_thumbnail = True + + filtered_instances = [] + for instance in context: + instance_label = self._get_instance_label(instance) + # Skip instances without published representations + # - there is nowhere to put the thumbnail + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug(( + "There are no published representations" + " on the instance {}." + ).format(instance_label)) + continue + + # Find thumbnail path on instance + thumbnail_path = self._get_thumbnail_from_instance(instance) + if thumbnail_path: + self.log.debug(( + "Found thumbnail path for instance \"{}\"." + " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + elif valid_context_thumbnail: + # Use context thumbnail path if it is available + thumbnail_path = context_thumbnail_path + self.log.debug(( + "Using context thumbnail path for instance \"{}\"." + " Thumbnail path: {}" + ).format(instance_label, thumbnail_path)) + + # Skip instance if thumbnail path is not available for it + if not thumbnail_path: + self.log.info(( + "Skipping thumbnail integration for instance \"{}\"." + " Instance and context" + " thumbnail paths are not available." + ).format(instance_label)) + continue + + version_id = str(self._get_version_id(published_repres)) + filtered_instances.append( + InstanceFilterResult(instance, thumbnail_path, version_id) + ) + return filtered_instances + + def _get_version_id(self, published_representations): + for repre_info in published_representations.values(): + return repre_info["representation"]["parent"] + + def _get_thumbnail_path_from_published(self, published_representations): + if not published_representations: + return None + + thumb_repre_doc = None + for repre_info in published_representations.values(): + repre_doc = repre_info["representation"] + if repre_doc["name"].lower() == "thumbnail": + thumb_repre_doc = repre_doc break - if not thumb_repre: + if thumb_repre_doc is None: self.log.debug( "There is not representation with name \"thumbnail\"" ) - return + return None - io.install() - - thumbnail_template = anatomy.templates["publish"]["thumbnail"] - - version = io.find_one({"_id": thumb_repre["parent"]}) - if not version: - raise AssertionError( - "There does not exist version with id {}".format( - str(thumb_repre["parent"]) - ) + path = thumb_repre_doc["data"]["path"] + if not os.path.exists(path): + self.log.warning( + "Thumbnail file cannot be found. Path: {}".format(path) ) + return None + return os.path.normpath(path) - # Get full path to thumbnail file from representation - src_full_path = os.path.normpath(thumb_repre["data"]["path"]) - if not os.path.exists(src_full_path): - self.log.warning("Thumbnail file was not found. 
Path: {}".format( - src_full_path - )) - return + def _get_thumbnail_path_from_unpublished(self, instance): + repres = instance.data.get("representations") + if not repres: + return None - filename, file_extension = os.path.splitext(src_full_path) - # Create id for mongo entity now to fill anatomy template - thumbnail_id = ObjectId() - - # Prepare anatomy template fill data - template_data = copy.deepcopy(thumb_repre_anatomy_data) - template_data.update({ - "_id": str(thumbnail_id), - "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), - "ext": file_extension[1:], - "thumbnail_type": "thumbnail" - }) - - anatomy_filled = anatomy.format(template_data) - template_filled = anatomy_filled["publish"]["thumbnail"] - - dst_full_path = os.path.normpath(str(template_filled)) - self.log.debug( - "Copying file .. {} -> {}".format(src_full_path, dst_full_path) + thumbnail_repre = next( + ( + repre + for repre in repres + if repre["name"] == "thumbnail" + ), + None ) - dirname = os.path.dirname(dst_full_path) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno != errno.EEXIST: - tp, value, tb = sys.exc_info() - six.reraise(tp, value, tb) + if not thumbnail_repre: + return None - shutil.copy(src_full_path, dst_full_path) + staging_dir = thumbnail_repre.get("stagingDir") + if not staging_dir: + staging_dir = instance.data.get("stagingDir") - # Clean template data from keys that are dynamic - template_data.pop("_id") - template_data.pop("thumbnail_root") + filename = thumbnail_repre.get("files") + if not staging_dir or not filename: + return None - repre_context = template_filled.used_values - for key in self.required_context_keys: - value = template_data.get(key) - if not value: + if isinstance(filename, (list, tuple, set)): + filename = filename[0] + + thumbnail_path = os.path.join(staging_dir, filename) + if os.path.exists(thumbnail_path): + return thumbnail_path + return None + + def _integrate_thumbnails( + self, + filtered_instance_items, + version_docs_by_str_id, + anatomy, + thumbnail_root + ): + op_session = OperationsSession() + project_name = anatomy.project_name + + for instance_item in filtered_instance_items: + instance, thumbnail_path, version_id = instance_item + instance_label = self._get_instance_label(instance) + version_doc = version_docs_by_str_id.get(version_id) + if not version_doc: + self.log.warning(( + "Version entity for instance \"{}\" was not found." + ).format(instance_label)) continue - repre_context[key] = template_data[key] - thumbnail_entity = { - "_id": thumbnail_id, - "type": "thumbnail", - "schema": "openpype:thumbnail-1.0", - "data": { + filename, file_extension = os.path.splitext(thumbnail_path) + # Create id for mongo entity now to fill anatomy template + thumbnail_doc = new_thumbnail_doc() + thumbnail_id = thumbnail_doc["_id"] + + # Prepare anatomy template fill data + template_data = copy.deepcopy(instance.data["anatomyData"]) + template_data.update({ + "_id": str(thumbnail_id), + "ext": file_extension[1:], + "name": "thumbnail", + "thumbnail_root": thumbnail_root, + "thumbnail_type": "thumbnail" + }) + + anatomy_filled = anatomy.format(template_data) + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + template_filled = anatomy_filled["publish"]["thumbnail"] + + dst_full_path = os.path.normpath(str(template_filled)) + self.log.debug("Copying file .. 
{} -> {}".format( + thumbnail_path, dst_full_path + )) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(thumbnail_path, dst_full_path) + + # Clean template data from keys that are dynamic + for key in ("_id", "thumbnail_root"): + template_data.pop(key, None) + + repre_context = template_filled.used_values + for key in self.required_context_keys: + value = template_data.get(key) + if not value: + continue + repre_context[key] = template_data[key] + + thumbnail_doc["data"] = { "template": thumbnail_template, "template_data": repre_context } - } - # Create thumbnail entity - io.insert_one(thumbnail_entity) - self.log.debug( - "Creating entity in database {}".format(str(thumbnail_entity)) - ) - # Set thumbnail id for version - io.update_many( - {"_id": version["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} - ) - self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( - version["name"], str(version["_id"]) - )) + op_session.create_entity( + project_name, thumbnail_doc["type"], thumbnail_doc + ) + # Create thumbnail entity + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_doc)) + ) - asset_entity = instance.data["assetEntity"] - io.update_many( - {"_id": asset_entity["_id"]}, - {"$set": {"data.thumbnail_id": thumbnail_id}} + # Set thumbnail id for version + op_session.update_entity( + project_name, + version_doc["type"], + version_doc["_id"], + {"data.thumbnail_id": thumbnail_id} + ) + if version_doc["type"] == "hero_version": + version_name = "Hero" + else: + version_name = version_doc["name"] + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version_name, version_id + )) + + asset_entity = instance.data["assetEntity"] + op_session.update_entity( + project_name, + asset_entity["type"], + asset_entity["_id"], + {"data.thumbnail_id": thumbnail_id} + ) + self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( + asset_entity["name"], version_id + )) + + op_session.commit() + + def _get_instance_label(self, instance): + return ( + instance.data.get("label") + or instance.data.get("name") + or "N/A" ) - self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( - asset_entity["name"], str(version["_id"]) - )) diff --git a/openpype/plugins/publish/preintegrate_thumbnail_representation.py b/openpype/plugins/publish/preintegrate_thumbnail_representation.py new file mode 100644 index 0000000000..b88ccee9dc --- /dev/null +++ b/openpype/plugins/publish/preintegrate_thumbnail_representation.py @@ -0,0 +1,71 @@ +""" Marks thumbnail representation for integrate to DB or not. + + Some hosts produce thumbnail representation, most of them do not create + them explicitly, but they created during extract phase. + + In some cases it might be useful to override implicit setting for host/task + + This plugin needs to run after extract phase, but before integrate.py as + thumbnail is part of review family and integrated there. + + It should be better to control integration of thumbnail in one place than + configure it in multiple places on host implementations. 
+""" +import pyblish.api + +from openpype.lib.profiles_filtering import filter_profiles + + +class PreIntegrateThumbnails(pyblish.api.InstancePlugin): + """Marks thumbnail representation for integrate to DB or not.""" + + label = "Override Integrate Thumbnail Representations" + order = pyblish.api.IntegratorOrder - 0.1 + + integrate_profiles = [] + + def process(self, instance): + repres = instance.data.get("representations") + if not repres: + return + + thumbnail_repre = None + for repre in repres: + if repre["name"] == "thumbnail": + thumbnail_repre = repre + break + + if not thumbnail_repre: + return + + family = instance.data["family"] + subset_name = instance.data["subset"] + host_name = instance.context.data["hostName"] + + anatomy_data = instance.data["anatomyData"] + task = anatomy_data.get("task", {}) + + found_profile = filter_profiles( + self.integrate_profiles, + { + "hosts": host_name, + "task_names": task.get("name"), + "task_types": task.get("type"), + "families": family, + "subsets": subset_name, + }, + logger=self.log + ) + + if not found_profile: + return + + if not found_profile["integrate_thumbnail"]: + if "delete" not in thumbnail_repre["tags"]: + thumbnail_repre["tags"].append("delete") + else: + if "delete" in thumbnail_repre["tags"]: + thumbnail_repre["tags"].remove("delete") + + self.log.debug( + "Thumbnail repre tags {}".format(thumbnail_repre["tags"])) diff --git a/openpype/plugins/publish/start_timer.py b/openpype/plugins/publish/start_timer.py deleted file mode 100644 index 112d92bef0..0000000000 --- a/openpype/plugins/publish/start_timer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api - -from openpype.lib import change_timer_to_current_context - - -class StartTimer(pyblish.api.ContextPlugin): - label = "Start Timer" - order = pyblish.api.IntegratorOrder + 1 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - change_timer_to_current_context() diff --git a/openpype/plugins/publish/stop_timer.py b/openpype/plugins/publish/stop_timer.py deleted file mode 100644 index 414e43a3c4..0000000000 --- a/openpype/plugins/publish/stop_timer.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import requests - -import pyblish.api - - -class StopTimer(pyblish.api.ContextPlugin): - label = "Stop Timer" - order = pyblish.api.ExtractorOrder - 0.49 - hosts = ["*"] - - def process(self, context): - modules_settings = context.data["system_settings"]["modules"] - if modules_settings["timers_manager"]["disregard_publishing"]: - webserver_url = os.environ.get("OPENPYPE_WEBSERVER_URL") - rest_api_url = "{}/timers_manager/stop_timer".format(webserver_url) - requests.post(rest_api_url) diff --git a/openpype/plugins/publish/validate_aseset_docs.py b/openpype/plugins/publish/validate_asset_docs.py similarity index 60% rename from openpype/plugins/publish/validate_aseset_docs.py rename to openpype/plugins/publish/validate_asset_docs.py index eed75cdf8a..9a1ca5b8de 100644 --- a/openpype/plugins/publish/validate_aseset_docs.py +++ b/openpype/plugins/publish/validate_asset_docs.py @@ -2,8 +2,8 @@ import pyblish.api from openpype.pipeline import PublishValidationError -class ValidateContainers(pyblish.api.InstancePlugin): - """Validate existence of asset asset documents on instances. +class ValidateAssetDocs(pyblish.api.InstancePlugin): + """Validate existence of asset documents on instances. Without asset document it is not possible to publish the instance. 
@@ -22,10 +22,14 @@ class ValidateContainers(pyblish.api.InstancePlugin): return if instance.data.get("assetEntity"): - self.log.info("Instance have set asset document in it's data.") + self.log.info("Instance has an asset document set in its data.") + + elif instance.data.get("newAssetPublishing"): + # skip if it is editorial + self.log.info("Editorial instance, no need to check.") else: raise PublishValidationError(( - "Instance \"{}\" don't have set asset" - " document which is needed for publishing." + "Instance \"{}\" doesn't have an asset document " + "set, which is needed for publishing." ).format(instance.data["name"])) diff --git a/openpype/plugins/publish/validate_containers.py b/openpype/plugins/publish/validate_containers.py index ce91bd3396..8dc0c61cab 100644 --- a/openpype/plugins/publish/validate_containers.py +++ b/openpype/plugins/publish/validate_containers.py @@ -1,5 +1,9 @@ import pyblish.api -import openpype.lib +from openpype.pipeline.load import any_outdated_containers +from openpype.pipeline import ( + PublishXmlValidationError, + OptionalPyblishPluginMixin +) class ShowInventory(pyblish.api.Action): @@ -14,15 +18,21 @@ class ShowInventory(pyblish.api.Action): host_tools.show_scene_inventory() -class ValidateContainers(pyblish.api.ContextPlugin): +class ValidateContainers(OptionalPyblishPluginMixin, + pyblish.api.ContextPlugin): + """Containers must be updated to the latest version on publish.""" - label = "Validate Containers" + label = "Validate Outdated Containers" order = pyblish.api.ValidatorOrder - hosts = ["maya", "houdini", "nuke", "harmony", "photoshop"] + hosts = ["maya", "houdini", "nuke", "harmony", "photoshop", "aftereffects"] optional = True actions = [ShowInventory] def process(self, context): - if openpype.lib.any_outdated(): - raise ValueError("There are outdated containers in the scene.") + if not self.is_active(context.data): + return + + if any_outdated_containers(): + msg = "There are outdated containers in the scene." + raise PublishXmlValidationError(self, msg) diff --git a/openpype/plugins/publish/validate_editorial_asset_name.py b/openpype/plugins/publish/validate_editorial_asset_name.py index 4a65f3c64a..4f8a1abf2e 100644 --- a/openpype/plugins/publish/validate_editorial_asset_name.py +++ b/openpype/plugins/publish/validate_editorial_asset_name.py @@ -1,7 +1,9 @@ -import pyblish.api -from avalon import io from pprint import pformat +import pyblish.api + +from openpype.client import get_assets + class ValidateEditorialAssetName(pyblish.api.ContextPlugin): """ Validating if editorial's asset names are not already created in db.
@@ -16,7 +18,8 @@ "hiero", "standalonepublisher", "resolve", - "flame" + "flame", + "traypublisher" ] def process(self, context): @@ -24,11 +27,10 @@ asset_and_parents = self.get_parents(context) self.log.debug("__ asset_and_parents: {}".format(asset_and_parents)) - if not io.Session: - io.install() - - db_assets = list(io.find( - {"type": "asset"}, {"name": 1, "data.parents": 1})) + project_name = context.data["projectName"] + db_assets = list(get_assets( + project_name, fields=["name", "data.parents"] + )) self.log.debug("__ db_assets: {}".format(db_assets)) asset_db_docs = { diff --git a/openpype/plugins/publish/validate_publish_dir.py b/openpype/plugins/publish/validate_publish_dir.py new file mode 100644 index 0000000000..2f41127548 --- /dev/null +++ b/openpype/plugins/publish/validate_publish_dir.py @@ -0,0 +1,74 @@ +import pyblish.api +from openpype.pipeline.publish import ValidateContentsOrder +from openpype.pipeline.publish import ( + PublishXmlValidationError, + get_publish_template_name, +) + + +class ValidatePublishDir(pyblish.api.InstancePlugin): + """Validates that 'publishDir' is inside a project directory. + + 'publishDir' is collected based on publish templates. In specific cases + ('source' template) the source folder of the items is used as the + 'publishDir'; this validates that it is inside a project directory + (eg. that files are not published from a local folder inaccessible + to the studio). + + """ + + order = ValidateContentsOrder + label = "Validate Publish Dir" + + checked_template_names = ["source"] + # validated instances might have an interim family which needs to be + # mapped to the final one + family_mapping = { + "renderLayer": "render", + "renderLocal": "render" + } + + def process(self, instance): + + template_name = self._get_template_name_from_instance(instance) + + if template_name not in self.checked_template_names: + return + + original_dirname = instance.data.get("originalDirname") + if not original_dirname: + raise PublishXmlValidationError( + self, + "Instance is meant for in-place publishing," + " but its 'originalDirname' was not collected." + " Contact an OP developer to modify the collector." + ) + + anatomy = instance.context.data["anatomy"] + + success, _ = anatomy.find_root_template_from_path(original_dirname) + + formatting_data = { + "original_dirname": original_dirname, + } + msg = "Path '{}' not in project folder.".format(original_dirname) + \ + " Please publish from inside the project folder."
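+ # Note: 'find_root_template_from_path' returns a two-item tuple; the + # first item is a success boolean and the second (unused here) is + # assumed to be the path with its root replaced by a root template.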
+ if not success: + raise PublishXmlValidationError(self, msg, key="not_in_dir", + formatting_data=formatting_data) + + def _get_template_name_from_instance(self, instance): + project_name = instance.context.data["projectName"] + host_name = instance.context.data["hostName"] + anatomy_data = instance.data["anatomyData"] + family = anatomy_data["family"] + family = self.family_mapping.get(family) or family + task_info = anatomy_data.get("task") or {} + + return get_publish_template_name( + project_name, + host_name, + family, + task_name=task_info.get("name"), + task_type=task_info.get("type"), + project_settings=instance.context.data["project_settings"], + logger=self.log + ) diff --git a/openpype/plugins/publish/validate_resources.py b/openpype/plugins/publish/validate_resources.py index 644977ecd4..7911c70c2d 100644 --- a/openpype/plugins/publish/validate_resources.py +++ b/openpype/plugins/publish/validate_resources.py @@ -1,7 +1,6 @@ -import pyblish.api -import openpype.api - import os +import pyblish.api +from openpype.pipeline.publish import ValidateContentsOrder class ValidateResources(pyblish.api.InstancePlugin): @@ -17,7 +16,7 @@ """ - order = openpype.api.ValidateContentsOrder + order = ValidateContentsOrder label = "Resources" def process(self, instance): diff --git a/openpype/plugins/publish/validate_unique_subsets.py b/openpype/plugins/publish/validate_unique_subsets.py new file mode 100644 index 0000000000..11fb827770 --- /dev/null +++ b/openpype/plugins/publish/validate_unique_subsets.py @@ -0,0 +1,76 @@ +from collections import defaultdict +import pyblish.api +from openpype.pipeline.publish import ( + PublishXmlValidationError, +) + + +class ValidateSubsetUniqueness(pyblish.api.ContextPlugin): + """Validate all subset names are unique. + + This only validates whether the instances currently set to publish from + the workfile overlap one another for the asset + subset they are publishing + to. + + This does not perform any check against existing publishes in the database + since it is allowed to publish into existing subsets resulting in + versioning. + + A subset name may appear twice among the workfile instances if one + of them is set to publish to a different asset than the other. + + """ + + label = "Validate Subset Uniqueness" + order = pyblish.api.ValidatorOrder + families = ["*"] + + def process(self, context): + + # Find instances per (asset, subset) + instance_per_asset_subset = defaultdict(list) + for instance in context: + + # Ignore disabled instances + if not instance.data.get('publish', True): + continue + + # Ignore instance without asset data + asset = instance.data.get("asset") + if asset is None: + self.log.warning("Instance found without `asset` data: " + "{}".format(instance.name)) + continue + + # Ignore instance without subset data + subset = instance.data.get("subset") + if subset is None: + self.log.warning("Instance found without `subset` data: " + "{}".format(instance.name)) + continue + + instance_per_asset_subset[(asset, subset)].append(instance) + + non_unique = [] + for (asset, subset), instances in instance_per_asset_subset.items(): + + # A single instance per asset, subset is fine + if len(instances) < 2: + continue + + non_unique.append("{asset} > {subset}".format(asset=asset, + subset=subset)) + + if not non_unique: + # All is ok + return + + msg = ("Instance subset names {} are not unique.
".format(non_unique) + + "Please remove or rename duplicates.") + formatting_data = { + "non_unique": ",".join(non_unique) + } + + if non_unique: + raise PublishXmlValidationError(self, msg, + formatting_data=formatting_data) diff --git a/openpype/plugins/publish/validate_version.py b/openpype/plugins/publish/validate_version.py index b94152ef2d..2b919a3119 100644 --- a/openpype/plugins/publish/validate_version.py +++ b/openpype/plugins/publish/validate_version.py @@ -1,16 +1,18 @@ import pyblish.api +from openpype.pipeline.publish import PublishValidationError class ValidateVersion(pyblish.api.InstancePlugin): """Validate instance version. - Pype is not allowing overwiting previously published versions. + OpenPype does not allow overwriting previously published versions. """ order = pyblish.api.ValidatorOrder label = "Validate Version" - hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher"] + hosts = ["nuke", "maya", "houdini", "blender", "standalonepublisher", + "photoshop", "aftereffects"] optional = False active = True @@ -19,11 +21,25 @@ class ValidateVersion(pyblish.api.InstancePlugin): version = instance.data.get("version") latest_version = instance.data.get("latestVersion") - if latest_version is not None: + if latest_version is not None and int(version) <= int(latest_version): + # TODO: Remove full non-html version upon drop of old publisher msg = ( - "Version `{0}` from instance `{1}` that you are trying to" - " publish, already exists in the database. Version in" - " database: `{2}`. Please version up your workfile to a higher" - " version number than: `{2}`." + "Version '{0}' from instance '{1}' that you are " + " trying to publish is lower or equal to an existing version " + " in the database. Version in database: '{2}'." + "Please version up your workfile to a higher version number " + "than: '{2}'." ).format(version, instance.data["name"], latest_version) - assert (int(version) > int(latest_version)), msg + + msg_html = ( + "Version {0} from instance {1} that you are " + " trying to publish is lower or equal to an existing version " + " in the database. Version in database: {2}.

" + "Please version up your workfile to a higher version number " + "than: {2}." + ).format(version, instance.data["name"], latest_version) + raise PublishValidationError( + title="Higher version of publish already exists", + message=msg, + description=msg_html + ) diff --git a/openpype/pype_commands.py b/openpype/pype_commands.py index c05eece2be..932fdc9be4 100644 --- a/openpype/pype_commands.py +++ b/openpype/pype_commands.py @@ -4,19 +4,7 @@ import os import sys import json import time - -from openpype.lib import PypeLogger -from openpype.api import get_app_environments_for_context -from openpype.lib.plugin_tools import parse_json, get_batch_asset_task_info -from openpype.lib.remote_publish import ( - get_webpublish_conn, - start_webpublish_log, - publish_and_log, - fail_batch, - find_variant_key, - get_task_data, - IN_PROGRESS_STATUS -) +import signal class PypeCommands: @@ -25,11 +13,12 @@ class PypeCommands: Most of its methods are called by :mod:`cli` module. """ @staticmethod - def launch_tray(debug=False): - PypeLogger.set_process_name("Tray") - + def launch_tray(): + from openpype.lib import Logger from openpype.tools import tray + Logger.set_process_name("Tray") + tray.main() @staticmethod @@ -46,10 +35,12 @@ class PypeCommands: @staticmethod def add_modules(click_func): """Modules/Addons can add their cli commands dynamically.""" + + from openpype.lib import Logger from openpype.modules import ModulesManager manager = ModulesManager() - log = PypeLogger.get_logger("AddModulesCLI") + log = Logger.get_logger("CLI-AddModules") for module in manager.modules: try: module.cli(click_func) @@ -71,14 +62,9 @@ class PypeCommands: @staticmethod def launch_webpublisher_webservercli(*args, **kwargs): - from openpype.hosts.webpublisher.webserver_service.webserver_cli \ - import (run_webserver) - return run_webserver(*args, **kwargs) + from openpype.hosts.webpublisher.webserver_service import run_webserver - @staticmethod - def launch_standalone_publisher(): - from openpype.tools import standalonepublish - standalonepublish.main() + return run_webserver(*args, **kwargs) @staticmethod def launch_traypublisher(): @@ -100,9 +86,11 @@ class PypeCommands: Raises: RuntimeError: When there is no path to process. 
""" + + from openpype.lib import Logger + from openpype.lib.applications import get_app_environments_for_context from openpype.modules import ModulesManager - from openpype import install, uninstall - from openpype.api import Logger + from openpype.pipeline import install_openpype_plugins from openpype.tools.utils.host_tools import show_publish from openpype.tools.utils.lib import qt_app_context @@ -110,9 +98,9 @@ class PypeCommands: import pyblish.api import pyblish.util - log = Logger.get_logger() + log = Logger.get_logger("CLI-publish") - install() + install_openpype_plugins() manager = ModulesManager() @@ -124,13 +112,14 @@ class PypeCommands: if not any(paths): raise RuntimeError("No publish paths specified") - env = get_app_environments_for_context( - os.environ["AVALON_PROJECT"], - os.environ["AVALON_ASSET"], - os.environ["AVALON_TASK"], - os.environ["AVALON_APP_NAME"] - ) - os.environ.update(env) + if os.getenv("AVALON_APP_NAME"): + env = get_app_environments_for_context( + os.environ["AVALON_PROJECT"], + os.environ["AVALON_ASSET"], + os.environ["AVALON_TASK"], + os.environ["AVALON_APP_NAME"] + ) + os.environ.update(env) pyblish.api.register_host("shell") @@ -142,6 +131,7 @@ class PypeCommands: pyblish.api.register_target("farm") os.environ["OPENPYPE_PUBLISH_DATA"] = os.pathsep.join(paths) + os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib log.info("Running publish ...") @@ -167,13 +157,15 @@ class PypeCommands: log.info("Publish finished.") @staticmethod - def remotepublishfromapp(project, batch_path, host_name, + def remotepublishfromapp(project_name, batch_path, host_name, user_email, targets=None): """Opens installed variant of 'host' and run remote publish there. + Eventually should be yanked out to Webpublisher cli. + Currently implemented and tested for Photoshop where customer wants to process uploaded .psd file and publish collected layers - from there. + from there. Triggered by Webpublisher. Checks if no other batches are running (status =='in_progress). If so, it sleeps for SLEEP (this is separate process), @@ -184,8 +176,8 @@ class PypeCommands: Runs publish process as user would, in automatic fashion. Args: - project (str): project to publish (only single context is expected - per call of remotepublish + project_name (str): project to publish (only single context is + expected per call of remotepublish batch_path (str): Path batch folder. Contains subfolders with resources (workfile, another subfolder 'renders' etc.) 
host_name (str): 'photoshop' @@ -194,84 +186,21 @@ targets (list): Pyblish targets (to choose validator for example) """ - import pyblish.api - from openpype.api import Logger - from openpype.lib import ApplicationManager - log = Logger.get_logger() - - log.info("remotepublishphotoshop command") - - task_data = get_task_data(batch_path) - - workfile_path = os.path.join(batch_path, - task_data["task"], - task_data["files"][0]) - - print("workfile_path {}".format(workfile_path)) - - batch_id = task_data["batch"] - dbcon = get_webpublish_conn() - # safer to start logging here, launch might be broken altogether - _id = start_webpublish_log(dbcon, batch_id, user_email) - - batches_in_progress = list(dbcon.find({"status": IN_PROGRESS_STATUS})) - if len(batches_in_progress) > 1: - fail_batch(_id, batches_in_progress, dbcon) - print("Another batch running, probably stuck, ask admin for help") - - asset, task_name, _ = get_batch_asset_task_info(task_data["context"]) - - application_manager = ApplicationManager() - found_variant_key = find_variant_key(application_manager, host_name) - app_name = "{}/{}".format(host_name, found_variant_key) - - # must have for proper launch of app - env = get_app_environments_for_context( - project, - asset, - task_name, - app_name + from openpype.hosts.webpublisher.publish_functions import ( + cli_publish_from_app ) - print("env:: {}".format(env)) - os.environ.update(env) - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path - # must pass identifier to update log lines for a batch - os.environ["BATCH_LOG_ID"] = str(_id) - os.environ["HEADLESS_PUBLISH"] = 'true' # to use in app lib - os.environ["USER_EMAIL"] = user_email - - pyblish.api.register_host(host_name) - if targets: - if isinstance(targets, str): - targets = [targets] - current_targets = os.environ.get("PYBLISH_TARGETS", "").split( - os.pathsep) - for target in targets: - current_targets.append(target) - - os.environ["PYBLISH_TARGETS"] = os.pathsep.join( - set(current_targets)) - - data = { - "last_workfile_path": workfile_path, - "start_last_workfile": True, - "project_name": project, - "asset_name": asset, - "task_name": task_name - } - - launched_app = application_manager.launch(app_name, **data) - - while launched_app.poll() is None: - time.sleep(0.5) + cli_publish_from_app( + project_name, batch_path, host_name, user_email, targets + ) @staticmethod def remotepublish(project, batch_path, user_email, targets=None): """Start headless publishing. - Used to publish rendered assets, workfiles etc. + Used to publish rendered assets, workfiles etc. via Webpublisher. + Eventually this should be moved to the Webpublisher CLI. Publish use json from passed paths argument. @@ -288,51 +217,24 @@ Raises: RuntimeError: When there is no path to process.
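+ + Example (illustrative values): + + PypeCommands.remotepublish( + "demo_project", "/data/webpublisher/batch_123", + "artist@studio.com" + )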
""" - if not batch_path: - raise RuntimeError("No publish paths specified") - # Register target and host - import pyblish.api - import pyblish.util - import avalon.api - from openpype.hosts.webpublisher import api as webpublisher + from openpype.hosts.webpublisher.publish_functions import ( + cli_publish + ) - log = PypeLogger.get_logger() - - log.info("remotepublish command") - - host_name = "webpublisher" - os.environ["OPENPYPE_PUBLISH_DATA"] = batch_path - os.environ["AVALON_PROJECT"] = project - os.environ["AVALON_APP"] = host_name - os.environ["USER_EMAIL"] = user_email - - pyblish.api.register_host(host_name) - - if targets: - if isinstance(targets, str): - targets = [targets] - for target in targets: - pyblish.api.register_target(target) - - avalon.api.install(webpublisher) - - log.info("Running publish ...") - - _, batch_id = os.path.split(batch_path) - dbcon = get_webpublish_conn() - _id = start_webpublish_log(dbcon, batch_id, user_email) - - publish_and_log(dbcon, _id, log, batch_id=batch_id) - - log.info("Publish finished.") + cli_publish(project, batch_path, user_email, targets) @staticmethod - def extractenvironments( - output_json_path, project, asset, task, app, env_group - ): + def extractenvironments(output_json_path, project, asset, task, app, + env_group): + """Produces json file with environment based on project and app. + + Called by Deadline plugin to propagate environment into render jobs. + """ + + from openpype.lib.applications import get_app_environments_for_context + if all((project, asset, task, app)): - from openpype.api import get_app_environments_for_context env = get_app_environments_for_context( project, asset, task, app, env_group ) @@ -397,7 +299,7 @@ class PypeCommands: if pyargs: args.extend(["--pyargs", pyargs]) - if persist: + if test_data_folder: args.extend(["--test_data_folder", test_data_folder]) if persist: @@ -414,8 +316,12 @@ class PypeCommands: pytest.main(args) def syncserver(self, active_site): - """Start running sync_server in background.""" - import signal + """Start running sync_server in background. + + This functionality is available in directly in module cli commands. 
+ `~/openpype_console module sync_server syncservice` + """ + os.environ["OPENPYPE_LOCAL_ID"] = active_site def signal_handler(sig, frame): @@ -434,7 +340,6 @@ class PypeCommands: sync_server_module.server_init() sync_server_module.server_start() - import time while True: time.sleep(1.0) diff --git a/openpype/resources/__init__.py b/openpype/resources/__init__.py index 49eee21002..0d7778e546 100644 --- a/openpype/resources/__init__.py +++ b/openpype/resources/__init__.py @@ -39,15 +39,21 @@ def get_liberation_font_path(bold=False, italic=False): return font_path +def get_openpype_production_icon_filepath(): + return get_resource("icons", "openpype_icon.png") + + +def get_openpype_staging_icon_filepath(): + return get_resource("icons", "openpype_icon_staging.png") + + def get_openpype_icon_filepath(staging=None): if staging is None: staging = is_running_staging() if staging: - icon_file_name = "openpype_icon_staging.png" - else: - icon_file_name = "openpype_icon.png" - return get_resource("icons", icon_file_name) + return get_openpype_staging_icon_filepath() + return get_openpype_production_icon_filepath() def get_openpype_splash_filepath(staging=None): diff --git a/openpype/resources/app_icons/3dsmax.png b/openpype/resources/app_icons/3dsmax.png new file mode 100644 index 0000000000..9ebdf6099f Binary files /dev/null and b/openpype/resources/app_icons/3dsmax.png differ diff --git a/openpype/resources/app_icons/celaction.png b/openpype/resources/app_icons/celaction.png new file mode 100644 index 0000000000..86ac092365 Binary files /dev/null and b/openpype/resources/app_icons/celaction.png differ diff --git a/openpype/resources/app_icons/celaction_local.png b/openpype/resources/app_icons/celaction_local.png deleted file mode 100644 index 3a8abe6dbc..0000000000 Binary files a/openpype/resources/app_icons/celaction_local.png and /dev/null differ diff --git a/openpype/resources/app_icons/celaction_remotel.png b/openpype/resources/app_icons/celaction_remotel.png deleted file mode 100644 index 320e8173eb..0000000000 Binary files a/openpype/resources/app_icons/celaction_remotel.png and /dev/null differ diff --git a/openpype/resources/app_icons/hiero.png b/openpype/resources/app_icons/hiero.png index 04bbf6265b..ba666c2fe0 100644 Binary files a/openpype/resources/app_icons/hiero.png and b/openpype/resources/app_icons/hiero.png differ diff --git a/openpype/resources/app_icons/nuke.png b/openpype/resources/app_icons/nuke.png index 4234454096..e734b4984e 100644 Binary files a/openpype/resources/app_icons/nuke.png and b/openpype/resources/app_icons/nuke.png differ diff --git a/openpype/resources/app_icons/nukestudio.png b/openpype/resources/app_icons/nukestudio.png new file mode 100644 index 0000000000..601d4a591d Binary files /dev/null and b/openpype/resources/app_icons/nukestudio.png differ diff --git a/openpype/resources/app_icons/nukex.png b/openpype/resources/app_icons/nukex.png index 1c5a83c8ab..980f150124 100644 Binary files a/openpype/resources/app_icons/nukex.png and b/openpype/resources/app_icons/nukex.png differ diff --git a/openpype/resources/app_icons/shotgrid.png b/openpype/resources/app_icons/shotgrid.png new file mode 100644 index 0000000000..6d0cc047f9 Binary files /dev/null and b/openpype/resources/app_icons/shotgrid.png differ diff --git a/openpype/scripts/fusion_switch_shot.py b/openpype/scripts/fusion_switch_shot.py index 6db8ff36a8..fc22f060a2 100644 --- a/openpype/scripts/fusion_switch_shot.py +++ b/openpype/scripts/fusion_switch_shot.py @@ -3,21 +3,24 @@ import re import sys 
import logging +from openpype.client import get_asset_by_name, get_versions + # Pipeline imports -from avalon import api, io -import avalon.fusion +from openpype.hosts.fusion import api +import openpype.hosts.fusion.api.lib as fusion_lib # Config imports -import openpype.lib as pype -import openpype.hosts.fusion.lib as fusion_lib +from openpype.lib import version_up +from openpype.pipeline import ( + install_host, + registered_host, + legacy_io, +) -from openpype.lib.avalon_context import get_workdir_from_session +from openpype.pipeline.context_tools import get_workdir_from_session log = logging.getLogger("Update Slap Comp") -self = sys.modules[__name__] -self._project = None - def _format_version_folder(folder): """Format a version folder based on the filepath @@ -79,7 +82,7 @@ def _format_filepath(session): # Create new unqiue filepath if os.path.exists(new_filepath): - new_filepath = pype.version_up(new_filepath) + new_filepath = version_up(new_filepath) return new_filepath @@ -102,7 +105,7 @@ def _update_savers(comp, session): comp.Print("New renders to: %s\n" % renders) - with avalon.fusion.comp_lock_and_undo_chunk(comp): + with api.comp_lock_and_undo_chunk(comp): savers = comp.GetToolList(False, "Saver").values() for saver in savers: filepath = saver.GetAttrs("TOOLST_Clip_Name")[1.0] @@ -127,8 +130,8 @@ def update_frame_range(comp, representations): """ version_ids = [r["parent"] for r in representations] - versions = io.find({"type": "version", "_id": {"$in": version_ids}}) - versions = list(versions) + project_name = legacy_io.active_project() + versions = list(get_versions(project_name, version_ids=version_ids)) start = min(v["data"]["frameStart"] for v in versions) end = max(v["data"]["frameEnd"] for v in versions) @@ -158,25 +161,20 @@ def switch(asset_name, filepath=None, new=True): # Assert asset name exists # It is better to do this here then to wait till switch_shot does it - asset = io.find_one({"type": "asset", "name": asset_name}) + project_name = legacy_io.active_project() + asset = get_asset_by_name(project_name, asset_name) assert asset, "Could not find '%s' in the database" % asset_name - # Get current project - self._project = io.find_one({ - "type": "project", - "name": api.Session["AVALON_PROJECT"] - }) - # Go to comp if not filepath: - current_comp = avalon.fusion.get_current_comp() + current_comp = api.get_current_comp() assert current_comp is not None, "Could not find current comp" else: fusion = _get_fusion_instance() current_comp = fusion.LoadComp(filepath, quiet=True) assert current_comp is not None, "Fusion could not load '%s'" % filepath - host = api.registered_host() + host = registered_host() containers = list(host.ls()) assert containers, "Nothing to update" @@ -194,7 +192,7 @@ def switch(asset_name, filepath=None, new=True): current_comp.Print(message) # Build the session to switch to - switch_to_session = api.Session.copy() + switch_to_session = legacy_io.Session.copy() switch_to_session["AVALON_ASSET"] = asset['name'] if new: @@ -203,7 +201,7 @@ def switch(asset_name, filepath=None, new=True): # Update savers output based on new session _update_savers(current_comp, switch_to_session) else: - comp_path = pype.version_up(filepath) + comp_path = version_up(filepath) current_comp.Print(comp_path) @@ -234,7 +232,7 @@ if __name__ == '__main__': args, unknown = parser.parse_args() - api.install(avalon.fusion) + install_host(api) switch(args.asset_name, args.file_path) sys.exit(0) diff --git a/openpype/scripts/non_python_host_launch.py 
b/openpype/scripts/non_python_host_launch.py index 43921f0483..79fb1cbb52 100644 --- a/openpype/scripts/non_python_host_launch.py +++ b/openpype/scripts/non_python_host_launch.py @@ -14,8 +14,8 @@ CURRENT_FILE = os.path.abspath(__file__) def show_error_messagebox(title, message, detail_message=None): """Function will show message and process ends after closing it.""" - from Qt import QtWidgets, QtCore - from avalon import style + from qtpy import QtWidgets, QtCore + from openpype import style app = QtWidgets.QApplication([]) app.setStyleSheet(style.load_stylesheet()) diff --git a/openpype/scripts/ocio_wrapper.py b/openpype/scripts/ocio_wrapper.py new file mode 100644 index 0000000000..0685b2e52a --- /dev/null +++ b/openpype/scripts/ocio_wrapper.py @@ -0,0 +1,168 @@ +"""OpenColorIO Wrapper. + +Only to be interpreted by Python 3. It is run in a subprocess in case a +Python 2 host needs to use it, or it is used as a module for Python 3 +processing. + +Providing functionality: +- get_colorspace - console command - python 2 + - returning all available color spaces + found in input config path. +- _get_colorspace_data - python 3 - module function + - returning all available colorspaces + found in input config path. +- get_views - console command - python 2 + - returning all available viewers + found in input config path. +- _get_views_data - python 3 - module function + - returning all available viewers + found in input config path. +""" + +import click +import json +from pathlib import Path +import PyOpenColorIO as ocio + + +@click.group() +def main(): + pass + + +@main.group() +def config(): + """Config related commands group. + + Example of use: + > python.exe ./ocio_wrapper.py config *args + """ + pass + + +@config.command( + name="get_colorspace", + help=( + "return all colorspaces from config file; " + "--in_path and --out_path input args are required" + ) +) +@click.option("--in_path", required=True, + help="path where to read ocio config file", + type=click.Path(exists=True)) +@click.option("--out_path", required=True, + help="path where to write output json file", + type=click.Path()) +def get_colorspace(in_path, out_path): + """Aggregate all colorspaces to a file. + + Python 2 wrapped console command. + + Args: + in_path (str): config file path string + out_path (str): temp json file path string + + Example of use: + > python.exe ./ocio_wrapper.py config get_colorspace \ + --in_path=<config_path> --out_path=<output_json_path> + """ + json_path = Path(out_path) + + out_data = _get_colorspace_data(in_path) + + with open(json_path, "w") as f: + json.dump(out_data, f) + + print(f"Colorspace data are saved to '{json_path}'") + + +def _get_colorspace_data(config_path): + """Return all found colorspace data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist.
+ + Returns: + dict: aggregated available colorspaces + """ + config_path = Path(config_path) + + if not config_path.is_file(): + raise IOError( + f"Input path `{config_path}` should be `config.ocio` file") + + config = ocio.Config().CreateFromFile(str(config_path)) + + return { + c.getName(): c.getFamily() + for c in config.getColorSpaces() + } + + +@config.command( + name="get_views", + help=( + "return all viewers from config file; " + "--in_path and --out_path input args are required" + ) +) +@click.option("--in_path", required=True, + help="path where to read ocio config file", + type=click.Path(exists=True)) +@click.option("--out_path", required=True, + help="path where to write output json file", + type=click.Path()) +def get_views(in_path, out_path): + """Aggregate all viewers to a file. + + Python 2 wrapped console command. + + Args: + in_path (str): config file path string + out_path (str): temp json file path string + + Example of use: + > python.exe ./ocio_wrapper.py config get_views \ --in_path=<config_path> --out_path=<output_json_path> + """ + json_path = Path(out_path) + + out_data = _get_views_data(in_path) + + with open(json_path, "w") as f: + json.dump(out_data, f) + + print(f"Viewer data are saved to '{json_path}'") + + +def _get_views_data(config_path): + """Return all found viewer data. + + Args: + config_path (str): path string leading to config.ocio + + Raises: + IOError: Input config does not exist. + + Returns: + dict: aggregated available viewers + """ + config_path = Path(config_path) + + if not config_path.is_file(): + raise IOError("Input path should be `config.ocio` file") + + config = ocio.Config().CreateFromFile(str(config_path)) + + return { + f"{d}/{v}": {"display": d, "view": v} + for d in config.getDisplays() + for v in config.getViews(d) + } + + +if __name__ == '__main__': + main() diff --git a/openpype/scripts/otio_burnin.py b/openpype/scripts/otio_burnin.py index 1f57891b84..3e40bf0c8b 100644 --- a/openpype/scripts/otio_burnin.py +++ b/openpype/scripts/otio_burnin.py @@ -22,10 +22,6 @@ FFMPEG = ( '"{}"%(input_args)s -i "%(input)s" %(filters)s %(args)s%(output)s' ).format(ffmpeg_path) -FFPROBE = ( - '"{}" -v quiet -print_format json -show_format -show_streams "%(source)s"' -).format(ffprobe_path) - DRAWTEXT = ( "drawtext=fontfile='%(font)s':text=\\'%(text)s\\':" "x=%(x)s:y=%(y)s:fontcolor=%(color)s@%(opacity).1f:fontsize=%(size)d" @@ -48,8 +44,15 @@ def _get_ffprobe_data(source): :param str source: source media file :rtype: [{}, ...] """ - command = FFPROBE % {'source': source} - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + command = [ + ffprobe_path, + "-v", "quiet", + "-print_format", "json", + "-show_format", + "-show_streams", + source + ] + proc = subprocess.Popen(command, stdout=subprocess.PIPE) out = proc.communicate()[0] if proc.returncode != 0: raise RuntimeError("Failed to run: %s" % command) @@ -113,11 +116,20 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if not ffprobe_data: ffprobe_data = _get_ffprobe_data(source) + # Validate 'streams' before calling super to raise a more specific + # error + source_streams = ffprobe_data.get("streams") + if not source_streams: + raise ValueError(( + "Input file \"{}\" does not contain any streams" + " with image/video content.
+ ).format(source) + + self.ffprobe_data = ffprobe_data + self.first_frame = first_frame + self.input_args = [] + - super().__init__(source, ffprobe_data["streams"]) + super().__init__(source, source_streams) if options_init: self.options_init.update(options_init) @@ -328,13 +340,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): _stdout, _stderr = proc.communicate() if _stdout: - for line in _stdout.split(b"\r\n"): - print(line.decode("utf-8")) + print(_stdout.decode("utf-8", errors="backslashreplace")) # This will probably never happen as ffmpeg use stdout if _stderr: - for line in _stderr.split(b"\r\n"): - print(line.decode("utf-8")) + print(_stderr.decode("utf-8", errors="backslashreplace")) if proc.returncode != 0: raise RuntimeError( @@ -568,6 +578,7 @@ def burnins_from_data( if source_ffmpeg_cmd: copy_args = ( "-metadata", + "-metadata:s:v:0", ) args = source_ffmpeg_cmd.split(" ") for idx, arg in enumerate(args): diff --git a/openpype/scripts/remote_publish.py b/openpype/scripts/remote_publish.py new file mode 100644 index 0000000000..37df35e36c --- /dev/null +++ b/openpype/scripts/remote_publish.py @@ -0,0 +1,12 @@ +try: + from openpype.lib import Logger + from openpype.pipeline.publish.lib import remote_publish +except ImportError as exc: + # Ensure Deadline fails by outputting an error that contains "Fatal Error:" + raise ImportError("Fatal Error: %s" % exc) + + +if __name__ == "__main__": + # Perform remote publish with thorough error checking + log = Logger.get_logger(__name__) + remote_publish(log, raise_error=True) diff --git a/openpype/settings/__init__.py b/openpype/settings/__init__.py index 14e4678050..22d734ae58 100644 --- a/openpype/settings/__init__.py +++ b/openpype/settings/__init__.py @@ -18,12 +18,12 @@ from .exceptions import ( ) from .lib import ( get_general_environments, + get_global_settings, get_system_settings, get_project_settings, get_current_project_settings, get_anatomy_settings, - get_environments, - get_local_settings + get_local_settings, ) from .entities import ( SystemSettings, @@ -50,11 +50,11 @@ __all__ = ( "SaveWarningExc", "get_general_environments", + "get_global_settings", "get_system_settings", "get_project_settings", "get_current_project_settings", "get_anatomy_settings", - "get_environments", "get_local_settings", "SystemSettings", diff --git a/openpype/settings/constants.py b/openpype/settings/constants.py index 8b8acf5714..cd84d4db1c 100644 --- a/openpype/settings/constants.py +++ b/openpype/settings/constants.py @@ -3,16 +3,13 @@ import re # Metadata keys for work with studio and project overrides M_OVERRIDDEN_KEY = "__overriden_keys__" -# Metadata key for storing information about environments -M_ENVIRONMENT_KEY = "__environment_keys__" # Metadata key for storing dynamic created labels M_DYNAMIC_KEY_LABEL = "__dynamic_keys_labels__" -METADATA_KEYS = ( +METADATA_KEYS = frozenset([ M_OVERRIDDEN_KEY, - M_ENVIRONMENT_KEY, M_DYNAMIC_KEY_LABEL -) +]) # Keys where studio's system overrides are stored GLOBAL_SETTINGS_KEY = "global_settings" @@ -35,7 +32,6 @@ KEY_REGEX = re.compile(r"^[{}]+$".format(KEY_ALLOWED_SYMBOLS)) __all__ = ( "M_OVERRIDDEN_KEY", - "M_ENVIRONMENT_KEY", "M_DYNAMIC_KEY_LABEL", "METADATA_KEYS", diff --git a/openpype/settings/defaults/project_anatomy/attributes.json b/openpype/settings/defaults/project_anatomy/attributes.json index 983ac603f9..bf8bbef8de 100644 --- a/openpype/settings/defaults/project_anatomy/attributes.json +++ b/openpype/settings/defaults/project_anatomy/attributes.json @@ -19,8 +19,7 @@ "blender/2-91", "harmony/20",
"photoshop/2021", - "aftereffects/2021", - "unreal/4-26" + "aftereffects/2021" ], "tools_env": [], "active": true diff --git a/openpype/settings/defaults/project_anatomy/imageio.json b/openpype/settings/defaults/project_anatomy/imageio.json index 1c86509155..caa2a8a206 100644 --- a/openpype/settings/defaults/project_anatomy/imageio.json +++ b/openpype/settings/defaults/project_anatomy/imageio.json @@ -55,36 +55,49 @@ "nukeNodeClass": "Write", "knobs": [ { + "type": "text", "name": "file_type", "value": "exr" }, { + "type": "text", "name": "datatype", "value": "16 bit half" }, { + "type": "text", "name": "compression", "value": "Zip (1 scanline)" }, { + "type": "bool", "name": "autocrop", - "value": "True" + "value": true }, { + "type": "color_gui", "name": "tile_color", - "value": "0xff0000ff" + "value": [ + 186, + 35, + 35, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "linear" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] }, @@ -95,82 +108,107 @@ "nukeNodeClass": "Write", "knobs": [ { + "type": "text", "name": "file_type", "value": "exr" }, { + "type": "text", "name": "datatype", "value": "16 bit half" }, { + "type": "text", "name": "compression", "value": "Zip (1 scanline)" }, { + "type": "bool", "name": "autocrop", - "value": "False" + "value": true }, { + "type": "color_gui", "name": "tile_color", - "value": "0xadab1dff" + "value": [ + 171, + 171, + 10, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "linear" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] }, { "plugins": [ - "CreateWriteStill" + "CreateWriteImage" ], "nukeNodeClass": "Write", "knobs": [ { + "type": "text", "name": "file_type", "value": "tiff" }, { + "type": "text", "name": "datatype", "value": "16 bit" }, { + "type": "text", "name": "compression", "value": "Deflate" }, { + "type": "color_gui", "name": "tile_color", - "value": "0x23ff00ff" + "value": [ + 56, + 162, + 7, + 255 + ] }, { + "type": "text", "name": "channels", "value": "rgb" }, { + "type": "text", "name": "colorspace", "value": "sRGB" }, { + "type": "bool", "name": "create_directories", - "value": "True" + "value": true } ] } ], - "customNodes": [] + "overrideNodes": [] }, "regexInputs": { "inputs": [ { - "regex": "[^-a-zA-Z0-9]beauty[^-a-zA-Z0-9]", + "regex": "(beauty).*(?=.exr)", "colorspace": "linear" } ] @@ -185,8 +223,8 @@ "linux": [] }, "renderSpace": "ACEScg", - "viewName": "ACES 1.0 SDR-video", - "displayName": "sRGB" + "displayName": "sRGB", + "viewName": "ACES 1.0 SDR-video" }, "colorManagementPreference": { "configFilePath": { diff --git a/openpype/settings/defaults/project_anatomy/templates.json b/openpype/settings/defaults/project_anatomy/templates.json index d46d449c77..32230e0625 100644 --- a/openpype/settings/defaults/project_anatomy/templates.json +++ b/openpype/settings/defaults/project_anatomy/templates.json @@ -28,9 +28,42 @@ }, "delivery": {}, "unreal": { - "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", - "file": "{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "folder": "{root[work]}/{project[name]}/unreal/{task[name]}", + "file": "{project[code]}_{asset}.{ext}", "path": "{@folder}/{@file}" }, - "others": {} + "others": { + "maya2unreal": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}", + "file": 
"{subset}_{@version}<_{output}><.{@frame}>.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTextureHero": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/hero", + "file": "{originalBasename}.{ext}", + "path": "{@folder}/{@file}" + }, + "simpleUnrealTexture": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{@version}", + "file": "{originalBasename}_{@version}.{ext}", + "path": "{@folder}/{@file}" + }, + "online": { + "folder": "{root[work]}/{project[name]}/{hierarchy}/{asset}/publish/{family}/{subset}/{@version}", + "file": "{originalBasename}<.{@frame}><_{udim}>.{ext}", + "path": "{@folder}/{@file}" + }, + "source": { + "folder": "{root[work]}/{originalDirname}", + "file": "{originalBasename}<.{@frame}><_{udim}>.{ext}", + "path": "{@folder}/{@file}" + }, + "__dynamic_keys_labels__": { + "maya2unreal": "Maya to Unreal", + "simpleUnrealTextureHero": "Simple Unreal Texture - Hero", + "simpleUnrealTexture": "Simple Unreal Texture", + "online": "online", + "source": "source" + } + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/aftereffects.json b/openpype/settings/defaults/project_settings/aftereffects.json index 6a9a399069..e4b957fb85 100644 --- a/openpype/settings/defaults/project_settings/aftereffects.json +++ b/openpype/settings/defaults/project_settings/aftereffects.json @@ -1,4 +1,21 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "create": { + "RenderCreator": { + "defaults": [ + "Main" + ] + } + }, "publish": { "ValidateSceneSettings": { "enabled": true, diff --git a/openpype/settings/defaults/project_settings/blender.json b/openpype/settings/defaults/project_settings/blender.json index a7262dcb5d..3585d2ad0a 100644 --- a/openpype/settings/defaults/project_settings/blender.json +++ b/openpype/settings/defaults/project_settings/blender.json @@ -1,6 +1,85 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, "workfile_builder": { "create_first_version": false, "custom_templates": [] + }, + "publish": { + "ValidateCameraZeroKeyframe": { + "enabled": true, + "optional": true, + "active": true + }, + "ValidateMeshHasUvs": { + "enabled": true, + "optional": true, + "active": true + }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateTransformZero": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateNoColonsInName": { + "enabled": true, + "optional": false, + "active": true + }, + "ExtractBlend": { + "enabled": true, + "optional": true, + "active": true, + "families": [ + "model", + "camera", + "rig", + "action", + "layout" + ] + }, + "ExtractFBX": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractABC": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractBlendAnimation": { + "enabled": true, + "optional": true, + "active": true + }, + "ExtractAnimationFBX": { + "enabled": true, + "optional": true, + "active": false + }, + "ExtractCamera": { + "enabled": true, + "optional": true, + "active": true + }, + "ExtractLayout": { + "enabled": true, + "optional": true, + "active": false + } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/celaction.json b/openpype/settings/defaults/project_settings/celaction.json index a4a321fb27..ad01e62d95 
100644 --- a/openpype/settings/defaults/project_settings/celaction.json +++ b/openpype/settings/defaults/project_settings/celaction.json @@ -1,13 +1,19 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, "publish": { - "ExtractCelactionDeadline": { - "enabled": true, - "deadline_department": "", - "deadline_priority": 50, - "deadline_pool": "", - "deadline_pool_secondary": "", - "deadline_group": "", - "deadline_chunk_size": 10 + "CollectRenderPath": { + "output_extension": "png", + "anatomy_template_key_render_files": "render", + "anatomy_template_key_metadata": "render" } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/deadline.json b/openpype/settings/defaults/project_settings/deadline.json index 5bb0a4022e..0a4318a659 100644 --- a/openpype/settings/defaults/project_settings/deadline.json +++ b/openpype/settings/defaults/project_settings/deadline.json @@ -2,7 +2,11 @@ "deadline_servers": [], "publish": { "CollectDefaultDeadlineServer": { - "pass_mongo_url": false + "pass_mongo_url": true + }, + "CollectDeadlinePools": { + "primary_pool": "", + "secondary_pool": "" }, "ValidateExpectedFiles": { "enabled": true, @@ -15,45 +19,22 @@ "deadline" ] }, - "ProcessSubmittedJobOnFarm": { - "enabled": true, - "deadline_department": "", - "deadline_pool": "", - "deadline_group": "", - "deadline_chunk_size": 1, - "deadline_priority": 50, - "publishing_script": "", - "skip_integration_repre_list": [], - "aov_filter": { - "maya": [ - ".+(?:\\.|_)([Bb]eauty)(?:\\.|_).*" - ], - "nuke": [ - ".*" - ], - "aftereffects": [ - ".*" - ], - "celaction": [ - ".*" - ], - "harmony": [ - ".*" - ] - } - }, "MayaSubmitDeadline": { "enabled": true, "optional": false, "active": true, "tile_assembler_plugin": "OpenPypeTileAssembler", "use_published": true, + "import_reference": false, "asset_dependencies": true, + "priority": 50, + "tile_priority": 50, "group": "none", "limit": [], "jobInfo": {}, "pluginInfo": {}, - "scene_patches": [] + "scene_patches": [], + "strict_error_checking": true }, "NukeSubmitDeadline": { "enabled": true, @@ -62,8 +43,7 @@ "use_published": true, "priority": 50, "chunk_size": 10, - "primary_pool": "", - "secondary_pool": "", + "concurrent_tasks": 1, "group": "", "department": "", "use_gpu": true, @@ -78,8 +58,6 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "" }, @@ -90,11 +68,43 @@ "use_published": true, "priority": 50, "chunk_size": 10000, - "primary_pool": "", - "secondary_pool": "", "group": "", "department": "", "multiprocess": true + }, + "CelactionSubmitDeadline": { + "enabled": true, + "deadline_department": "", + "deadline_priority": 50, + "deadline_pool": "", + "deadline_pool_secondary": "", + "deadline_group": "", + "deadline_chunk_size": 10, + "deadline_job_delay": "00:00:00:00" + }, + "ProcessSubmittedJobOnFarm": { + "enabled": true, + "deadline_department": "", + "deadline_pool": "", + "deadline_group": "", + "deadline_chunk_size": 1, + "deadline_priority": 50, + "publishing_script": "", + "skip_integration_repre_list": [], + "aov_filter": { + "maya": [ + ".*([Bb]eauty).*" + ], + "aftereffects": [ + ".*" + ], + "celaction": [ + ".*" + ], + "harmony": [ + ".*" + ] + } } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/flame.json b/openpype/settings/defaults/project_settings/flame.json index c7188b10b5..cbd99c4560 100644 --- 
a/openpype/settings/defaults/project_settings/flame.json +++ b/openpype/settings/defaults/project_settings/flame.json @@ -1,4 +1,31 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + }, + "project": { + "colourPolicy": "ACES 1.1", + "frameDepth": "16-bit fp", + "fieldDominance": "PROGRESSIVE" + }, + "profilesMapping": { + "inputs": [ + { + "flameName": "ACEScg", + "ocioName": "ACES - ACEScg" + }, + { + "flameName": "Rec.709 video", + "ocioName": "Output - Rec.709" + } + ] + } + }, "create": { "CreateShotClip": { "hierarchy": "{folder}/{sequence}", @@ -16,24 +43,65 @@ "vSyncOn": false, "workfileFrameStart": 1001, "handleStart": 5, - "handleEnd": 5 + "handleEnd": 5, + "includeHandles": false, + "retimedHandles": true, + "retimedFramerange": true } }, "publish": { + "CollectTimelineInstances": { + "xml_preset_attrs_from_comments": [ + { + "name": "width", + "type": "number" + }, + { + "name": "height", + "type": "number" + }, + { + "name": "pixelRatio", + "type": "float" + }, + { + "name": "resizeType", + "type": "string" + }, + { + "name": "resizeFilter", + "type": "string" + } + ], + "add_tasks": [ + { + "name": "compositing", + "type": "Compositing", + "create_batch_group": true + } + ] + }, "ExtractSubsetResources": { "keep_original_representation": false, "export_presets_mapping": { "exr16fpdwaa": { + "active": true, + "export_type": "File Sequence", "ext": "exr", "xml_preset_file": "OpenEXR (16-bit fp DWAA).xml", - "xml_preset_dir": "", - "export_type": "File Sequence", - "ignore_comment_attrs": false, "colorspace_out": "ACES - ACEScg", + "xml_preset_dir": "", + "parsed_comment_attrs": true, "representation_add_range": true, - "representation_tags": [] + "representation_tags": [], + "load_to_batch_group": true, + "batch_group_loader_name": "LoadClipBatch", + "filter_path_regex": ".*" } } + }, + "IntegrateBatchGroup": { + "enabled": false } }, "load": { @@ -54,11 +122,45 @@ "png", "h264", "mov", - "mp4" + "mp4", + "exr16fpdwaa" ], "reel_group_name": "OpenPype_Reels", "reel_name": "Loaded", - "clip_name_template": "{asset}_{subset}_{representation}" + "clip_name_template": "{asset}_{subset}<_{output}>", + "layer_rename_template": "{asset}_{subset}<_{output}>", + "layer_rename_patterns": [ + "rgb", + "rgba" + ] + }, + "LoadClipBatch": { + "enabled": true, + "families": [ + "render2d", + "source", + "plate", + "render", + "review" + ], + "representations": [ + "exr", + "dpx", + "jpg", + "jpeg", + "png", + "h264", + "mov", + "mp4", + "exr16fpdwaa" + ], + "reel_name": "OP_LoadedReel", + "clip_name_template": "{batch}_{asset}_{subset}<_{output}>", + "layer_rename_template": "{asset}_{subset}<_{output}>", + "layer_rename_patterns": [ + "rgb", + "rgba" + ] } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/ftrack.json b/openpype/settings/defaults/project_settings/ftrack.json index 89bb41a164..cdf861df4a 100644 --- a/openpype/settings/defaults/project_settings/ftrack.json +++ b/openpype/settings/defaults/project_settings/ftrack.json @@ -56,7 +56,7 @@ "Not Ready" ], "__ignore__": [ - "in prgoress", + "in progress", "omitted", "on hold" ] @@ -96,10 +96,6 @@ "mapping": {}, "asset_types_to_skip": [] }, - "first_version_status": { - "enabled": true, - "status": "" - }, "next_task_update": { "enabled": true, "mapping": { @@ -109,6 +105,27 @@ "Omitted" ], "name_sorting": false + }, + "transfer_values_of_hierarchical_attributes": { + "enabled": true, + "role_list": [ + 
"Administrator", + "Project manager" + ] + }, + "create_daily_review_session": { + "enabled": true, + "role_list": [ + "Administrator", + "Project Manager" + ], + "cycle_enabled": false, + "cycle_hour_start": [ + 0, + 0, + 0 + ], + "review_session_template": "{yy}{mm}{dd}" } }, "user_handlers": { @@ -252,6 +269,51 @@ } ] }, + { + "hosts": [ + "traypublisher" + ], + "families": [], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "matchmove", + "shot" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [] + }, + { + "hosts": [ + "traypublisher" + ], + "families": [ + "plate", + "review", + "audio" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": false, + "advanced_filtering": [ + { + "families": [ + "clip", + "review" + ], + "add_ftrack_family": true + } + ] + }, { "hosts": [ "maya" @@ -349,23 +411,51 @@ "tasks": [], "add_ftrack_family": true, "advanced_filtering": [] + }, + { + "hosts": [ + "photoshop" + ], + "families": [ + "review" + ], + "task_types": [], + "tasks": [], + "add_ftrack_family": true, + "advanced_filtering": [] } ] }, + "CollectFtrackCustomAttributeData": { + "enabled": false, + "custom_attribute_keys": [] + }, + "IntegrateHierarchyToFtrack": { + "create_task_status_profiles": [] + }, "IntegrateFtrackNote": { "enabled": true, - "note_with_intent_template": "{intent}: {comment}", + "note_template": "{intent}: {comment}", "note_labels": [] }, + "IntegrateFtrackDescription": { + "enabled": false, + "optional": true, + "active": true, + "description_template": "{comment}" + }, "ValidateFtrackAttributes": { "enabled": false, "ftrack_custom_attributes": {} }, + "IntegrateFtrackComponentOverwrite": { + "enabled": true + }, "IntegrateFtrackInstance": { "family_mapping": { "camera": "cam", "look": "look", - "mayaascii": "scene", + "mayaAscii": "scene", "model": "geo", "rig": "rig", "setdress": "setdress", @@ -395,7 +485,13 @@ "vrayproxy": "cache", "redshiftproxy": "cache", "usd": "usd" - } + }, + "keep_first_subset_name_for_review": true, + "asset_versions_status_profiles": [], + "additional_metadata_keys": [] + }, + "IntegrateFtrackFarmStatus": { + "farm_status_profiles": [] } } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/fusion.json b/openpype/settings/defaults/project_settings/fusion.json new file mode 100644 index 0000000000..720178e17a --- /dev/null +++ b/openpype/settings/defaults/project_settings/fusion.json @@ -0,0 +1,20 @@ +{ + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + }, + "ocio": { + "enabled": false, + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + } + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/global.json b/openpype/settings/defaults/project_settings/global.json index 30a71b044a..0e078dc157 100644 --- a/openpype/settings/defaults/project_settings/global.json +++ b/openpype/settings/defaults/project_settings/global.json @@ -1,8 +1,30 @@ { + "imageio": { + "ocio_config": { + "filepath": [ + "{OPENPYPE_ROOT}/vendor/bin/ocioconfig/OpenColorIOConfigs/aces_1.2/config.ocio", + "{OPENPYPE_ROOT}/vendor/bin/ocioconfig/OpenColorIOConfigs/nuke-default/config.ocio" + ] + }, + "file_rules": { + "enabled": false, + "rules": { + "example": { + "pattern": ".*(beauty).*", + "colorspace": "ACES - ACEScg", + "ext": "exr" + } + } + } 
+ }, "publish": { "CollectAnatomyInstanceData": { "follow_workfile_version": false }, + "CollectAudio": { + "enabled": false, + "audio_subset_name": "audioMain" + }, "CollectSceneVersion": { "hosts": [ "aftereffects", @@ -20,6 +42,10 @@ ], "skip_hosts_headless_publish": [] }, + "collect_comment_per_instance": { + "enabled": false, + "families": [] + }, "ValidateEditorialAssetName": { "enabled": true, "optional": false @@ -33,21 +59,7 @@ "enabled": false, "profiles": [] }, - "IntegrateHeroVersion": { - "enabled": true, - "optional": true, - "families": [ - "model", - "rig", - "look", - "pointcache", - "animation", - "setdress", - "layout", - "mayaScene" - ] - }, - "ExtractJpegEXR": { + "ExtractThumbnail": { "enabled": true, "ffmpeg_args": { "input": [ @@ -63,6 +75,62 @@ "families": [], "hosts": [], "outputs": { + "png": { + "ext": "png", + "tags": [ + "ftrackreview" + ], + "burnins": [], + "ffmpeg_args": { + "video_filters": [], + "audio_filters": [], + "input": [], + "output": [] + }, + "filter": { + "families": [ + "render", + "review", + "ftrack" + ], + "subsets": [], + "custom_tags": [], + "single_frame_filter": "single_frame" + }, + "overscan_crop": "", + "overscan_color": [ + 0, + 0, + 0, + 255 + ], + "width": 1920, + "height": 1080, + "scale_pixel_aspect": true, + "bg_color": [ + 0, + 0, + 0, + 0 + ], + "letter_box": { + "enabled": false, + "ratio": 0.0, + "fill_color": [ + 0, + 0, + 0, + 255 + ], + "line_thickness": 0, + "line_color": [ + 255, + 0, + 0, + 255 + ] + } + }, "h264": { "ext": "mp4", "tags": [ @@ -88,7 +156,9 @@ "review", "ftrack" ], - "subsets": [] + "subsets": [], + "custom_tags": [], + "single_frame_filter": "multi_frame" }, "overscan_crop": "", "overscan_color": [ @@ -99,6 +169,7 @@ ], "width": 0, "height": 0, + "scale_pixel_aspect": true, "bg_color": [ 0, 0, @@ -156,6 +227,9 @@ { "families": [], "hosts": [], + "task_types": [], + "task_names": [], + "subsets": [], "burnins": { "burnin": { "TOP_LEFT": "{yy}-{mm}-{dd}", @@ -173,7 +247,31 @@ } ] }, + "PreIntegrateThumbnails": { + "enabled": true, + "integrate_profiles": [] + }, + "IntegrateSubsetGroup": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ] + }, "IntegrateAssetNew": { + "subset_grouping_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "" + } + ], "template_name_profiles": [ { "families": [], @@ -192,15 +290,72 @@ "task_types": [], "tasks": [], "template_name": "render" - } - ], - "subset_grouping_profiles": [ + }, { - "families": [], - "hosts": [], + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], "task_types": [], "tasks": [], - "template": "" + "template_name": "simpleUnrealTexture" + }, + { + "families": [ + "staticMesh", + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template_name": "maya2unreal" + }, + { + "families": [ + "online" + ], + "hosts": [ + "traypublisher" + ], + "task_types": [], + "tasks": [], + "template_name": "online" + } + ] + }, + "IntegrateAsset": { + "skip_host_families": [] + }, + "IntegrateHeroVersion": { + "enabled": true, + "optional": true, + "active": true, + "families": [ + "model", + "rig", + "look", + "pointcache", + "animation", + "setdress", + "layout", + "mayaScene", + "simpleUnrealTexture" + ], + "template_name_profiles": [ + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "task_names": [], + 
"template_name": "simpleUnrealTextureHero" } ] }, @@ -241,6 +396,15 @@ "tasks": [], "template": "{family}{variant}" }, + { + "families": [ + "workfile" + ], + "hosts": [], + "task_types": [], + "tasks": [], + "template": "{family}{Task}" + }, { "families": [ "render" @@ -260,7 +424,7 @@ ], "task_types": [], "tasks": [], - "template": "{family}{Task}_{Render_layer}_{Render_pass}" + "template": "{family}{Task}_{Renderlayer}_{Renderpass}" }, { "families": [ @@ -268,6 +432,7 @@ "workfile" ], "hosts": [ + "aftereffects", "tvpaint" ], "task_types": [], @@ -276,18 +441,18 @@ }, { "families": [ - "renderLocal" + "render" ], "hosts": [ "aftereffects" ], "task_types": [], "tasks": [], - "template": "render{Task}{Variant}" + "template": "{family}{Task}{Composition}{Variant}" }, { "families": [ - "unrealStaticMesh" + "staticMesh" ], "hosts": [ "maya" @@ -295,6 +460,17 @@ "task_types": [], "tasks": [], "template": "S_{asset}{variant}" + }, + { + "families": [ + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "tasks": [], + "template": "SK_{asset}{variant}" } ] }, @@ -304,6 +480,13 @@ "task_types": [], "hosts": [], "workfile_template": "work" + }, + { + "task_types": [], + "hosts": [ + "unreal" + ], + "workfile_template": "unreal" } ], "last_workfile_on_startup": [ @@ -311,7 +494,8 @@ "hosts": [], "task_types": [], "tasks": [], - "enabled": true + "enabled": true, + "use_last_published_workfile": false } ], "open_workfile_tool_on_startup": [ @@ -322,7 +506,8 @@ "enabled": false } ], - "extra_folders": [] + "extra_folders": [], + "workfile_lock_profiles": [] }, "loader": { "family_filter_profiles": [ @@ -333,9 +518,78 @@ "filter_families": [] } ] + }, + "publish": { + "template_name_profiles": [ + { + "families": [], + "hosts": [], + "task_types": [], + "task_names": [], + "template_name": "publish" + }, + { + "families": [ + "review", + "render", + "prerender" + ], + "hosts": [], + "task_types": [], + "task_names": [], + "template_name": "render" + }, + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "simpleUnrealTexture" + }, + { + "families": [ + "staticMesh", + "skeletalMesh" + ], + "hosts": [ + "maya" + ], + "task_types": [], + "task_names": [], + "template_name": "maya2unreal" + }, + { + "families": [ + "online" + ], + "hosts": [ + "traypublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "online" + } + ], + "hero_template_name_profiles": [ + { + "families": [ + "simpleUnrealTexture" + ], + "hosts": [ + "standalonepublisher" + ], + "task_types": [], + "task_names": [], + "template_name": "simpleUnrealTextureHero" + } + ] } }, - "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets[ftrack.Library]\": {\"characters[ftrack]\": {}, \"locations[ftrack]\": {}}, \"shots[ftrack.Sequence]\": {\"scripts\": {}, \"editorial[ftrack.Folder]\": {}}}}", + "project_folder_structure": "{\"__project_root__\": {\"prod\": {}, \"resources\": {\"footage\": {\"plates\": {}, \"offline\": {}}, \"audio\": {}, \"art_dept\": {}}, \"editorial\": {}, \"assets\": {\"characters\": {}, \"locations\": {}}, \"shots\": {}}}", "sync_server": { "enabled": false, "config": { diff --git a/openpype/settings/defaults/project_settings/harmony.json b/openpype/settings/defaults/project_settings/harmony.json index 1508b02e1b..1f4ea88272 100644 --- 
a/openpype/settings/defaults/project_settings/harmony.json +++ b/openpype/settings/defaults/project_settings/harmony.json @@ -1,4 +1,30 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "load": { + "ImageSequenceLoader": { + "family": [ + "shot", + "render", + "image", + "plate", + "reference" + ], + "representations": [ + "jpeg", + "png", + "jpg" + ] + } + }, "publish": { "CollectPalettes": { "allowed_tasks": [ diff --git a/openpype/settings/defaults/project_settings/hiero.json b/openpype/settings/defaults/project_settings/hiero.json index 1dff3aac51..c6180d0a58 100644 --- a/openpype/settings/defaults/project_settings/hiero.json +++ b/openpype/settings/defaults/project_settings/hiero.json @@ -1,4 +1,37 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + }, + "workfile": { + "ocioConfigName": "nuke-default", + "ocioconfigpath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "workingSpace": "linear", + "sixteenBitLut": "sRGB", + "eightBitLut": "sRGB", + "floatLut": "linear", + "logLut": "Cineon", + "viewerLut": "sRGB", + "thumbnailLut": "sRGB" + }, + "regexInputs": { + "inputs": [ + { + "regex": "[^-a-zA-Z0-9](plateRef).*(?=mp4)", + "colorspace": "sRGB" + } + ] + } + }, "create": { "CreateShotClip": { "hierarchy": "{folder}/{sequence}", @@ -51,5 +84,17 @@ ] } }, - "filters": {} + "filters": {}, + "scriptsmenu": { + "name": "OpenPype Tools", + "definition": [ + { + "type": "action", + "sourcetype": "python", + "title": "OpenPype Docs", + "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_hiero')", + "tooltip": "Open the OpenPype Hiero user doc page" + } + ] + } } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/houdini.json b/openpype/settings/defaults/project_settings/houdini.json index 911bf82d9b..68cc8945fe 100644 --- a/openpype/settings/defaults/project_settings/houdini.json +++ b/openpype/settings/defaults/project_settings/houdini.json @@ -1,4 +1,15 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "shelves": [], "create": { "CreateArnoldAss": { "enabled": true, @@ -47,6 +58,18 @@ } }, "publish": { + "ValidateWorkfilePaths": { + "enabled": true, + "optional": true, + "node_types": [ + "file", + "alembic" + ], + "prohibited_vars": [ + "$HIP", + "$JOB" + ] + }, "ValidateContainers": { "enabled": true, "optional": true, diff --git a/openpype/settings/defaults/project_settings/kitsu.json b/openpype/settings/defaults/project_settings/kitsu.json new file mode 100644 index 0000000000..3a9723b9c0 --- /dev/null +++ b/openpype/settings/defaults/project_settings/kitsu.json @@ -0,0 +1,13 @@ +{ + "entities_naming_pattern": { + "episode": "E##", + "sequence": "SQ##", + "shot": "SH##" + }, + "publish": { + "IntegrateKitsuNote": { + "set_status_note": false, + "note_status_shortname": "wfa" + } + } +} \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/maya.json b/openpype/settings/defaults/project_settings/maya.json index 19d9a95595..64bba7b28c 100644 --- a/openpype/settings/defaults/project_settings/maya.json +++ b/openpype/settings/defaults/project_settings/maya.json @@ -1,4 +1,36 @@ { + "open_workfile_post_initialization": false, + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + 
"file_rules": { + "enabled": false, + "rules": {} + }, + "colorManagementPreference_v2": { + "enabled": true, + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "renderSpace": "ACEScg", + "displayName": "sRGB", + "viewName": "ACES 1.0 SDR-video" + }, + "colorManagementPreference": { + "configFilePath": { + "windows": [], + "darwin": [], + "linux": [] + }, + "renderSpace": "scene-linear Rec 709/sRGB", + "viewTransform": "sRGB gamma" + } + }, + "mel_workspace": "workspace -fr \"shaders\" \"renderData/shaders\";\nworkspace -fr \"images\" \"renders/maya\";\nworkspace -fr \"particles\" \"particles\";\nworkspace -fr \"mayaAscii\" \"\";\nworkspace -fr \"mayaBinary\" \"\";\nworkspace -fr \"scene\" \"\";\nworkspace -fr \"alembicCache\" \"cache/alembic\";\nworkspace -fr \"renderData\" \"renderData\";\nworkspace -fr \"sourceImages\" \"sourceimages\";\nworkspace -fr \"fileCache\" \"cache/nCache\";\n", "ext_mapping": { "model": "ma", "mayaAscii": "ma", @@ -8,6 +40,7 @@ "yetiRig": "ma" }, "maya-dirmap": { + "use_env_var_as_root": false, "enabled": false, "paths": { "source-path": [], @@ -30,6 +63,49 @@ } ] }, + "RenderSettings": { + "apply_render_settings": true, + "default_render_image_folder": "renders/maya", + "enable_all_lights": true, + "aov_separator": "underscore", + "remove_aovs": false, + "reset_current_frame": false, + "arnold_renderer": { + "image_prefix": "//_", + "image_format": "exr", + "multilayer_exr": true, + "tiled": true, + "aov_list": [], + "additional_options": [] + }, + "vray_renderer": { + "image_prefix": "//", + "engine": "1", + "image_format": "exr", + "aov_list": [], + "additional_options": [] + }, + "redshift_renderer": { + "image_prefix": "//", + "primary_gi_engine": "0", + "secondary_gi_engine": "0", + "image_format": "exr", + "multilayer_exr": true, + "force_combine": true, + "aov_list": [], + "additional_options": [] + }, + "renderman_renderer": { + "image_prefix": "{aov_separator}..", + "image_dir": "/", + "display_filters": [], + "imageDisplay_dir": "/{aov_separator}imageDisplayFilter..", + "sample_filters": [], + "cryptomatte_dir": "/{aov_separator}cryptomatte..", + "watermark_dir": "/{aov_separator}watermarkFilter..", + "additional_options": [] + } + }, "create": { "CreateLook": { "enabled": true, @@ -42,9 +118,7 @@ "enabled": true, "defaults": [ "Main" - ], - "aov_separator": "underscore", - "default_render_image_folder": "renders" + ] }, "CreateUnrealStaticMesh": { "enabled": true, @@ -52,7 +126,7 @@ "", "_Main" ], - "static_mesh_prefix": "S_", + "static_mesh_prefix": "S", "collision_prefixes": [ "UBX", "UCP", @@ -60,13 +134,82 @@ "UCX" ] }, + "CreateUnrealSkeletalMesh": { + "enabled": true, + "defaults": [], + "joint_hints": "jnt_org" + }, + "CreateMultiverseLook": { + "enabled": true, + "publish_mip_map": true + }, "CreateAnimation": { "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main" + ] + }, + "CreateModel": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main", + "Proxy", + "Sculpt" + ] + }, + "CreatePointCache": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, + "defaults": [ + "Main" + ] + }, + "CreateProxyAlembic": { + "enabled": true, + "write_color_sets": false, + "write_face_sets": false, "defaults": [ "Main" ] }, "CreateAss": { + "enabled": true, + "defaults": [ + "Main" + ], + "expandProcedurals": false, + "motionBlur": true, + "motionBlurKeys": 2, + "motionBlurLength": 0.5, + "maskOptions": false, + 
"maskCamera": false, + "maskLight": false, + "maskShape": false, + "maskShader": false, + "maskOverride": false, + "maskDriver": false, + "maskFilter": false, + "maskColor_manager": false, + "maskOperator": false + }, + "CreateMultiverseUsd": { + "enabled": true, + "defaults": [ + "Main" + ] + }, + "CreateMultiverseUsdComp": { + "enabled": true, + "defaults": [ + "Main" + ] + }, + "CreateMultiverseUsdOver": { "enabled": true, "defaults": [ "Main" @@ -96,20 +239,6 @@ "Main" ] }, - "CreateModel": { - "enabled": true, - "defaults": [ - "Main", - "Proxy", - "Sculpt" - ] - }, - "CreatePointCache": { - "enabled": true, - "defaults": [ - "Main" - ] - }, "CreateRenderSetup": { "enabled": true, "defaults": [ @@ -160,6 +289,12 @@ "CollectMayaRender": { "sync_workfile_version": false }, + "CollectFbxCamera": { + "enabled": false + }, + "CollectGLTF": { + "enabled": false + }, "ValidateInstanceInContext": { "enabled": true, "optional": true, @@ -173,10 +308,16 @@ "ValidateFrameRange": { "enabled": true, "optional": true, - "active": true + "active": true, + "exclude_families": [ + "model", + "rig", + "staticMesh" + ] }, "ValidateShaderName": { "enabled": false, + "optional": true, "regex": "(?P.*)_(.*)_SHD" }, "ValidateShadingEngine": { @@ -190,6 +331,7 @@ }, "ValidateLoadedPlugin": { "enabled": false, + "optional": true, "whitelist_native_plugins": false, "authorized_plugins": [] }, @@ -204,6 +346,7 @@ }, "ValidateUnrealStaticMeshName": { "enabled": true, + "optional": true, "validate_mesh": false, "validate_collision": true }, @@ -214,12 +357,126 @@ "rig" ] }, + "ValidatePluginPathAttributes": { + "enabled": true, + "optional": false, + "active": true, + "attribute": { + "AlembicNode": "abc_File", + "VRayProxy": "fileName", + "RenderManArchive": "filename", + "pgYetiMaya": "cacheFileName", + "aiStandIn": "dso", + "RedshiftSprite": "tex0", + "RedshiftBokeh": "dofBokehImage", + "RedshiftCameraMap": "tex0", + "RedshiftEnvironment": "tex2", + "RedshiftDomeLight": "tex1", + "RedshiftIESLight": "profile", + "RedshiftLightGobo": "tex0", + "RedshiftNormalMap": "tex0", + "RedshiftProxyMesh": "fileName", + "RedshiftVolumeShape": "fileName", + "VRayTexGLSL": "fileName", + "VRayMtlGLSL": "fileName", + "VRayVRmatMtl": "fileName", + "VRayPtex": "ptexFile", + "VRayLightIESShape": "iesFile", + "VRayMesh": "materialAssignmentsFile", + "VRayMtlOSL": "fileName", + "VRayTexOSL": "fileName", + "VRayTexOCIO": "ocioConfigFile", + "VRaySettingsNode": "pmap_autoSaveFile2", + "VRayScannedMtl": "file", + "VRayScene": "parameterOverrideFilePath", + "VRayMtlMDL": "filename", + "VRaySimbiont": "file", + "dlOpenVDBShape": "filename", + "pgYetiMayaShape": "liveABCFilename", + "gpuCache": "cacheFileName" + } + }, "ValidateRenderSettings": { "arnold_render_attributes": [], "vray_render_attributes": [], "redshift_render_attributes": [], "renderman_render_attributes": [] }, + "ValidateCurrentRenderLayerIsRenderable": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderImageRule": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderSingleCamera": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRenderLayerAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateStepSize": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayDistributedRendering": { + "enabled": true, + "optional": false, + "active": true 
+ }, + "ValidateVrayReferencedAOVs": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVRayTranslatorEnabled": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateVrayProxyMembers": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRenderScriptCallbacks": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigCacheState": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigInputShapesInInstance": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateYetiRigSettings": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateModelName": { "enabled": false, "database": true, @@ -238,6 +495,7 @@ }, "ValidateTransformNamingSuffix": { "enabled": true, + "optional": true, "SUFFIX_NAMING_TABLE": { "mesh": [ "_GEO", @@ -261,7 +519,7 @@ "ALLOW_IF_NOT_IN_SUFFIX_TABLE": true }, "ValidateColorSets": { - "enabled": false, + "enabled": true, "optional": true, "active": true }, @@ -305,6 +563,16 @@ "optional": true, "active": true }, + "ValidateMeshNoNegativeScale": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateMeshNonZeroEdgeLength": { + "enabled": true, + "optional": true, + "active": true + }, "ValidateMeshNormalsUnlocked": { "enabled": false, "optional": true, @@ -327,22 +595,22 @@ }, "ValidateNoNamespace": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoNullTransforms": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNoUnknownNodes": { "enabled": true, - "optional": true, + "optional": false, "active": true }, "ValidateNodeNoGhosting": { "enabled": false, - "optional": true, + "optional": false, "active": true }, "ValidateShapeDefaultNames": { @@ -370,6 +638,39 @@ "optional": true, "active": true }, + "ValidateNoVRayMesh": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealMeshTriangulated": { + "enabled": false, + "optional": true, + "active": true + }, + "ValidateAlembicVisibleOnly": { + "enabled": true, + "optional": false, + "active": true + }, + "ExtractProxyAlembic": { + "enabled": true, + "families": [ + "proxyAbc" + ] + }, + "ExtractAlembic": { + "enabled": true, + "families": [ + "pointcache", + "model", + "vrayproxy" + ] + }, + "ExtractObj": { + "enabled": false, + "optional": true + }, "ValidateRigContents": { "enabled": false, "optional": true, @@ -385,8 +686,34 @@ "optional": true, "active": true }, + "ValidateAnimationContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateOutRelatedNodeIds": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateRigControllersArnoldAttributes": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkeletalMeshHierarchy": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateSkinclusterDeformerSet": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateRigOutSetNodeIds": { "enabled": true, + "optional": false, "allow_history_only": false }, "ValidateCameraAttributes": { @@ -399,11 +726,46 @@ "optional": true, "active": true }, + "ValidateAssemblyNamespaces": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateAssemblyModelTransforms": { + "enabled": true, + "optional": false, + "active": true + }, "ValidateAssRelativePaths": { 
"enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerContent": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateInstancerFrameRanges": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateNoDefaultCameras": { + "enabled": true, + "optional": false, + "active": true + }, + "ValidateUnrealUpAxis": { + "enabled": false, "optional": true, "active": true }, + "ValidateCameraContents": { + "enabled": true, + "optional": false, + "validate_shapes": true + }, "ExtractPlayblast": { "capture_preset": { "Codec": { @@ -436,30 +798,42 @@ "isolate_view": true, "off_screen": true }, - "PanZoom": { - "pan_zoom": true - }, "Renderer": { "rendererName": "vp2Renderer" }, "Resolution": { "width": 1920, - "height": 1080, - "percent": 1.0, - "mode": "Custom" + "height": 1080 }, "Viewport Options": { "override_viewport_options": true, "displayLights": "default", + "displayTextures": true, "textureMaxResolution": 1024, - "multiSample": 4, + "renderDepthOfField": true, "shadows": true, - "textures": true, "twoSidedLighting": true, - "ssaoEnable": true, + "lineAAEnable": true, + "multiSample": 8, + "ssaoEnable": false, + "ssaoAmount": 1, + "ssaoRadius": 16, + "ssaoFilterRadius": 16, + "ssaoSamples": 16, + "fogging": false, + "hwFogFalloff": "0", + "hwFogDensity": 0.0, + "hwFogStart": 0, + "hwFogEnd": 100, + "hwFogAlpha": 0, + "hwFogColorR": 1.0, + "hwFogColorG": 1.0, + "hwFogColorB": 1.0, + "motionBlurEnable": false, + "motionBlurSampleCount": 8, + "motionBlurShutterOpenFraction": 0.2, "cameras": false, "clipGhosts": false, - "controlVertices": false, "deformers": false, "dimensions": false, "dynamicConstraints": false, @@ -471,8 +845,7 @@ "grid": false, "hairSystems": true, "handles": false, - "hud": false, - "hulls": false, + "headsUpDisplay": false, "ikHandles": false, "imagePlane": true, "joints": false, @@ -483,7 +856,9 @@ "nCloths": false, "nParticles": false, "nRigids": false, + "controlVertices": false, "nurbsCurves": false, + "hulls": false, "nurbsSurfaces": false, "particleInstancers": false, "pivots": false, @@ -491,7 +866,8 @@ "pluginShapes": false, "polymeshes": true, "strokes": false, - "subdivSurfaces": false + "subdivSurfaces": false, + "textures": false }, "Camera Options": { "displayGateMask": false, @@ -629,7 +1005,7 @@ "current_context": [ { "subset_name_filters": [ - "\".+[Mm]ain\"" + ".+[Mm]ain" ], "families": [ "model" @@ -646,7 +1022,8 @@ "subset_name_filters": [], "families": [ "animation", - "pointcache" + "pointcache", + "proxyAbc" ], "repre_names": [ "abc" @@ -709,6 +1086,9 @@ } ] }, + "templated_workfile_build": { + "profiles": [] + }, "filters": { "preset 1": { "ValidateNoAnimation": false, diff --git a/openpype/settings/defaults/project_settings/nuke.json b/openpype/settings/defaults/project_settings/nuke.json index 6992fb6e3e..2999d1427d 100644 --- a/openpype/settings/defaults/project_settings/nuke.json +++ b/openpype/settings/defaults/project_settings/nuke.json @@ -1,13 +1,211 @@ { "general": { "menu": { - "create": "ctrl+shift+alt+c", + "create": "ctrl+alt+c", "publish": "ctrl+alt+p", "load": "ctrl+alt+l", "manage": "ctrl+alt+m", "build_workfile": "ctrl+alt+b" } }, + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + }, + "viewer": { + "viewerProcess": "sRGB" + }, + "baking": { + "viewerProcess": "rec709" + }, + "workfile": { + "colorManagement": "Nuke", + "OCIO_config": "nuke-default", + "customOCIOConfigPath": { + "windows": [], + 
"darwin": [], + "linux": [] + }, + "workingSpaceLUT": "linear", + "monitorLut": "sRGB", + "int8Lut": "sRGB", + "int16Lut": "sRGB", + "logLut": "Cineon", + "floatLut": "linear" + }, + "nodes": { + "requiredNodes": [ + { + "plugins": [ + "CreateWriteRender" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": true + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 186, + 35, + 35, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "linear" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + }, + { + "plugins": [ + "CreateWritePrerender" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "exr" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit half" + }, + { + "type": "text", + "name": "compression", + "value": "Zip (1 scanline)" + }, + { + "type": "bool", + "name": "autocrop", + "value": true + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 171, + 171, + 10, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "linear" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + }, + { + "plugins": [ + "CreateWriteStill" + ], + "nukeNodeClass": "Write", + "knobs": [ + { + "type": "text", + "name": "file_type", + "value": "tiff" + }, + { + "type": "text", + "name": "datatype", + "value": "16 bit" + }, + { + "type": "text", + "name": "compression", + "value": "Deflate" + }, + { + "type": "color_gui", + "name": "tile_color", + "value": [ + 56, + 162, + 7, + 255 + ] + }, + { + "type": "text", + "name": "channels", + "value": "rgb" + }, + { + "type": "text", + "name": "colorspace", + "value": "sRGB" + }, + { + "type": "bool", + "name": "create_directories", + "value": true + } + ] + } + ], + "overrideNodes": [] + }, + "regexInputs": { + "inputs": [ + { + "regex": "(beauty).*(?=.exr)", + "colorspace": "linear" + } + ] + } + }, "nuke-dirmap": { "enabled": false, "paths": { @@ -15,38 +213,129 @@ "destination-path": [] } }, + "scriptsmenu": { + "name": "OpenPype Tools", + "definition": [ + { + "type": "action", + "sourcetype": "python", + "title": "OpenPype Docs", + "command": "import webbrowser;webbrowser.open(url='https://openpype.io/docs/artist_hosts_nuke_tut')", + "tooltip": "Open the OpenPype Nuke user doc page" + } + ] + }, + "gizmo": [ + { + "toolbar_menu_name": "OpenPype Gizmo", + "gizmo_source_dir": { + "windows": [], + "darwin": [], + "linux": [] + }, + "toolbar_icon_path": { + "windows": "", + "darwin": "", + "linux": "" + }, + "gizmo_definition": [ + { + "gizmo_toolbar_path": "/path/to/menu", + "sub_gizmo_list": [ + { + "sourcetype": "python", + "title": "Gizmo Note", + "command": "nuke.nodes.StickyNote(label='You can create your own toolbar menu in the Nuke GizmoMenu of OpenPype')", + "icon": "", + "shortcut": "" + } + ] + } + ] + } + ], "create": { "CreateWriteRender": { - "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}", - "defaults": [ + "temp_rendering_path_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}", + "default_variants": [ "Main", "Mask" - 
] + ], + "instance_attributes": [ + "reviewable", + "farm_rendering" + ], + "prenodes": { + "Reformat01": { + "nodeclass": "Reformat", + "dependent": "", + "knobs": [ + { + "type": "text", + "name": "resize", + "value": "none" + }, + { + "type": "bool", + "name": "black_outside", + "value": true + } + ] + } + } }, "CreateWritePrerender": { - "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}", - "use_range_limit": true, - "defaults": [ + "temp_rendering_path_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}", + "default_variants": [ "Key01", "Bg01", "Fg01", "Branch01", "Part01" ], - "reviewable": false + "instance_attributes": [ + "farm_rendering", + "use_range_limit" + ], + "prenodes": {} + }, + "CreateWriteImage": { + "temp_rendering_path_template": "{work}/renders/nuke/{subset}/{subset}.{ext}", + "default_variants": [ + "StillFrame", + "MPFrame", + "LayoutFrame" + ], + "instance_attributes": [ + "use_range_limit" + ], + "prenodes": { + "FrameHold01": { + "nodeclass": "FrameHold", + "dependent": "", + "knobs": [ + { + "type": "expression", + "name": "first_frame", + "expression": "parent.first" + } + ] + } + } } }, "publish": { - "PreCollectNukeInstances": { + "CollectInstanceData": { "sync_workfile_version_on_families": [ "nukenodes", "camera", "gizmo", "source", - "render" + "render", + "write" ] }, - "ValidateInstanceInContext": { + "ValidateCorrectAssetName": { "enabled": true, "optional": true, "active": true @@ -81,6 +370,9 @@ }, "ExtractThumbnail": { "enabled": true, + "use_rendered": true, + "bake_viewer_process": true, + "bake_viewer_input_process": true, "nodes": { "Reformat": [ [ @@ -106,6 +398,9 @@ ] } }, + "ExtractReviewData": { + "enabled": false + }, "ExtractReviewDataLut": { "enabled": false }, @@ -117,27 +412,26 @@ "filter": { "task_types": [], "families": [], - "sebsets": [] + "subsets": [] }, - "extension": "mov", + "read_raw": false, "viewer_process_override": "", "bake_viewer_process": true, "bake_viewer_input_process": true, - "add_tags": [], "reformat_node_add": false, "reformat_node_config": [ { - "type": "string", + "type": "text", "name": "type", "value": "to format" }, { - "type": "string", + "type": "text", "name": "format", "value": "HD_1080" }, { - "type": "string", + "type": "text", "name": "filter", "value": "Lanczos6" }, @@ -151,12 +445,28 @@ "name": "pbb", "value": false } - ] + ], + "extension": "mov", + "add_custom_tags": [] } } }, "ExtractSlateFrame": { - "viewer_lut_raw": false + "viewer_lut_raw": false, + "key_value_mapping": { + "f_submission_note": [ + true, + "{comment}" + ], + "f_submitting_for": [ + true, + "{intent[value]}" + ], + "f_vfx_scope_of_work": [ + false, + "" + ] + } }, "IncrementScriptVersion": { "enabled": true, @@ -181,7 +491,11 @@ "LoadClip": { "enabled": true, "_representations": [], - "node_name_template": "{class_name}_{ext}" + "node_name_template": "{class_name}_{ext}", + "options_defaults": { + "start_at_workfile": true, + "add_retime": true + } } }, "workfile_builder": { @@ -202,11 +516,12 @@ "repre_names": [ "exr", "dpx", - "mov" + "mov", + "mp4", + "h264" ], "loaders": [ - "LoadSequence", - "LoadMov" + "LoadClip" ] } ], @@ -214,5 +529,8 @@ } ] }, + "templated_workfile_build": { + "profiles": [] + }, "filters": {} } \ No newline at end of file diff --git a/openpype/settings/defaults/project_settings/photoshop.json b/openpype/settings/defaults/project_settings/photoshop.json index 118b9c721e..cdfab0c439 100644 --- a/openpype/settings/defaults/project_settings/photoshop.json +++ 
b/openpype/settings/defaults/project_settings/photoshop.json @@ -1,4 +1,14 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, "create": { "CreateImage": { "defaults": [ @@ -8,17 +18,26 @@ }, "publish": { "CollectColorCodedInstances": { - "create_flatten_image": false, + "create_flatten_image": "no", "flatten_subset_template": "", "color_code_mapping": [] }, + "CollectInstances": { + "flatten_subset_template": "" + }, + "CollectReview": { + "publish": true + }, + "CollectVersion": { + "enabled": false + }, "ValidateContainers": { "enabled": true, "optional": true, "active": true }, "ValidateNaming": { - "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,]", + "invalid_chars": "[ \\\\/+\\*\\?\\(\\)\\[\\]\\{\\}:,;]", "replace_char": "_" }, "ExtractImage": { @@ -29,8 +48,12 @@ }, "ExtractReview": { "make_image_sequence": false, + "max_downscale_size": 8192, "jpg_options": { - "tags": [] + "tags": [ + "review", + "ftrackreview" + ] }, "mov_options": { "tags": [ @@ -44,4 +67,4 @@ "create_first_version": false, "custom_templates": [] } -} +} \ No newline at end of file
diff --git a/openpype/settings/defaults/project_settings/resolve.json b/openpype/settings/defaults/project_settings/resolve.json index b6fbdecc95..66013c5ac7 100644 --- a/openpype/settings/defaults/project_settings/resolve.json +++ b/openpype/settings/defaults/project_settings/resolve.json @@ -1,4 +1,14 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, "create": { "CreateShotClip": { "hierarchy": "{folder}/{sequence}",
diff --git a/openpype/settings/defaults/project_settings/shotgrid.json b/openpype/settings/defaults/project_settings/shotgrid.json new file mode 100644 index 0000000000..774bce714b --- /dev/null +++ b/openpype/settings/defaults/project_settings/shotgrid.json @@ -0,0 +1,22 @@ +{ + "shotgrid_project_id": 0, + "shotgrid_server": "", + "event": { + "enabled": false + }, + "fields": { + "asset": { + "type": "sg_asset_type" + }, + "sequence": { + "episode_link": "episode" + }, + "shot": { + "episode_link": "sg_episode", + "sequence_link": "sg_sequence" + }, + "task": { + "step": "step" + } + } +} \ No newline at end of file
diff --git a/openpype/settings/defaults/project_settings/slack.json b/openpype/settings/defaults/project_settings/slack.json index d77b8c2208..c156fed08e 100644 --- a/openpype/settings/defaults/project_settings/slack.json +++ b/openpype/settings/defaults/project_settings/slack.json @@ -11,6 +11,7 @@ "task_types": [], "tasks": [], "subsets": [], + "review_upload_limit": 50.0, "channel_messages": [] } ]
diff --git a/openpype/settings/defaults/project_settings/standalonepublisher.json b/openpype/settings/defaults/project_settings/standalonepublisher.json index 6858c4f34d..b6e2e056a1 100644 --- a/openpype/settings/defaults/project_settings/standalonepublisher.json +++ b/openpype/settings/defaults/project_settings/standalonepublisher.json @@ -133,6 +133,22 @@ ], "help": "Texture files with UDIM together with workfile" }, + "create_simple_unreal_texture": { + "name": "simple_unreal_texture", + "label": "Simple Unreal Texture", + "family": "simpleUnrealTexture", + "icon": "Image", + "defaults": [], + "help": "Texture files with Unreal naming convention" + }, + "create_vdb": { + "name": "vdb", + "label": "VDB Volumetric Data", + "family": "vdbcache", + "icon": "cloud", + "defaults": [], + "help": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids" + }, "__dynamic_keys_labels__": { "create_workfile": "Workfile", "create_model": "Model", @@ -145,7 +161,9 @@ "create_matchmove": "Matchmove", "create_render": "Render", "create_mov_batch": "Batch Mov", - "create_texture_batch": "Batch Texture" + "create_texture_batch": "Batch Texture", + "create_simple_unreal_texture": "Simple Unreal Texture", + "create_vdb": "VDB Cache" } }, "publish": { @@ -239,12 +257,14 @@ ] }, "CollectHierarchyInstance": { + "shot_rename": true, "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}", "shot_rename_search_patterns": { - "_sequence_": "(\\d{4})(?=_\\d{4})", - "_shot_": "(\\d{4})(?!_\\d{4})" + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" }, "shot_add_hierarchy": { + "enabled": true, "parents_path": "{project}/{folder}/{sequence}", "parents": { "project": "{project[name]}",
diff --git a/openpype/settings/defaults/project_settings/traypublisher.json b/openpype/settings/defaults/project_settings/traypublisher.json new file mode 100644 index 0000000000..8a222a6dd2 --- /dev/null +++ b/openpype/settings/defaults/project_settings/traypublisher.json @@ -0,0 +1,324 @@ +{ + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "simple_creators": [
+ { + "family": "workfile", + "identifier": "", + "label": "Workfile", + "icon": "fa.file", + "default_variants": [ + "Main" + ], + "description": "Backup of a working scene", + "detailed_description": "Workfiles are full scenes from any application that are directly edited by artists. They represent a state of work on a task at a given point and are usually not directly referenced into other scenes.", + "allow_sequences": false, + "allow_multiple_items": false, + "extensions": [ + ".ma", + ".mb", + ".nk", + ".hrox", + ".hip", + ".hiplc", + ".hipnc", + ".blend", + ".scn", + ".tvpp", + ".comp", + ".zip", + ".prproj", + ".drp", + ".psd", + ".psb", + ".aep" + ] + },
+ { + "family": "model", + "identifier": "", + "label": "Model", + "icon": "fa.cubes", + "default_variants": [ + "Main", + "Proxy", + "Sculpt" + ], + "description": "Clean models", + "detailed_description": "Models should only contain geometry data, without any extras like cameras, locators or bones.\n\nKeep in mind that models published from tray publisher are not validated for correctness. ", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".ma", + ".mb", + ".obj", + ".abc", + ".fbx", + ".bgeo", + ".bgeogz", + ".bgeosc", + ".usd", + ".blend" + ] + },
+ { + "family": "pointcache", + "identifier": "", + "label": "Pointcache", + "icon": "fa.gears", + "default_variants": [ + "Main" + ], + "description": "Geometry Caches", + "detailed_description": "Alembic or bgeo cache of animated data", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".bgeo", + ".bgeogz", + ".bgeosc" + ] + },
+ { + "family": "plate", + "identifier": "", + "label": "Plate", + "icon": "mdi.camera-image", + "default_variants": [ + "Main", + "BG", + "Animatic", + "Reference", + "Offline" + ], + "description": "Footage Plates", + "detailed_description": "Any type of image sequence coming from outside of the studio. Usually camera footage, but could also be animatics used for reference.", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + },
+ { + "family": "render", + "identifier": "", + "label": "Render", + "icon": "mdi.folder-multiple-image", + "default_variants": [], + "description": "Rendered images or video", + "detailed_description": "Sequence or single file renders", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".png", + ".dpx", + ".jpg", + ".jpeg", + ".tiff", + ".tif", + ".mov", + ".mp4", + ".avi" + ] + },
+ { + "family": "camera", + "identifier": "", + "label": "Camera", + "icon": "fa.video-camera", + "default_variants": [], + "description": "3d Camera", + "detailed_description": "Ideally this should be only the camera itself with baked animation, however, it can technically also include helper geometry.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".abc", + ".ma", + ".hip", + ".blend", + ".fbx", + ".usd" + ] + },
+ { + "family": "image", + "identifier": "", + "label": "Image", + "icon": "fa.image", + "default_variants": [ + "Reference", + "Texture", + "Concept", + "Background" + ], + "description": "Single image", + "detailed_description": "Any image data can be published as image family. References, textures, concept art, matte paints. This is a fallback 2d family for everything that doesn't fit a more specific family.", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [ + ".exr", + ".jpg", + ".jpeg", + ".dpx", + ".bmp", + ".tif", + ".tiff", + ".png", + ".psb", + ".psd" + ] + },
+ { + "family": "vdb", + "identifier": "", + "label": "VDB Volumes", + "icon": "fa.cloud", + "default_variants": [], + "description": "Sparse volumetric data", + "detailed_description": "Hierarchical data structure for the efficient storage and manipulation of sparse volumetric data discretized on three-dimensional grids", + "allow_sequences": true, + "allow_multiple_items": true, + "extensions": [ + ".vdb" + ] + },
+ { + "family": "matchmove", + "identifier": "", + "label": "Matchmove", + "icon": "fa.empire", + "default_variants": [ + "Camera", + "Object", + "Mocap" + ], + "description": "Matchmoving script", + "detailed_description": "Script exported from a matchmoving application to be later processed into a tracked camera with additional data", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] + },
+ { + "family": "rig", + "identifier": "", + "label": "Rig", + "icon": "fa.wheelchair", + "default_variants": [], + "description": "CG rig file", + "detailed_description": "CG rigged character or prop. Rig should be clean of any extra data and directly loadable into its respective application", + "allow_sequences": false, + "allow_multiple_items": false, + "extensions": [ + ".ma", + ".blend", + ".hip", + ".hda" + ] + },
+ { + "family": "simpleUnrealTexture", + "identifier": "", + "label": "Simple UE texture", + "icon": "fa.image", + "default_variants": [], + "description": "Simple Unreal Engine texture", + "detailed_description": "Texture files with Unreal Engine naming conventions", + "allow_sequences": false, + "allow_multiple_items": true, + "extensions": [] + } + ],
+ "editorial_creators": { + "editorial_simple": { + "default_variants": [ + "Main" + ], + "clip_name_tokenizer": { + "_sequence_": "(sc\\d{3})", + "_shot_": "(sh\\d{3})" + }, + "shot_rename": { + "enabled": true, + "shot_rename_template": "{project[code]}_{_sequence_}_{_shot_}" + }, + "shot_hierarchy": { + "enabled": true, + "parents_path": "{project}/{folder}/{sequence}", + "parents": [ + { + "type": "Project", + "name": "project", + "value": "{project[name]}" + }, + { + "type": "Folder", + "name": "folder", + "value": "shots" + }, + { + "type": "Sequence", + "name": "sequence", + "value": "{_sequence_}" + } + ] + }, + "shot_add_tasks": {}, + "family_presets": [ + { + "family": "review", + "variant": "Reference", + "review": true, + "output_file_type": ".mp4" + }, + { + "family": "plate", + "variant": "", + "review": false, + "output_file_type": ".mov" + }, + { + "family": "audio", + "variant": "", + "review": false, + "output_file_type": ".wav" + } + ] + } + },
+ "BatchMovieCreator": { + "default_variants": [ + "Main" + ], + "default_tasks": [ + "Compositing" + ], + "extensions": [ + ".mov" + ] + }, + "publish": { + "ValidateFrameRange": { + "enabled": true, + "optional": true, + "active": true + } + } +} \ No newline at end of file
diff --git a/openpype/settings/defaults/project_settings/tvpaint.json b/openpype/settings/defaults/project_settings/tvpaint.json index 528bf6de8e..5a3e1dc2df 100644 --- a/openpype/settings/defaults/project_settings/tvpaint.json +++ b/openpype/settings/defaults/project_settings/tvpaint.json @@ -1,12 +1,31 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, "stop_timer_on_application_exit": false, "publish": { + "CollectRenderScene": { + "enabled": false, + "render_layer": "Main" + }, "ExtractSequence": { "review_bg": [ 255, 255, 255, 255 + ], + "families_to_review": [ + "review", + "renderlayer", + "renderscene" ] }, "ValidateProjectSettings": { @@ -28,6 +47,11 @@ "enabled": true, "optional": true, "active": true + }, + "ExtractConvertToEXR": { + "enabled": false, + "replace_pngs": true, + "exr_compression": "ZIP" } }, "load": {
diff --git a/openpype/settings/defaults/project_settings/unreal.json b/openpype/settings/defaults/project_settings/unreal.json index dad61cd1f0..b06bf28714 100644 --- a/openpype/settings/defaults/project_settings/unreal.json +++ b/openpype/settings/defaults/project_settings/unreal.json @@ -1,4 +1,16 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "level_sequences_for_layouts": false, + "delete_unmatched_assets": false, "project_setup": { "dev_mode": true }
diff --git a/openpype/settings/defaults/project_settings/webpublisher.json b/openpype/settings/defaults/project_settings/webpublisher.json index 77168c25e6..27eac131b7 100644 --- 
a/openpype/settings/defaults/project_settings/webpublisher.json +++ b/openpype/settings/defaults/project_settings/webpublisher.json @@ -1,6 +1,26 @@ { + "imageio": { + "ocio_config": { + "enabled": false, + "filepath": [] + }, + "file_rules": { + "enabled": false, + "rules": {} + } + }, + "timeout_profiles": [ + { + "hosts": [ + "photoshop" + ], + "task_types": [], + "timeout": 600 + } + ], "publish": { "CollectPublishedFiles": { + "sync_next_version": false, "task_type_to_family": { "Animation": [ { diff --git a/openpype/settings/defaults/system_settings/applications.json b/openpype/settings/defaults/system_settings/applications.json index 0fb99a2608..936407a49b 100644 --- a/openpype/settings/defaults/system_settings/applications.json +++ b/openpype/settings/defaults/system_settings/applications.json @@ -12,6 +12,26 @@ "LC_ALL": "C" }, "variants": { + "2023": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Autodesk\\Maya2023\\bin\\maya.exe" + ], + "darwin": [], + "linux": [ + "/usr/autodesk/maya2023/bin/maya" + ] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": { + "MAYA_VERSION": "2023" + } + }, "2022": { "use_python_2": false, "executables": { @@ -91,9 +111,35 @@ "environment": { "MAYA_VERSION": "2018" } - }, - "__dynamic_keys_labels__": { - "2022": "2022" + } + } + }, + "3dsmax": { + "enabled": true, + "label": "3ds max", + "icon": "{}/app_icons/3dsmax.png", + "host_name": "max", + "environment": { + "ADSK_3DSMAX_STARTUPSCRIPTS_ADDON_DIR": "{OPENPYPE_ROOT}\\openpype\\hosts\\max\\startup" + }, + "variants": { + "2023": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Autodesk\\3ds Max 2023\\3dsmax.exe" + ], + "darwin": [], + "linux": [] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": { + "3DSMAX_VERSION": "2023" + } } } }, @@ -175,6 +221,24 @@ ] }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -264,6 +328,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -275,7 +340,7 @@ "nukex": { "enabled": true, "label": "Nuke X", - "icon": "{}/app_icons/nuke.png", + "icon": "{}/app_icons/nukex.png", "host_name": "nuke", "environment": { "NUKE_PATH": [ @@ -284,6 +349,30 @@ ] }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--nukex" + ], + "darwin": [ + "--nukex" + ], + "linux": [ + "--nukex" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -403,6 +492,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -414,13 +504,37 @@ "nukestudio": { "enabled": true, "label": "Nuke Studio", - "icon": "{}/app_icons/nuke.png", + "icon": "{}/app_icons/nukestudio.png", "host_name": "hiero", "environment": { "WORKFILES_STARTUP": "0", "TAG_ASSETBUILD_STARTUP": "0" }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program 
Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--studio" + ], + "darwin": [ + "--studio" + ], + "linux": [ + "--studio" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -538,6 +652,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -556,6 +671,30 @@ "TAG_ASSETBUILD_STARTUP": "0" }, "variants": { + "13-2": { + "use_python_2": false, + "executables": { + "windows": [ + "C:\\Program Files\\Nuke13.2v1\\Nuke13.2.exe" + ], + "darwin": [], + "linux": [ + "/usr/local/Nuke13.2v1/Nuke13.2" + ] + }, + "arguments": { + "windows": [ + "--hiero" + ], + "darwin": [ + "--hiero" + ], + "linux": [ + "--hiero" + ] + }, + "environment": {} + }, "13-0": { "use_python_2": false, "executables": { @@ -675,6 +814,7 @@ "environment": {} }, "__dynamic_keys_labels__": { + "13-2": "13.2", "13-0": "13.0", "12-2": "12.2", "12-0": "12.0", @@ -689,30 +829,28 @@ "icon": "{}/app_icons/fusion.png", "host_name": "fusion", "environment": { - "FUSION_UTILITY_SCRIPTS_SOURCE_DIR": [], - "FUSION_UTILITY_SCRIPTS_DIR": { - "windows": "{PROGRAMDATA}/Blackmagic Design/Fusion/Scripts/Comp", - "darwin": "/Library/Application Support/Blackmagic Design/Fusion/Scripts/Comp", - "linux": "/opt/Fusion/Scripts/Comp" - }, - "PYTHON36": { + "FUSION_PYTHON3_HOME": { "windows": "{LOCALAPPDATA}/Programs/Python/Python36", "darwin": "~/Library/Python/3.6/bin", "linux": "/opt/Python/3.6/bin" - }, - "PYTHONPATH": [ - "{PYTHON36}/Lib/site-packages", - "{VIRTUAL_ENV}/Lib/site-packages", - "{PYTHONPATH}" - ], - "PATH": [ - "{PYTHON36}", - "{PYTHON36}/Scripts", - "{PATH}" - ], - "OPENPYPE_LOG_NO_COLORS": "Yes" + } }, "variants": { + "18": { + "executables": { + "windows": [ + "C:\\Program Files\\Blackmagic Design\\Fusion 18\\Fusion.exe" + ], + "darwin": [], + "linux": [] + }, + "arguments": { + "windows": [], + "darwin": [], + "linux": [] + }, + "environment": {} + }, "17": { "executables": { "windows": [ @@ -767,41 +905,11 @@ "host_name": "resolve", "environment": { "RESOLVE_UTILITY_SCRIPTS_SOURCE_DIR": [], - "RESOLVE_SCRIPT_API": { - "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Support/Developer/Scripting", - "darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Developer/Scripting", - "linux": "/opt/resolve/Developer/Scripting" - }, - "RESOLVE_SCRIPT_LIB": { - "windows": "C:/Program Files/Blackmagic Design/DaVinci Resolve/fusionscript.dll", - "darwin": "/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/fusionscript.so", - "linux": "/opt/resolve/libs/Fusion/fusionscript.so" - }, - "RESOLVE_UTILITY_SCRIPTS_DIR": { - "windows": "{PROGRAMDATA}/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", - "darwin": "/Library/Application Support/Blackmagic Design/DaVinci Resolve/Fusion/Scripts/Comp", - "linux": "/opt/resolve/Fusion/Scripts/Comp" - }, - "PYTHON36_RESOLVE": { + "RESOLVE_PYTHON3_HOME": { "windows": "{LOCALAPPDATA}/Programs/Python/Python36", "darwin": "~/Library/Python/3.6/bin", "linux": "/opt/Python/3.6/bin" - }, - "PYTHONPATH": [ - "{PYTHON36_RESOLVE}/Lib/site-packages", - "{VIRTUAL_ENV}/Lib/site-packages", - "{PYTHONPATH}", - "{RESOLVE_SCRIPT_API}/Modules", - "{PYTHONPATH}" - ], - "PATH": [ - "{PYTHON36_RESOLVE}", - "{PYTHON36_RESOLVE}/Scripts", - "{PATH}" - ], - "PRE_PYTHON_SCRIPT": "{OPENPYPE_REPOS_ROOT}/openpype/resolve/preload_console.py", - "OPENPYPE_LOG_NO_COLORS": "True", - 
"RESOLVE_DEV": "True" + } }, "variants": { "stable": { @@ -969,8 +1077,6 @@ }, "variants": { "21": { - "enabled": true, - "variant_label": "21", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 21 Premium\\win64\\bin\\HarmonyPremium.exe" @@ -988,8 +1094,6 @@ "environment": {} }, "20": { - "enabled": true, - "variant_label": "20", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 20 Premium\\win64\\bin\\HarmonyPremium.exe" @@ -1007,8 +1111,6 @@ "environment": {} }, "17": { - "enabled": true, - "variant_label": "17", "executables": { "windows": [ "c:\\Program Files (x86)\\Toon Boom Animation\\Toon Boom Harmony 17 Premium\\win64\\bin\\HarmonyPremium.exe" @@ -1138,11 +1240,9 @@ }, "variants": { "2020": { - "enabled": true, - "variant_label": "2020", "executables": { "windows": [ - "" + "C:\\Program Files\\Adobe\\Adobe After Effects 2020\\Support Files\\AfterFX.exe" ], "darwin": [], "linux": [] @@ -1155,8 +1255,6 @@ "environment": {} }, "2021": { - "enabled": true, - "variant_label": "2021", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe After Effects 2021\\Support Files\\AfterFX.exe" @@ -1172,8 +1270,6 @@ "environment": {} }, "2022": { - "enabled": true, - "variant_label": "2022", "executables": { "windows": [ "C:\\Program Files\\Adobe\\Adobe After Effects 2022\\Support Files\\AfterFX.exe" @@ -1201,12 +1297,12 @@ "CELACTION_TEMPLATE": "{OPENPYPE_REPOS_ROOT}/openpype/hosts/celaction/celaction_template_scene.scn" }, "variants": { - "local": { + "current": { "enabled": true, - "variant_label": "Local", + "variant_label": "Current", "use_python_2": false, "executables": { - "windows": [], + "windows": ["C:/Program Files/CelAction/CelAction2D Studio/CelAction2D.exe"], "darwin": [], "linux": [] }, @@ -1226,9 +1322,19 @@ "host_name": "unreal", "environment": {}, "variants": { - "4-26": { + "4-27": { "use_python_2": false, "environment": {} + }, + "5-0": { + "use_python_2": false, + "environment": { + "UE_PYTHONPATH": "{PYTHONPATH}" + } + }, + "__dynamic_keys_labels__": { + "4-27": "4.27", + "5-0": "5.0" } } }, diff --git a/openpype/settings/defaults/system_settings/general.json b/openpype/settings/defaults/system_settings/general.json index 5a3e39e5b6..909ffc1ee4 100644 --- a/openpype/settings/defaults/system_settings/general.json +++ b/openpype/settings/defaults/system_settings/general.json @@ -2,16 +2,14 @@ "studio_name": "Studio name", "studio_code": "stu", "admin_password": "", - "environment": { - "__environment_keys__": { - "global": [] - } - }, + "environment": {}, + "log_to_server": true, "disk_mapping": { "windows": [], "linux": [], "darwin": [] }, + "local_env_white_list": [], "openpype_path": { "windows": [], "darwin": [], diff --git a/openpype/settings/defaults/system_settings/modules.json b/openpype/settings/defaults/system_settings/modules.json index d74269922f..703e72cb5d 100644 --- a/openpype/settings/defaults/system_settings/modules.json +++ b/openpype/settings/defaults/system_settings/modules.json @@ -26,13 +26,14 @@ "linux": [] }, "intent": { + "allow_empty_intent": true, + "empty_intent_label": "", "items": { - "-": "-", "wip": "WIP", "final": "Final", "test": "Test" }, - "default": "-" + "default": "" }, "custom_attributes": { "show": { @@ -59,13 +60,11 @@ "applications": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] } }, @@ -73,25 +72,21 @@ 
"tools_env": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] }, "avalon_mongo_id": { "write_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ], "read_security_roles": [ "API", - "Administrator", - "Pypeclub" + "Administrator" ] }, "fps": { @@ -137,6 +132,17 @@ } } }, + "kitsu": { + "enabled": false, + "server": "" + }, + "shotgrid": { + "enabled": false, + "leecher_manager_url": "http://127.0.0.1:3000", + "leecher_backend_url": "http://127.0.0.1:8090", + "filter_projects_by_login": true, + "shotgrid_settings": {} + }, "timers_manager": { "enabled": true, "auto_stop": true, @@ -189,7 +195,7 @@ "enabled": true }, "standalonepublish_tool": { - "enabled": true + "enabled": false }, "project_manager": { "enabled": true diff --git a/openpype/settings/defaults/system_settings/tools.json b/openpype/settings/defaults/system_settings/tools.json index 181236abe8..243cde40cc 100644 --- a/openpype/settings/defaults/system_settings/tools.json +++ b/openpype/settings/defaults/system_settings/tools.json @@ -25,10 +25,18 @@ }, "variants": { "3-2": { - "MTOA_VERSION": "3.2" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.2" + } }, "3-1": { - "MTOA_VERSION": "3.1" + "host_names": [], + "app_variants": [], + "environment": { + "MTOA_VERSION": "3.1" + } }, "__dynamic_keys_labels__": { "3-2": "3.2", @@ -44,10 +52,39 @@ "environment": {}, "variants": {} }, + "renderman": { + "environment": {}, + "variants": { + "24-3-maya": { + "host_names": [ + "maya" + ], + "app_variants": [ + "maya/2022" + ], + "environment": { + "RFMTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManForMaya-24.3", + "darwin": "/Applications/Pixar/RenderManForMaya-24.3", + "linux": "/opt/pixar/RenderManForMaya-24.3" + }, + "RMANTREE": { + "windows": "C:\\Program Files\\Pixar\\RenderManProServer-24.3", + "darwin": "/Applications/Pixar/RenderManProServer-24.3", + "linux": "/opt/pixar/RenderManProServer-24.3" + } + } + }, + "__dynamic_keys_labels__": { + "24-3-maya": "24.3 RFM" + } + } + }, "__dynamic_keys_labels__": { "mtoa": "Autodesk Arnold", "vray": "Chaos Group Vray", - "yeti": "Pergrine Labs Yeti" + "yeti": "Peregrine Labs Yeti", + "renderman": "Pixar Renderman" } } } \ No newline at end of file diff --git a/openpype/settings/entities/__init__.py b/openpype/settings/entities/__init__.py index a173e2454f..5e3a76094e 100644 --- a/openpype/settings/entities/__init__.py +++ b/openpype/settings/entities/__init__.py @@ -107,6 +107,7 @@ from .enum_entity import ( TaskTypeEnumEntity, DeadlineUrlEnumEntity, AnatomyTemplatesEnumEntity, + ShotgridUrlEnumEntity ) from .list_entity import ListEntity @@ -122,10 +123,7 @@ from .dict_conditional import ( ) from .anatomy_entities import AnatomyEntity -from .op_version_entity import ( - ProductionVersionsInputEntity, - StagingVersionsInputEntity -) +from .op_version_entity import VersionsInputEntity __all__ = ( "DefaultsNotDefined", @@ -171,6 +169,7 @@ __all__ = ( "ToolsEnumEntity", "TaskTypeEnumEntity", "DeadlineUrlEnumEntity", + "ShotgridUrlEnumEntity", "AnatomyTemplatesEnumEntity", "ListEntity", @@ -186,6 +185,5 @@ __all__ = ( "AnatomyEntity", - "ProductionVersionsInputEntity", - "StagingVersionsInputEntity" + "VersionsInputEntity", ) diff --git a/openpype/settings/entities/base_entity.py b/openpype/settings/entities/base_entity.py index 21ee44ae77..f28fefdf5a 100644 --- a/openpype/settings/entities/base_entity.py 
+++ b/openpype/settings/entities/base_entity.py @@ -15,7 +15,7 @@ from .exceptions import ( EntitySchemaError ) -from openpype.lib import PypeLogger +from openpype.lib import Logger @six.add_metaclass(ABCMeta) @@ -127,12 +127,6 @@ class BaseItemEntity(BaseEntity): # Entity is in hierarchy of dynamically created entity self.is_in_dynamic_item = False - # Entity will save metadata about environments - # - this is current possible only for RawJsonEnity - self.is_env_group = False - # Key of environment group key must be unique across system settings - self.env_group_key = None - # Roles of an entity self.roles = None @@ -286,16 +280,6 @@ class BaseItemEntity(BaseEntity): ).format(self.group_item.path) raise EntitySchemaError(self, reason) - # Validate that env group entities will be stored into file. - # - env group entities must store metadata which is not possible if - # metadata would be outside of file - if self.file_item is None and self.is_env_group: - reason = ( - "Environment item is not inside file" - " item so can't store metadata for defaults." - ) - raise EntitySchemaError(self, reason) - # Dynamic items must not have defined labels. (UI specific) if self.label and self.is_dynamic_item: raise EntitySchemaError( @@ -494,7 +478,7 @@ class BaseItemEntity(BaseEntity): def log(self): """Auto created logger for debugging or warnings.""" if self._log is None: - self._log = PypeLogger.get_logger(self.__class__.__name__) + self._log = Logger.get_logger(self.__class__.__name__) return self._log @abstractproperty @@ -862,11 +846,6 @@ class ItemEntity(BaseItemEntity): if self.is_dynamic_item: self.require_key = False - # If value should be stored to environments and uder which group key - # - the key may be dynamically changed by it's parent on save - self.env_group_key = self.schema_data.get("env_group_key") - self.is_env_group = bool(self.env_group_key is not None) - # Root item reference self.root_item = self.parent.root_item diff --git a/openpype/settings/entities/dict_mutable_keys_entity.py b/openpype/settings/entities/dict_mutable_keys_entity.py index a0c93b97a7..e6d332b9ad 100644 --- a/openpype/settings/entities/dict_mutable_keys_entity.py +++ b/openpype/settings/entities/dict_mutable_keys_entity.py @@ -15,7 +15,6 @@ from .exceptions import ( from openpype.settings.constants import ( METADATA_KEYS, M_DYNAMIC_KEY_LABEL, - M_ENVIRONMENT_KEY, KEY_REGEX, KEY_ALLOWED_SYMBOLS ) @@ -148,11 +147,7 @@ class DictMutableKeysEntity(EndpointEntity): ): raise InvalidKeySymbols(self.path, key) - if self.value_is_env_group: - item_schema = copy.deepcopy(self.item_schema) - item_schema["env_group_key"] = key - else: - item_schema = self.item_schema + item_schema = self.item_schema new_child = self.create_schema_object(item_schema, self, True) self.children_by_key[key] = new_child @@ -216,9 +211,7 @@ class DictMutableKeysEntity(EndpointEntity): self.children_label_by_id = {} self.store_as_list = self.schema_data.get("store_as_list") or False - self.value_is_env_group = ( - self.schema_data.get("value_is_env_group") or False - ) + self.required_keys = self.schema_data.get("required_keys") or [] self.collapsible_key = self.schema_data.get("collapsible_key") or False # GUI attributes @@ -241,9 +234,6 @@ class DictMutableKeysEntity(EndpointEntity): object_type.update(input_modifiers) self.item_schema = object_type - if self.value_is_env_group: - self.item_schema["env_group_key"] = "" - if self.group_item is None: self.is_group = True @@ -259,10 +249,6 @@ class DictMutableKeysEntity(EndpointEntity): if 
used_temp_label: self.label = None - if self.value_is_env_group and self.store_as_list: - reason = "Item can't store environments metadata to list output." - raise EntitySchemaError(self, reason) - if not self.schema_data.get("object_type"): reason = ( "Modifiable dictionary must have specified `object_type`." @@ -579,18 +565,10 @@ class DictMutableKeysEntity(EndpointEntity): output.append([key, child_value]) return output - output = {} - for key, child_entity in self.children_by_key.items(): - child_value = child_entity.settings_value() - # TODO child should have setter of env group key se child can - # know what env group represents. - if self.value_is_env_group: - if key not in child_value[M_ENVIRONMENT_KEY]: - _metadata = child_value[M_ENVIRONMENT_KEY] - _m_keykey = tuple(_metadata.keys())[0] - env_keys = child_value[M_ENVIRONMENT_KEY].pop(_m_keykey) - child_value[M_ENVIRONMENT_KEY][key] = env_keys - output[key] = child_value + output = { + key: child_entity.settings_value() + for key, child_entity in self.children_by_key.items() + } output.update(self.metadata) return output diff --git a/openpype/settings/entities/enum_entity.py b/openpype/settings/entities/enum_entity.py index 92a397afba..c0c103ea10 100644 --- a/openpype/settings/entities/enum_entity.py +++ b/openpype/settings/entities/enum_entity.py @@ -1,10 +1,7 @@ import copy from .input_entities import InputEntity from .exceptions import EntitySchemaError -from .lib import ( - NOT_SET, - STRING_TYPE -) +from .lib import NOT_SET, STRING_TYPE class BaseEnumEntity(InputEntity): @@ -26,7 +23,7 @@ class BaseEnumEntity(InputEntity): for item in self.enum_items: key = tuple(item.keys())[0] if key in enum_keys: - reason = "Key \"{}\" is more than once in enum items.".format( + reason = 'Key "{}" is more than once in enum items.'.format( key ) raise EntitySchemaError(self, reason) @@ -34,7 +31,7 @@ class BaseEnumEntity(InputEntity): enum_keys.add(key) if not isinstance(key, STRING_TYPE): - reason = "Key \"{}\" has invalid type {}, expected {}.".format( + reason = 'Key "{}" has invalid type {}, expected {}.'.format( key, type(key), STRING_TYPE ) raise EntitySchemaError(self, reason) @@ -59,7 +56,7 @@ class BaseEnumEntity(InputEntity): for item in check_values: if item not in self.valid_keys: raise ValueError( - "{} Invalid value \"{}\". Expected one of: {}".format( + '{} Invalid value "{}". Expected one of: {}'.format( self.path, item, self.valid_keys ) ) @@ -84,7 +81,7 @@ class EnumEntity(BaseEnumEntity): self.valid_keys = set(all_keys) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) value_on_not_set = [] if enum_default: if not isinstance(enum_default, list): @@ -109,7 +106,7 @@ class EnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -152,8 +149,10 @@ class HostsEnumEntity(BaseEnumEntity): Host name is not the same as application name. Host name defines implementation instead of application name. 
""" + schema_types = ["hosts-enum"] all_host_names = [ + "max", "aftereffects", "blender", "celaction", @@ -169,6 +168,7 @@ class HostsEnumEntity(BaseEnumEntity): "tvpaint", "unreal", "standalonepublisher", + "traypublisher", "webpublisher" ] @@ -210,7 +210,7 @@ class HostsEnumEntity(BaseEnumEntity): self.valid_keys = valid_keys if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: for key in valid_keys: @@ -218,7 +218,7 @@ class HostsEnumEntity(BaseEnumEntity): self.value_on_not_set = key break - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) # GUI attribute self.placeholder = self.schema_data.get("placeholder") @@ -226,14 +226,10 @@ class HostsEnumEntity(BaseEnumEntity): def schema_validations(self): if self.hosts_filter: enum_len = len(self.enum_items) - if ( - enum_len == 0 - or (enum_len == 1 and self.use_empty_value) - ): - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) + if enum_len == 0 or (enum_len == 1 and self.use_empty_value): + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) reason = ( "All host names were removed after applying" " host filters. {}" @@ -246,24 +242,25 @@ class HostsEnumEntity(BaseEnumEntity): invalid_filters.add(item) if invalid_filters: - joined_filters = ", ".join([ - '"{}"'.format(item) - for item in self.hosts_filter - ]) - expected_hosts = ", ".join([ - '"{}"'.format(item) - for item in self.all_host_names - ]) - self.log.warning(( - "Host filters containt invalid host names:" - " \"{}\" Expected values are {}" - ).format(joined_filters, expected_hosts)) + joined_filters = ", ".join( + ['"{}"'.format(item) for item in self.hosts_filter] + ) + expected_hosts = ", ".join( + ['"{}"'.format(item) for item in self.all_host_names] + ) + self.log.warning( + ( + "Host filters containt invalid host names:" + ' "{}" Expected values are {}' + ).format(joined_filters, expected_hosts) + ) super(HostsEnumEntity, self).schema_validations() class AppsEnumEntity(BaseEnumEntity): """Enum of applications for project anatomy attributes.""" + schema_types = ["apps-enum"] def _item_initialization(self): @@ -271,7 +268,7 @@ class AppsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -352,7 +349,7 @@ class ToolsEnumEntity(BaseEnumEntity): self.value_on_not_set = [] self.enum_items = [] self.valid_keys = set() - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.placeholder = None def _get_enum_values(self): @@ -409,10 +406,10 @@ class TaskTypeEnumEntity(BaseEnumEntity): def _item_initialization(self): self.multiselection = self.schema_data.get("multiselection", True) if self.multiselection: - self.valid_value_types = (list, ) + self.valid_value_types = (list,) self.value_on_not_set = [] else: - self.valid_value_types = (STRING_TYPE, ) + self.valid_value_types = (STRING_TYPE,) self.value_on_not_set = "" self.enum_items = [] @@ -507,7 +504,8 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): enum_items_list = [] for server_name, url_entity in deadline_urls_entity.items(): enum_items_list.append( - {server_name: "{}: {}".format(server_name, url_entity.value)}) + {server_name: "{}: {}".format(server_name, url_entity.value)} + ) valid_keys.add(server_name) return enum_items_list, valid_keys @@ -530,6 
+528,50 @@ class DeadlineUrlEnumEntity(BaseEnumEntity): self._current_value = tuple(self.valid_keys)[0] +class ShotgridUrlEnumEntity(BaseEnumEntity): + schema_types = ["shotgrid_url-enum"] + + def _item_initialization(self): + self.multiselection = False + + self.enum_items = [] + self.valid_keys = set() + + self.valid_value_types = (STRING_TYPE,) + self.value_on_not_set = "" + + # GUI attribute + self.placeholder = self.schema_data.get("placeholder") + + def _get_enum_values(self): + shotgrid_settings = self.get_entity_from_path( + "system_settings/modules/shotgrid/shotgrid_settings" + ) + + valid_keys = set() + enum_items_list = [] + for server_name, settings in shotgrid_settings.items(): + enum_items_list.append( + { + server_name: "{}: {}".format( + server_name, settings["shotgrid_url"].value + ) + } + ) + valid_keys.add(server_name) + return enum_items_list, valid_keys + + def set_override_state(self, *args, **kwargs): + super(ShotgridUrlEnumEntity, self).set_override_state(*args, **kwargs) + + self.enum_items, self.valid_keys = self._get_enum_values() + if not self.valid_keys: + self._current_value = "" + + elif self._current_value not in self.valid_keys: + self._current_value = tuple(self.valid_keys)[0] + + class AnatomyTemplatesEnumEntity(BaseEnumEntity): schema_types = ["anatomy-templates-enum"] diff --git a/openpype/settings/entities/input_entities.py b/openpype/settings/entities/input_entities.py index 3dcd238672..89f12afd9b 100644 --- a/openpype/settings/entities/input_entities.py +++ b/openpype/settings/entities/input_entities.py @@ -15,10 +15,7 @@ from .exceptions import ( EntitySchemaError ) -from openpype.settings.constants import ( - METADATA_KEYS, - M_ENVIRONMENT_KEY -) +from openpype.settings.constants import METADATA_KEYS class EndpointEntity(ItemEntity): @@ -534,13 +531,7 @@ class RawJsonEntity(InputEntity): @property def metadata(self): - output = {} - if isinstance(self._current_value, dict) and self.is_env_group: - output[M_ENVIRONMENT_KEY] = { - self.env_group_key: list(self._current_value.keys()) - } - - return output + return {} @property def has_unsaved_changes(self): @@ -549,15 +540,6 @@ class RawJsonEntity(InputEntity): result = self.metadata != self._metadata_for_current_state() return result - def schema_validations(self): - if self.store_as_string and self.is_env_group: - reason = ( - "RawJson entity can't store environment group metadata" - " as string." 
- ) - raise EntitySchemaError(self, reason) - super(RawJsonEntity, self).schema_validations() - def _convert_to_valid_type(self, value): if isinstance(value, STRING_TYPE): try: @@ -583,9 +565,6 @@ class RawJsonEntity(InputEntity): def _settings_value(self): value = super(RawJsonEntity, self)._settings_value() - if self.is_env_group and isinstance(value, dict): - value.update(self.metadata) - if self.store_as_string: return json.dumps(value) return value diff --git a/openpype/settings/entities/op_version_entity.py b/openpype/settings/entities/op_version_entity.py index 782d65a446..f79048222e 100644 --- a/openpype/settings/entities/op_version_entity.py +++ b/openpype/settings/entities/op_version_entity.py @@ -66,24 +66,13 @@ class OpenPypeVersionInput(TextEntity): return super(OpenPypeVersionInput, self).convert_to_valid_type(value) -class ProductionVersionsInputEntity(OpenPypeVersionInput): +class VersionsInputEntity(OpenPypeVersionInput): """Entity meant only for global settings to define production version.""" - schema_types = ["production-versions-text"] + schema_types = ["versions-text"] def _get_openpype_versions(self): - versions = get_remote_versions(staging=False, production=True) + versions = get_remote_versions() if versions is None: return [] versions.append(get_installed_version()) return sorted(versions) - - -class StagingVersionsInputEntity(OpenPypeVersionInput): - """Entity meant only for global settings to define staging version.""" - schema_types = ["staging-versions-text"] - - def _get_openpype_versions(self): - versions = get_remote_versions(staging=True, production=False) - if versions is None: - return [] - return sorted(versions) diff --git a/openpype/settings/entities/root_entities.py b/openpype/settings/entities/root_entities.py index edb4407679..ff76fa5180 100644 --- a/openpype/settings/entities/root_entities.py +++ b/openpype/settings/entities/root_entities.py @@ -52,7 +52,6 @@ from openpype.settings.lib import ( get_available_studio_project_settings_overrides_versions, get_available_studio_project_anatomy_overrides_versions, - find_environments, apply_overrides ) @@ -422,11 +421,6 @@ class RootEntity(BaseItemEntity): """ pass - @abstractmethod - def _validate_defaults_to_save(self, value): - """Validate default values before save.""" - pass - def _save_default_values(self): """Save default values. @@ -435,7 +429,6 @@ class RootEntity(BaseItemEntity): DEFAULTS. """ settings_value = self.settings_value() - self._validate_defaults_to_save(settings_value) defaults_dir = self.defaults_dir() for file_path, value in settings_value.items(): @@ -604,8 +597,6 @@ class SystemSettings(RootEntity): def _save_studio_values(self): settings_value = self.settings_value() - self._validate_duplicated_env_group(settings_value) - self.log.debug("Saving system settings: {}".format( json.dumps(settings_value, indent=4) )) @@ -613,29 +604,6 @@ class SystemSettings(RootEntity): # Reset source version after restart self._source_version = None - def _validate_defaults_to_save(self, value): - """Valiations of default values before save.""" - self._validate_duplicated_env_group(value) - - def _validate_duplicated_env_group(self, value, override_state=None): - """ Validate duplicated environment groups. - - Raises: - DuplicatedEnvGroups: When value contain duplicated env groups. 
- """ - value = copy.deepcopy(value) - if override_state is None: - override_state = self._override_state - - if override_state is OverrideState.STUDIO: - default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] - final_value = apply_overrides(default_values, value) - else: - final_value = value - - # Check if final_value contain duplicated environment groups - find_environments(final_value) - def _save_project_values(self): """System settings can't have project overrides. @@ -911,10 +879,6 @@ class ProjectSettings(RootEntity): if warnings: raise SaveWarningExc(warnings) - def _validate_defaults_to_save(self, value): - """Valiations of default values before save.""" - pass - def _validate_values_to_save(self, value): pass diff --git a/openpype/settings/entities/schemas/README.md b/openpype/settings/entities/schemas/README.md index fbfd699937..b4c878fe0f 100644 --- a/openpype/settings/entities/schemas/README.md +++ b/openpype/settings/entities/schemas/README.md @@ -46,8 +46,7 @@ }, { "type": "raw-json", "label": "{host_label} Environments", - "key": "{host_name}_environments", - "env_group_key": "{host_name}" + "key": "{host_name}_environments" }, { "type": "path", "key": "{host_name}_executables", @@ -745,6 +744,7 @@ How output of the schema could look like on save: ### label - add label with note or explanations - it is possible to use html tags inside the label +- set `work_wrap` to `true`/`false` if you want to enable word wrapping in UI (default: `false`) ``` { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_main.json b/openpype/settings/entities/schemas/projects_schema/schema_main.json index 8e4eba86ef..0b9fbf7470 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_main.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_main.json @@ -62,6 +62,14 @@ "type": "schema", "name": "schema_project_ftrack" }, + { + "type": "schema", + "name": "schema_project_shotgrid" + }, + { + "type": "schema", + "name": "schema_project_kitsu" + }, { "type": "schema", "name": "schema_project_deadline" @@ -82,6 +90,10 @@ "type": "schema", "name": "schema_project_nuke" }, + { + "type": "schema", + "name": "schema_project_fusion" + }, { "type": "schema", "name": "schema_project_hiero" @@ -126,6 +138,10 @@ "type": "schema", "name": "schema_project_standalonepublisher" }, + { + "type": "schema", + "name": "schema_project_traypublisher" + }, { "type": "schema", "name": "schema_project_webpublisher" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json index 4c4cd225ab..8dc83f5506 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_aftereffects.json @@ -5,6 +5,46 @@ "label": "AfterEffects", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "create", + "label": "Creator plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "RenderCreator", + "label": "Create render", + "children": [ + { + "type": "list", + "key": "defaults", + "label": "Default Variants", + "object_type": "text", + "docstring": 
"Fill default variant(s) (like 'Main' or 'Default') used in subset name creation." + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json index af09329a03..725d9bfb08 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_blender.json @@ -5,6 +5,23 @@ "label": "Blender", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "schema_template", "name": "template_workfile_options", @@ -12,6 +29,10 @@ "workfile_builder/builder_on_start", "workfile_builder/profiles" ] + }, + { + "type": "schema", + "name": "schema_blender_publish" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_celaction.json b/openpype/settings/entities/schemas/projects_schema/schema_project_celaction.json index 500e5b2298..2320d9ae26 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_celaction.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_celaction.json @@ -5,6 +5,23 @@ "label": "CelAction", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "dict", "collapsible": true, @@ -14,45 +31,24 @@ { "type": "dict", "collapsible": true, - "checkbox_key": "enabled", - "key": "ExtractCelactionDeadline", - "label": "ExtractCelactionDeadline", + "key": "CollectRenderPath", + "label": "CollectRenderPath", "is_group": true, "children": [ { - "type": "boolean", - "key": "enabled", - "label": "Enabled" + "type": "text", + "key": "output_extension", + "label": "Output render file extension" }, { "type": "text", - "key": "deadline_department", - "label": "Deadline apartment" - }, - { - "type": "number", - "key": "deadline_priority", - "label": "Deadline priority" + "key": "anatomy_template_key_render_files", + "label": "Anatomy template key: render files" }, { "type": "text", - "key": "deadline_pool", - "label": "Deadline pool" - }, - { - "type": "text", - "key": "deadline_pool_secondary", - "label": "Deadline pool (secondary)" - }, - { - "type": "text", - "key": "deadline_group", - "label": "Deadline Group" - }, - { - "type": "number", - "key": "deadline_chunk_size", - "label": "Deadline Chunk size" + "key": "anatomy_template_key_metadata", + "label": "Anatomy template key: metadata job file" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json index e6097a2b14..03f6489a41 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_deadline.json @@ -30,6 +30,24 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectDeadlinePools", + "label": "Default Deadline Pools", + "children": [ + { + "type": "text", + "key": "primary_pool", + 
"label": "Primary Pool" + }, + { + "type": "text", + "key": "secondary_pool", + "label": "Secondary Pool" + } + ] + }, { "type": "dict", "collapsible": true, @@ -112,11 +130,26 @@ "key": "use_published", "label": "Use Published scene" }, + { + "type": "boolean", + "key": "import_reference", + "label": "Use Scene with Imported Reference" + }, { "type": "boolean", "key": "asset_dependencies", "label": "Use Asset dependencies" }, + { + "type": "number", + "key": "priority", + "label": "Priority" + }, + { + "type": "number", + "key": "tile_priority", + "label": "Tile Assembler Priority" + }, { "type": "text", "key": "group", @@ -162,6 +195,12 @@ ] } + }, + { + "type": "boolean", + "key": "strict_error_checking", + "label": "Strict Error Checking", + "default": true } ] }, @@ -192,6 +231,9 @@ "key": "use_published", "label": "Use Published scene" }, + { + "type": "splitter" + }, { "type": "number", "key": "priority", @@ -203,20 +245,21 @@ "label": "Chunk Size" }, { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" + "type": "number", + "key": "concurrent_tasks", + "label": "Number of concurrent tasks" }, { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" + "type": "splitter" }, { "type": "text", "key": "group", "label": "Group" }, + { + "type": "splitter" + }, { "type": "text", "key": "department", @@ -289,16 +332,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -348,16 +381,6 @@ "key": "chunk_size", "label": "Chunk Size" }, - { - "type": "text", - "key": "primary_pool", - "label": "Primary Pool" - }, - { - "type": "text", - "key": "secondary_pool", - "label": "Secondary Pool" - }, { "type": "text", "key": "group", @@ -375,6 +398,56 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CelactionSubmitDeadline", + "label": "Celaction Submit Deadline", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "deadline_department", + "label": "Deadline apartment" + }, + { + "type": "number", + "key": "deadline_priority", + "label": "Deadline priority" + }, + { + "type": "text", + "key": "deadline_pool", + "label": "Deadline pool" + }, + { + "type": "text", + "key": "deadline_pool_secondary", + "label": "Deadline pool (secondary)" + }, + { + "type": "text", + "key": "deadline_group", + "label": "Deadline Group" + }, + { + "type": "number", + "key": "deadline_chunk_size", + "label": "Deadline Chunk size" + }, + { + "type": "text", + "key": "deadline_job_delay", + "label": "Delay job (timecode dd:hh:mm:ss)" + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json index e352f8b132..0f20c0efbe 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_flame.json @@ -5,6 +5,77 @@ "label": "Flame", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + }, + { + "key": 
"project", + "type": "dict", + "label": "Project", + "collapsible": false, + "children": [ + { + "type": "form", + "children": [ + { + "type": "text", + "key": "colourPolicy", + "label": "Colour Policy (name or path)" + }, + { + "type": "text", + "key": "frameDepth", + "label": "Image Depth" + }, + { + "type": "text", + "key": "fieldDominance", + "label": "Field Dominance" + } + ] + } + ] + }, + { + "key": "profilesMapping", + "type": "dict", + "label": "Profile names mapping", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "flameName", + "label": "Flame name" + }, + { + "type": "text", + "key": "ocioName", + "label": "OCIO name" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -123,6 +194,21 @@ "type": "number", "key": "handleEnd", "label": "Handle end (tail)" + }, + { + "type": "boolean", + "key": "includeHandles", + "label": "Enable handles including" + }, + { + "type": "boolean", + "key": "retimedHandles", + "label": "Enable retimed handles" + }, + { + "type": "boolean", + "key": "retimedFramerange", + "label": "Enable retimed shot frameranges" } ] } @@ -136,6 +222,87 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectTimelineInstances", + "label": "Collect Timeline Instances", + "is_group": true, + "children": [ + { + "type": "collapsible-wrap", + "label": "XML presets attributes parsable from segment comments", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "xml_preset_attrs_from_comments", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Attribute name" + }, + { + "key": "type", + "label": "Attribute type", + "type": "enum", + "default": "number", + "enum_items": [ + { + "number": "number" + }, + { + "float": "float" + }, + { + "string": "string" + } + ] + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Add tasks", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "add_tasks", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "name", + "label": "Task name" + }, + { + "key": "type", + "label": "Task type", + "multiselection": false, + "type": "task-types-enum" + }, + { + "type": "boolean", + "key": "create_batch_group", + "label": "Create batch group" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -157,25 +324,19 @@ "type": "dict", "children": [ { - "key": "ext", - "label": "Output extension", - "type": "text" + "type": "boolean", + "key": "active", + "label": "Is active", + "default": true }, { - "key": "xml_preset_file", - "label": "XML preset file (with ext)", - "type": "text" - }, - { - "key": "xml_preset_dir", - "label": "XML preset folder (optional)", - "type": "text" + "type": "separator" }, { "key": "export_type", "label": "Eport clip type", "type": "enum", - "default": "File Sequence", + "default": "Sequence Publish", "enum_items": [ { "Movie": "Movie" @@ -187,45 +348,125 @@ "Sequence Publish": "Sequence Publish" } ] - }, { - "type": "separator" + "key": "ext", + "label": "Output extension", + "type": "text", + "default": "exr" }, { - "type": "boolean", - "key": "ignore_comment_attrs", - "label": "Ignore attributes parsed from a segment comments" - }, - { - "type": "separator" + "key": "xml_preset_file", + "label": "XML preset file (with 
ext)", + "type": "text" }, { "key": "colorspace_out", "label": "Output color (imageio)", - "type": "text" + "type": "text", + "default": "linear" }, { - "type": "separator" + "type": "collapsible-wrap", + "label": "Other parameters", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "xml_preset_dir", + "label": "XML preset folder (optional)", + "type": "text" + }, + { + "type": "separator" + }, + { + "type": "boolean", + "key": "parsed_comment_attrs", + "label": "Include parsed attributes from comments", + "default": false + + }, + { + "type": "separator" + }, + { + "type": "collapsible-wrap", + "label": "Representation", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "boolean", + "key": "representation_add_range", + "label": "Add frame range to representation" + }, + { + "type": "list", + "key": "representation_tags", + "label": "Add representation tags", + "object_type": { + "type": "text", + "multiline": false + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Loading during publish", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "boolean", + "key": "load_to_batch_group", + "label": "Load to batch group reel", + "default": false + }, + { + "type": "text", + "key": "batch_group_loader_name", + "label": "Use loader name" + } + ] + } + + ] }, { - "type": "boolean", - "key": "representation_add_range", - "label": "Add frame range to representation" - }, - { - "type": "list", - "key": "representation_tags", - "label": "Add representation tags", - "object_type": { - "type": "text", - "multiline": false - } + "type": "collapsible-wrap", + "label": "Filtering", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "filter_path_regex", + "label": "Regex in clip path", + "type": "text", + "default": ".*" + } + ] } ] } } ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateBatchGroup", + "label": "IntegrateBatchGroup", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] } ] }, @@ -279,6 +520,70 @@ "type": "text", "key": "clip_name_template", "label": "Clip name template" + }, + { + "type": "text", + "key": "layer_rename_template", + "label": "Layer name template" + }, + { + "type": "list", + "key": "layer_rename_patterns", + "label": "Layer rename patters", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClipBatch", + "label": "Load as clip to current batch", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "reel_name", + "label": "Reel name" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "clip_name_template", + "label": "Clip name template" + }, + { + "type": "text", + "key": "layer_rename_template", + "label": "Layer name template" + }, + { + "type": "list", + "key": "layer_rename_patterns", + "label": "Layer rename patters", + "object_type": "text" } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json index cb59e9d67e..da414cc961 100644 --- 
a/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_ftrack.json @@ -299,24 +299,6 @@ } ] }, - { - "type": "dict", - "key": "first_version_status", - "label": "Set status on first created version", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "text", - "key": "status", - "label": "Status" - } - ] - }, { "type": "dict", "key": "next_task_update", @@ -369,6 +351,97 @@ "key": "name_sorting" } ] + }, + { + "type": "dict", + "key": "transfer_values_of_hierarchical_attributes", + "label": "Action to transfer hierarchical attribute values", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text" + } + ] + }, + { + "key": "create_daily_review_session", + "label": "Create daily review session", + "type": "dict", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled" + }, + { + "type": "list", + "key": "role_list", + "label": "Roles", + "object_type": "text", + "use_label_wrap": true + }, + { + "type": "boolean", + "key": "cycle_enabled", + "label": "Run automatically every day" + }, + { + "type": "separator" + }, + { + "type": "list-strict", + "key": "cycle_hour_start", + "label": "Create daily review session at", + "tooltip": "This may take effect on the next day", + "object_types": [ + { + "label": "H:", + "type": "number", + "minimum": 0, + "maximum": 23, + "decimal": 0 + }, { + "label": "M:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + }, { + "label": "S:", + "type": "number", + "minimum": 0, + "maximum": 59, + "decimal": 0 + } + ] + }, + { + "type": "label", + "label": "This can't be overridden per project and any change will take effect on the next day or on restart of event server." + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "review_session_template", + "label": "ReviewSession template", + "placeholder": "Default: {yy}{mm}{dd}" + }, + { + "type": "label", + "label": "Possible formatting keys in template:
- \"project_name\" - <Name of project>
- \"d\" - <Day of month number> in shortest possible way.
- \"dd\" - <Day of month number> with 2 digits.
- \"ddd\" - <Week day name> shortened week day. e.g.: `Mon`, ...
- \"dddd\" - <Week day name> full name of week day. e.g.: `Monday`, ...
- \"m\" - <Month number> in shortest possible way. e.g.: `1` if January
- \"mm\" - <Month number> with 2 digits.
- \"mmm\" - <Month name> shortened month name. e.g.: `Jan`, ...
- \"mmmm\" -<Month name> full month name. e.g.: `January`, ...
- \"yy\" - <Year number> shortened year. e.g.: `19`, `20`, ...
- \"yyyy\" - <Year number> full year. e.g.: `2019`, `2020`, ..." + } + ] } ] }, @@ -725,6 +798,69 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectFtrackCustomAttributeData", + "label": "Collect Custom Attribute Data", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Collect custom attributes from ftrack for ftrack entities that can be used in some templates during publishing." + }, + { + "type": "list", + "key": "custom_attribute_keys", + "label": "Custom attribute keys", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "key": "IntegrateHierarchyToFtrack", + "label": "Integrate Hierarchy to ftrack", + "is_group": true, + "collapsible": true, + "children": [ + { + "type": "label", + "label": "Set task status on new task creation. Ftrack's default status is used otherwise." + }, + { + "type": "list", + "key": "create_task_status_profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "text", + "key": "status_name", + "label": "Status name" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, @@ -738,10 +874,15 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "label", + "label": "Template may contain formatting keys intent, comment, host_name, app_name, app_label, published_paths and source." + }, { "type": "text", - "key": "note_with_intent_template", - "label": "Note with intent template" + "key": "note_template", + "label": "Note template", + "multiline": true }, { "type": "list", @@ -751,6 +892,44 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackDescription", + "label": "Integrate Ftrack Description", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Add description to integrated AssetVersion." + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Template may contain formatting keys intent and comment." 
+ }, + { + "type": "text", + "key": "description_template", + "label": "Description template" + } + ] + }, { "type": "dict", "collapsible": true, @@ -771,10 +950,25 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateFtrackComponentOverwrite", + "label": "IntegrateFtrackComponentOverwrite", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "dict", "key": "IntegrateFtrackInstance", - "label": "IntegrateFtrackInstance", + "label": "Integrate Ftrack Instance", "is_group": true, "children": [ { @@ -784,6 +978,125 @@ "object_type": { "type": "text" } + }, + { + "type": "boolean", + "key": "keep_first_subset_name_for_review", + "label": "Make subset name as first asset name", + "default": true + }, + { + "type": "list", + "collapsible": true, + "key": "asset_versions_status_profiles", + "label": "AssetVersion status on publish", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "family", + "label": "Family", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "status", + "label": "Status name", + "type": "text" + } + ] + } + }, + { + "key": "additional_metadata_keys", + "label": "Additional metadata keys on components", + "type": "enum", + "multiselection": true, + "enum_items": [ + {"openpype_version": "OpenPype version"}, + {"frame_start": "Frame start"}, + {"frame_end": "Frame end"}, + {"duration": "Duration"}, + {"width": "Resolution width"}, + {"height": "Resolution height"}, + {"fps": "FPS"}, + {"code": "Codec"} + ] + } + ] + }, + { + "type": "dict", + "key": "IntegrateFtrackFarmStatus", + "label": "Integrate Ftrack Farm Status", + "children": [ + { + "type": "label", + "label": "Change status of task when its subset is submitted to farm" + }, + { + "type": "list", + "collapsible": true, + "key": "farm_status_profiles", + "label": "Farm status profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "status_name", + "label": "Status name", + "type": "text" + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json b/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json new file mode 100644 index 0000000000..8c62d75815 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_fusion.json @@ -0,0 +1,50 @@ +{ + "type": "dict", + "collapsible": true, + "key": "fusion", + "label": "Fusion", + "is_file": true, + "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "collapsible": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": 
"schema_imageio_file_rules" + }, + { + "key": "ocio", + "type": "dict", + "label": "OpenColorIO (OCIO)", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Set OCIO variable for Fusion" + }, + { + "type": "label", + "label": "'configFilePath' will be deprecated.
Please move values to : project_settings/{app}/imageio/ocio_config/filepath." + }, + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_global.json b/openpype/settings/entities/schemas/projects_schema/schema_project_global.json index a8bce47592..6f31f4f685 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_global.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_global.json @@ -5,6 +5,34 @@ "label": "Global", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "key": "ocio_config", + "type": "dict", + "label": "OCIO config", + "collapsible": true, + "children": [ + { + "type": "path", + "key": "filepath", + "label": "Config path", + "multiplatform": false, + "multipath": true + } + ] + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "schema", "name": "schema_global_publish" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json index c049ce3084..e6bf835c9f 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_harmony.json @@ -5,6 +5,51 @@ "label": "Harmony", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "load", + "label": "Loader plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "ImageSequenceLoader", + "label": "Load Image Sequence", + "children": [ + { + "type": "list", + "key": "family", + "label": "Families", + "object_type": "text" + }, + { + "type": "list", + "key": "representations", + "label": "Representations", + "object_type": "text" + } + ] + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json index f717eff7dd..03bfb56ad1 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_hiero.json @@ -5,6 +5,128 @@ "label": "Hiero", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "collapsible": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + }, + { + "key": "workfile", + "type": "dict", + "label": "Workfile", + "collapsible": false, + "children": [ + { + "type": "label", + "label": "'ocioconfigpath' will be deprecated.
Please move values to : project_settings/{app}/imageio/ocio_config/filepath." + }, + { + "type": "form", + "children": [ + { + "type": "enum", + "key": "ocioConfigName", + "label": "OpenColorIO Config", + "enum_items": [ + { + "nuke-default": "nuke-default" + }, + { + "aces_1.0.3": "aces_1.0.3" + }, + { + "aces_1.1": "aces_1.1" + }, + { + "custom": "custom" + } + ] + }, + { + "type": "path", + "key": "ocioconfigpath", + "label": "Custom OCIO path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "workingSpace", + "label": "Working Space" + }, + { + "type": "text", + "key": "sixteenBitLut", + "label": "16 Bit Files" + }, + { + "type": "text", + "key": "eightBitLut", + "label": "8 Bit Files" + }, + { + "type": "text", + "key": "floatLut", + "label": "Floating Point Files" + }, + { + "type": "text", + "key": "logLut", + "label": "Log Files" + }, + { + "type": "text", + "key": "viewerLut", + "label": "Viewer" + }, + { + "type": "text", + "key": "thumbnailLut", + "label": "Thumbnails" + } + ] + } + ] + }, + { + "key": "regexInputs", + "type": "dict", + "label": "Colorspace on Inputs by regex detection", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "regex", + "label": "Regex" + }, + { + "type": "text", + "key": "colorspace", + "label": "Colorspace" + } + ] + } + } + ] + } + ] + }, { "type": "dict", "collapsible": true, @@ -19,102 +141,102 @@ "is_group": true, "children": [ { - "type": "collapsible-wrap", - "label": "Shot Hierarchy And Rename Settings", - "collapsible": false, - "children": [ - { - "type": "text", - "key": "hierarchy", - "label": "Shot parent hierarchy" - }, - { - "type": "boolean", - "key": "clipRename", - "label": "Rename clips" - }, - { - "type": "text", - "key": "clipName", - "label": "Clip name template" - }, - { - "type": "number", - "key": "countFrom", - "label": "Count sequence from" - }, - { - "type": "number", - "key": "countSteps", - "label": "Stepping number" - } - ] + "type": "collapsible-wrap", + "label": "Shot Hierarchy And Rename Settings", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "hierarchy", + "label": "Shot parent hierarchy" + }, + { + "type": "boolean", + "key": "clipRename", + "label": "Rename clips" + }, + { + "type": "text", + "key": "clipName", + "label": "Clip name template" + }, + { + "type": "number", + "key": "countFrom", + "label": "Count sequence from" + }, + { + "type": "number", + "key": "countSteps", + "label": "Stepping number" + } + ] }, { - "type": "collapsible-wrap", - "label": "Shot Template Keywords", - "collapsible": false, - "children": [ - { - "type": "text", - "key": "folder", - "label": "{folder}" - }, - { - "type": "text", - "key": "episode", - "label": "{episode}" - }, - { - "type": "text", - "key": "sequence", - "label": "{sequence}" - }, - { - "type": "text", - "key": "track", - "label": "{track}" - }, - { - "type": "text", - "key": "shot", - "label": "{shot}" - } - ] + "type": "collapsible-wrap", + "label": "Shot Template Keywords", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "folder", + "label": "{folder}" + }, + { + "type": "text", + "key": "episode", + "label": "{episode}" + }, + { + "type": "text", + "key": "sequence", + "label": "{sequence}" + }, + { + "type": "text", + "key": "track", + "label": "{track}" + }, + { + "type": "text", + "key": "shot", + "label": "{shot}" + } + ] }, { - "type": "collapsible-wrap", - "label": 
"Vertical Synchronization Of Attributes", - "collapsible": false, - "children": [ - { - "type": "boolean", - "key": "vSyncOn", - "label": "Enable Vertical Sync" - } - ] + "type": "collapsible-wrap", + "label": "Vertical Synchronization Of Attributes", + "collapsible": false, + "children": [ + { + "type": "boolean", + "key": "vSyncOn", + "label": "Enable Vertical Sync" + } + ] }, { - "type": "collapsible-wrap", - "label": "Shot Attributes", - "collapsible": false, - "children": [ - { - "type": "number", - "key": "workfileFrameStart", - "label": "Workfiles Start Frame" - }, - { - "type": "number", - "key": "handleStart", - "label": "Handle start (head)" - }, - { - "type": "number", - "key": "handleEnd", - "label": "Handle end (tail)" - } - ] + "type": "collapsible-wrap", + "label": "Shot Attributes", + "collapsible": false, + "children": [ + { + "type": "number", + "key": "workfileFrameStart", + "label": "Workfiles Start Frame" + }, + { + "type": "number", + "key": "handleStart", + "label": "Handle start (head)" + }, + { + "type": "number", + "key": "handleEnd", + "label": "Handle end (tail)" + } + ] } ] } @@ -206,6 +328,10 @@ { "type": "schema", "name": "schema_publish_gui_filter" + }, + { + "type": "schema", + "name": "schema_scriptsmenu" } ] -} +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json index cad99dde22..24b06f77db 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_houdini.json @@ -5,27 +5,34 @@ "label": "Houdini", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "schema", + "name": "schema_houdini_scriptshelf" + }, { "type": "schema", "name": "schema_houdini_create" }, { - "type": "dict", - "collapsible": true, - "key": "publish", - "label": "Publish plugins", - "children": [ - { - "type": "schema_template", - "name": "template_publish_plugin", - "template_data": [ - { - "key": "ValidateContainers", - "label": "ValidateContainers" - } - ] - } - ] + "type": "schema", + "name": "schema_houdini_publish" } ] -} +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json new file mode 100644 index 0000000000..fb47670e74 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_kitsu.json @@ -0,0 +1,61 @@ +{ + "type": "dict", + "key": "kitsu", + "label": "Kitsu", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "dict", + "key": "entities_naming_pattern", + "label": "Entities naming pattern", + "children": [ + { + "type": "text", + "key": "episode", + "label": "Episode:" + }, + { + "type": "text", + "key": "sequence", + "label": "Sequence:" + }, + { + "type": "text", + "key": "shot", + "label": "Shot:" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "label", + "label": "Integrator" + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateKitsuNote", + "label": "Integrate Kitsu Note", + 
"children": [ + { + "type": "boolean", + "key": "set_status_note", + "label": "Set status on note" + }, + { + "type": "text", + "key": "note_status_shortname", + "label": "Note shortname" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json index cc70516c72..47dfb37024 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_maya.json @@ -5,6 +5,104 @@ "label": "Maya", "is_file": true, "children": [ + { + "type": "boolean", + "key": "open_workfile_post_initialization", + "label": "Open Workfile Post Initialization" + }, + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "collapsible": true, + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + }, + { + "key": "colorManagementPreference_v2", + "type": "dict", + "label": "Color Management Preference v2 (Maya 2022+)", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Use Color Management Preference v2" + }, + { + "type": "label", + "label": "'configFilePath' will be deprecated.
Please move values to : project_settings/{app}/imageio/ocio_config/filepath." + }, + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "renderSpace", + "label": "Rendering Space" + }, + { + "type": "text", + "key": "displayName", + "label": "Display" + }, + { + "type": "text", + "key": "viewName", + "label": "View" + } + ] + }, + { + "key": "colorManagementPreference", + "type": "dict", + "label": "Color Management Preference (legacy)", + "collapsible": true, + "children": [ + { + "type": "label", + "label": "'configFilePath' will be deprecated.
Please move values to : project_settings/{app}/imageio/ocio_config/filepath." + }, + { + "type": "path", + "key": "configFilePath", + "label": "OCIO Config File Path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "renderSpace", + "label": "Rendering Space" + }, + { + "type": "text", + "key": "viewTransform", + "label": "Viewer Transform" + } + ] + } + ] + }, + { + "type": "text", + "multiline" : true, + "use_label_wrap": true, + "key": "mel_workspace", + "label": "Maya MEL Workspace" + }, { "type": "dict-modifiable", "key": "ext_mapping", @@ -22,6 +120,12 @@ "label": "Maya Directory Mapping", "is_group": true, "children": [ + { + "type": "boolean", + "key": "use_env_var_as_root", + "label": "Use env var placeholder in referenced paths", + "docstring": "Use ${} placeholder instead of absolute value of a root in referenced filepaths." + }, { "type": "boolean", "key": "enabled", @@ -49,7 +153,11 @@ }, { "type": "schema", - "name": "schema_maya_scriptsmenu" + "name": "schema_scriptsmenu" + }, + { + "type": "schema", + "name": "schema_maya_render_settings" }, { "type": "schema", @@ -67,6 +175,10 @@ "type": "schema", "name": "schema_workfile_build" }, + { + "type": "schema", + "name": "schema_templated_workfile_build" + }, { "type": "schema", "name": "schema_publish_gui_filter" diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json index 9ab5fc65fb..26c64e6219 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_nuke.json @@ -46,6 +46,10 @@ } ] }, + { + "type": "schema", + "name": "schema_nuke_imageio" + }, { "type": "dict", "collapsible": true, @@ -79,6 +83,14 @@ } ] }, + { + "type": "schema", + "name": "schema_scriptsmenu" + }, + { + "type": "schema", + "name": "schema_nuke_scriptsgizmo" + }, { "type": "dict", "collapsible": true, @@ -87,55 +99,172 @@ "children": [ { "type": "dict", - "collapsible": false, + "collapsible": true, "key": "CreateWriteRender", "label": "CreateWriteRender", "is_group": true, "children": [ { "type": "text", - "key": "fpath_template", - "label": "Path template" + "key": "temp_rendering_path_template", + "label": "Temporary rendering path template" }, { "type": "list", - "key": "defaults", - "label": "Subset name defaults", + "key": "default_variants", + "label": "Default variants", "object_type": { "type": "text" } + }, + { + "type": "schema_template", + "name": "template_nuke_write_attrs" + }, + { + "key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } } ] }, { "type": "dict", - "collapsible": false, + "collapsible": true, "key": "CreateWritePrerender", "label": "CreateWritePrerender", "is_group": true, "children": [ { "type": "text", - "key": "fpath_template", - "label": "Path template" - }, - { - "type": "boolean", - "key": "use_range_limit", - "label": "Use Frame range limit by default" + "key": "temp_rendering_path_template", + "label": "Temporary rendering path template" }, { "type": "list", - 
"key": "defaults", - "label": "Subset name defaults", + "key": "default_variants", + "label": "Default variants", "object_type": { "type": "text" } }, { - "type": "boolean", - "key": "reviewable", - "label": "Add reviewable toggle" + "type": "schema_template", + "name": "template_nuke_write_attrs" + }, + { + "key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateWriteImage", + "label": "CreateWriteImage", + "is_group": true, + "children": [ + { + "type": "text", + "key": "temp_rendering_path_template", + "label": "Temporary rendering path template" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "schema_template", + "name": "template_nuke_write_attrs" + }, + { + "key": "prenodes", + "label": "Pre write nodes", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "nodeclass", + "label": "Node class", + "type": "text" + }, + { + "key": "dependent", + "label": "Outside node dependency", + "type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Node knobs", + "key": "knobs" + } + ] + } + ] + } } ] } @@ -155,9 +284,13 @@ "type": "schema_template", "name": "template_workfile_options" }, + { + "type": "schema", + "name": "schema_templated_workfile_build" + }, { "type": "schema", "name": "schema_publish_gui_filter" } ] -} \ No newline at end of file +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json index b499ccc4be..0071e632af 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_photoshop.json @@ -5,6 +5,23 @@ "label": "Photoshop", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "dict", "collapsible": true, @@ -42,12 +59,18 @@ "children": [ { "type": "label", - "label": "Set color for publishable layers, set its resulting family and template for subset name. Can create flatten image from published instances" + "label": "Set color for publishable layers, set its resulting family and template for subset name. 
\nCan create flatten image from published instances.(Applicable only for remote publishing!)" }, { - "type": "boolean", "key": "create_flatten_image", - "label": "Create flatten image" + "label": "Create flatten image", + "type": "enum", + "multiselection": false, + "enum_items": [ + { "flatten_with_images": "Flatten with images" }, + { "flatten_only": "Flatten only" }, + { "no": "No" } + ] }, { "type": "text", @@ -108,6 +131,52 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectInstances", + "label": "Collect Instances", + "children": [ + { + "type": "label", + "label": "Name for flatten image created if no image instance present" + }, + { + "type": "text", + "key": "flatten_subset_template", + "label": "Subset template for flatten image" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CollectReview", + "label": "Collect Review", + "children": [ + { + "type": "boolean", + "key": "publish", + "label": "Active" + } + ] + }, + { + "type": "dict", + "key": "CollectVersion", + "label": "Collect Version", + "children": [ + { + "type": "label", + "label": "Synchronize version for image and review instances by workfile version." + }, + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "schema_template", "name": "template_publish_plugin", @@ -169,6 +238,15 @@ "key": "make_image_sequence", "label": "Makes an image sequence instead of a flatten image" }, + { + "type": "number", + "key": "max_downscale_size", + "label": "Maximum size of sources for review", + "tooltip": "FFMpeg can only handle limited resolution for creation of review and/or thumbnail", + "minimum": 300, + "maximum": 16384, + "decimal": 0 + }, { "type": "dict", "collapsible": false, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json index 68e405b7d7..b326f22394 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_resolve.json @@ -5,6 +5,23 @@ "label": "DaVinci Resolve", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json new file mode 100644 index 0000000000..4faeca89f3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_shotgrid.json @@ -0,0 +1,98 @@ +{ + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "is_file": true, + "children": [ + { + "type": "number", + "key": "shotgrid_project_id", + "label": "Shotgrid project id" + }, + { + "type": "shotgrid_url-enum", + "key": "shotgrid_server", + "label": "Shotgrid Server" + }, + { + "type": "dict", + "key": "event", + "label": "Event Handler", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "key": "fields", + "label": "Fields Template", + "collapsible": true, + "children": [ + { + "type": "dict", + "key": "asset", + "label": "Asset", + 
"collapsible": true, + "children": [ + { + "type": "text", + "key": "type", + "label": "Asset Type" + } + ] + }, + { + "type": "dict", + "key": "sequence", + "label": "Sequence", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + } + ] + }, + { + "type": "dict", + "key": "shot", + "label": "Shot", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "episode_link", + "label": "Episode link" + }, + { + "type": "text", + "key": "sequence_link", + "label": "Sequence link" + } + ] + }, + { + "type": "dict", + "key": "task", + "label": "Task", + "collapsible": true, + "children": [ + { + "type": "text", + "key": "step", + "label": "Step link" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json index 37fcaac69f..ae25007683 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_standalonepublisher.json @@ -271,6 +271,11 @@ "label": "Collect Instance Hierarchy", "is_group": true, "children": [ + { + "type": "boolean", + "key": "shot_rename", + "label": "Shot Rename" + }, { "type": "text", "key": "shot_rename_template", @@ -289,7 +294,13 @@ "type": "dict", "key": "shot_add_hierarchy", "label": "Shot hierarchy", + "checkbox_key": "enabled", "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, { "type": "text", "key": "parents_path", @@ -343,8 +354,8 @@ "type": "number", "key": "timeline_frame_start", "label": "Timeline start frame", - "default": 900000, - "minimum": 1, + "default": 90000, + "minimum": 0, "maximum": 10000000 }, { diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json new file mode 100644 index 0000000000..2ef1d2a414 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_traypublisher.json @@ -0,0 +1,351 @@ +{ + "type": "dict", + "collapsible": true, + "key": "traypublisher", + "label": "Tray Publisher", + "is_file": true, + "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "list", + "collapsible": true, + "key": "simple_creators", + "label": "Creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "family", + "label": "Family" + }, + { + "type": "text", + "key": "identifier", + "label": "Identifier", + "placeholder": "< Use 'Family' >", + "tooltip": "All creators must have unique identifier.\nBy default is used 'family' but if you need to have more creators with same families\nyou have to set identifier too." 
+ }, + { + "type": "text", + "key": "label", + "label": "Label" + }, + { + "type": "text", + "key": "icon", + "label": "Icon" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "description", + "label": "Description" + }, + { + "type": "text", + "key": "detailed_description", + "label": "Detailed Description", + "multiline": true + }, + { + "type": "separator" + }, + { + "key": "allow_sequences", + "label": "Allow sequences", + "type": "boolean" + }, + { + "key": "allow_multiple_items", + "label": "Allow multiple items", + "type": "boolean" + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] + } + }, + { + "type": "dict", + "collapsible": true, + "key": "editorial_creators", + "label": "Editorial creator plugins", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + { + "type": "dict", + "collapsible": true, + "key": "editorial_simple", + "label": "Editorial simple creator", + "use_label_wrap": true, + "collapsible_key": true, + "children": [ + + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "splitter" + }, + { + "type": "collapsible-wrap", + "label": "Shot metadata creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "key": "clip_name_tokenizer", + "label": "Clip name tokenizer", + "type": "dict-modifiable", + "highlight_content": true, + "tooltip": "Uses regex expressions to create tokens. \nThose can be used later in the \"Shot rename\" creator \nor \"Shot hierarchy\". \n\nTokens should be decorated with \"_\" on each side", + "object_type": { + "type": "text" + } + }, + { + "type": "dict", + "key": "shot_rename", + "label": "Shot rename", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "shot_rename_template", + "label": "Shot rename template", + "tooltip": "The template only supports Anatomy keys and Tokens \nfrom \"Clip name tokenizer\"" + } + ] + }, + { + "type": "dict", + "key": "shot_hierarchy", + "label": "Shot hierarchy", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "parents_path", + "label": "Parents path template", + "tooltip": "Using keys from \"Token to parent converter\" or tokens directly" + }, + { + "key": "parents", + "label": "Token to parent converter", + "type": "list", + "highlight_content": true, + "tooltip": "The left side is the key to be used in the template. 
\nThe right side is a value built from Tokens coming from \n\"Clip name tokenizer\"", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "type", + "label": "Parent type", + "enum_items": [ + {"Project": "Project"}, + {"Folder": "Folder"}, + {"Episode": "Episode"}, + {"Sequence": "Sequence"} + ] + }, + { + "type": "text", + "key": "name", + "label": "Parent token name", + "tooltip": "Unique name used in \"Parent path template\"" + }, + { + "type": "text", + "key": "value", + "label": "Parent name value", + "tooltip": "Template where any text, Anatomy keys and Tokens could be used" + } + ] + } + } + ] + }, + { + "key": "shot_add_tasks", + "label": "Add tasks to shot", + "type": "dict-modifiable", + "highlight_content": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "task-types-enum", + "key": "type", + "label": "Task type", + "multiselection": false + } + ] + } + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Shot's subset creator", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "list", + "key": "family_presets", + "label": "Family presets", + "object_type": { + "type": "dict", + "children": [ + { + "type": "enum", + "key": "family", + "label": "Family", + "enum_items": [ + {"review": "review"}, + {"plate": "plate"}, + {"audio": "audio"} + ] + }, + { + "type": "text", + "key": "variant", + "label": "Variant", + "placeholder": "< Inherited >" + }, + { + "type": "boolean", + "key": "review", + "label": "Review", + "default": true + }, + { + "type": "enum", + "key": "output_file_type", + "label": "Integrating file type", + "enum_items": [ + {".mp4": "MP4"}, + {".mov": "MOV"}, + {".wav": "WAV"} + ] + } + ] + } + } + ] + } + ] + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "BatchMovieCreator", + "label": "Batch Movie Creator", + "collapsible_key": true, + "children": [ + { + "type": "label", + "label": "Allows publishing multiple video files in one go.
Name of matching asset is parsed from file names ('asset.mov', 'asset_v001.mov', 'my_asset_to_publish.mov')" + }, + { + "type": "list", + "key": "default_variants", + "label": "Default variants", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "default_tasks", + "label": "Default tasks", + "object_type": { + "type": "text" + } + }, + { + "type": "list", + "key": "extensions", + "label": "Extensions", + "use_label_wrap": true, + "collapsible_key": true, + "collapsed": false, + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "schema_template", + "name": "template_validate_plugin", + "template_data": [ + { + "key": "ValidateFrameRange", + "label": "Validate frame range" + } + ] + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json index 8286ed1193..db38c938dc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_tvpaint.json @@ -5,6 +5,23 @@ "label": "TVPaint", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, { "type": "boolean", "key": "stop_timer_on_application_exit", @@ -16,6 +33,30 @@ "key": "publish", "label": "Publish plugins", "children": [ + { + "type": "dict", + "collapsible": true, + "key": "CollectRenderScene", + "label": "Collect Render Scene", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "It is possible to fill 'render_layer' or 'variant' in subset name template with custom value.
- value of 'render_pass' is always \"beauty\"." + }, + { + "type": "text", + "key": "render_layer", + "label": "Render Layer" + } + ] + }, { "type": "dict", "collapsible": true, @@ -32,6 +73,18 @@ "key": "review_bg", "label": "Review BG color", "use_alpha": false + }, + { + "type": "enum", + "key": "families_to_review", + "label": "Families to review", + "multiselection": true, + "enum_items": [ + {"review": "review"}, + {"renderpass": "renderPass"}, + {"renderlayer": "renderLayer"}, + {"renderscene": "renderScene"} + ] } ] }, @@ -78,6 +131,47 @@ "docstring": "Validate if shot on instances metadata is same as workfiles shot" } ] + }, + { + "type": "dict", + "key": "ExtractConvertToEXR", + "label": "Extract Convert To EXR", + "is_group": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "WARNING: This plugin does not work on MacOS (using OIIO tool)." + }, + { + "type": "boolean", + "key": "replace_pngs", + "label": "Replace source PNG" + }, + { + "type": "enum", + "key": "exr_compression", + "label": "EXR Compression", + "multiselection": false, + "enum_items": [ + {"ZIP": "ZIP"}, + {"ZIPS": "ZIPS"}, + {"DWAA": "DWAA"}, + {"DWAB": "DWAB"}, + {"PIZ": "PIZ"}, + {"RLE": "RLE"}, + {"PXR24": "PXR24"}, + {"B44": "B44"}, + {"B44A": "B44A"}, + {"none": "None"} + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json index 4e197e9fc8..8988dd2ff0 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_unreal.json @@ -5,6 +5,33 @@ "label": "Unreal Engine", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "boolean", + "key": "level_sequences_for_layouts", + "label": "Generate level sequences when loading layouts" + }, + { + "type": "boolean", + "key": "delete_unmatched_assets", + "label": "Delete assets that are not matched" + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json index b76a0fa844..66ccca644d 100644 --- a/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json +++ b/openpype/settings/entities/schemas/projects_schema/schema_project_webpublisher.json @@ -5,6 +5,55 @@ "label": "Web Publisher", "is_file": true, "children": [ + { + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "is_group": true, + "children": [ + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + } + + ] + }, + { + "type": "list", + "collapsible": true, + "use_label_wrap": true, + "key": "timeout_profiles", + "label": "Timeout profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "hosts", + "label": "Host names", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum", + "multiselection": true + }, + { + "type": "separator" + }, + { + 
"type": "number", + "key": "timeout", + "label": "Timeout (sec)" + } + ] + } + }, { "type": "dict", "collapsible": true, @@ -17,6 +66,19 @@ "key": "CollectPublishedFiles", "label": "Collect Published Files", "children": [ + { + "type": "label", + "label": "Select if all versions of published items should be kept same. (As max(published) + 1.)" + }, + { + "type": "boolean", + "key": "sync_next_version", + "label": "Sync next publish version" + }, + { + "type": "label", + "label": "Configure resulting family and tags on representation based on uploaded file and task.
E.g. '.png' is uploaded >> creates an instance of the 'render' family
'Create review' in Tags >> marks the representation to create a review from." + }, { "type": "dict-modifiable", "collapsible": true, @@ -42,6 +104,9 @@ "label": "Extensions", "object_type": "text" }, + { + "type": "separator" + }, { "type": "list", "key": "families", @@ -52,9 +117,6 @@ "type": "schema", "name": "schema_representation_tags" }, - { - "type": "separator" - }, { "type": "text", "key": "result_family", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json index a2a566da0e..3667c9d5d8 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_attributes.json @@ -16,22 +16,26 @@ { "type": "number", "key": "frameStart", - "label": "Frame Start" + "label": "Frame Start", + "maximum": 999999999 }, { "type": "number", "key": "frameEnd", - "label": "Frame End" + "label": "Frame End", + "maximum": 999999999 }, { "type": "number", "key": "clipIn", - "label": "Clip In" + "label": "Clip In", + "maximum": 999999999 }, { "type": "number", "key": "clipOut", - "label": "Clip Out" + "label": "Clip Out", + "maximum": 999999999 }, { "type": "number", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json index 9f142bad09..93b6adae6b 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_anatomy_imageio.json @@ -1,10 +1,14 @@ { "type": "dict", "key": "imageio", - "label": "Color Management and Output Formats", + "label": "Color Management and Output Formats (Deprecated)", "is_file": true, "is_group": true, "children": [ + { + "type": "label", + "label": "These settings are deprecated and have moved to: project_settings/{app}/imageio.
You can right-click to copy each host's values and paste them to apply to each host as needed.
Changing these values here will not do anything." + }, { "key": "hiero", "type": "dict", @@ -253,7 +257,7 @@ { "key": "requiredNodes", "type": "list", - "label": "Required Nodes", + "label": "Plugin required", "object_type": { "type": "dict", "children": [ @@ -272,35 +276,26 @@ "label": "Nuke Node Class" }, { - "type": "splitter" - }, - { - "key": "knobs", - "label": "Knobs", - "type": "list", - "object_type": { - "type": "dict", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - } + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs", + "key": "knobs" + } + ] } + ] } }, + { + "type": "splitter" + }, { "type": "list", - "key": "customNodes", - "label": "Custom Nodes", + "key": "overrideNodes", + "label": "Plugin's node overrides", "object_type": { "type": "dict", "children": [ @@ -319,27 +314,20 @@ "label": "Nuke Node Class" }, { - "type": "splitter" + "key": "subsets", + "label": "Subsets", + "type": "list", + "object_type": "text" }, { - "key": "knobs", - "label": "Knobs", - "type": "list", - "object_type": { - "type": "dict", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - } + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs overrides", + "key": "knobs" + } + ] } ] } @@ -446,7 +434,7 @@ { "key": "flame", "type": "dict", - "label": "Flame/Flair", + "label": "Flame & Flare", "children": [ { "key": "project", diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json new file mode 100644 index 0000000000..53949f65cb --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_blender_publish.json @@ -0,0 +1,117 @@ +{ + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "label", + "label": "Validators" + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCameraZeroKeyframe", + "label": "Validate Camera Zero Keyframe" + } + ] + }, + { + "type": "collapsible-wrap", + "label": "Model", + "children": [ + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateMeshHasUvs", + "label": "Validate Mesh Has UVs" + }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateTransformZero", + "label": "Validate Transform Zero" + }, + { + "key": "ValidateNoColonsInName", + "label": "Validate No Colons In Name" + } + ] + } + ] + }, + { + "type": "splitter" + }, + { + "type": "label", + "label": "Extractors" + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractBlend", + "label": "Extract Blend", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ExtractFBX", + "label": "Extract FBX 
(model and rig)" + }, + { + "key": "ExtractABC", + "label": "Extract ABC (model and pointcache)" + }, + { + "key": "ExtractBlendAnimation", + "label": "Extract Animation as Blend" + }, + { + "key": "ExtractAnimationFBX", + "label": "Extract Animation as FBX" + }, + { + "key": "ExtractCamera", + "label": "Extract FBX Camera as FBX" + }, + { + "key": "ExtractLayout", + "label": "Extract Layout as JSON" + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json index 12043d4205..5388d04bc9 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_publish.json @@ -18,6 +18,27 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "CollectAudio", + "label": "Collect Audio", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "audio_subset_name", + "label": "Name of audio variant", + "type": "text", + "placeholder": "audioMain" + } + ] + }, { "type": "dict", "collapsible": true, @@ -39,6 +60,27 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "collect_comment_per_instance", + "label": "Collect comment per instance", + "checkbox_key": "enabled", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, { "type": "dict", "collapsible": true, @@ -126,34 +168,8 @@ "type": "dict", "collapsible": true, "checkbox_key": "enabled", - "key": "IntegrateHeroVersion", - "label": "IntegrateHeroVersion", - "is_group": true, - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "boolean", - "key": "optional", - "label": "Optional" - }, - { - "key": "families", - "label": "Families", - "type": "list", - "object_type": "text" - } - ] - }, - { - "type": "dict", - "collapsible": true, - "checkbox_key": "enabled", - "key": "ExtractJpegEXR", - "label": "ExtractJpegEXR", + "key": "ExtractThumbnail", + "label": "ExtractThumbnail", "is_group": true, "children": [ { @@ -300,6 +316,29 @@ "label": "Subsets", "type": "list", "object_type": "text" + }, + { + "type": "separator" + }, + { + "key": "custom_tags", + "label": "Custom Tags", + "type": "list", + "object_type": "text" + }, + { + "type": "label", + "label": "Use output always / only if input is 1 frame image / only if has 2+ frames or is video" + }, + { + "type": "enum", + "key": "single_frame_filter", + "default": "everytime", + "enum_items": [ + {"everytime": "Always"}, + {"single_frame": "Only if input has 1 image frame"}, + {"multi_frame": "Only if input is video or sequence of frames"} + ] } ] }, @@ -345,6 +384,15 @@ "minimum": 0, "maximum": 100000 }, + { + "type": "label", + "label": "Rescale input when it's pixel aspect ratio is not 1. Usefull for anamorph reviews." + }, + { + "key": "scale_pixel_aspect", + "label": "Scale pixel aspect", + "type": "boolean" + }, { "type": "label", "label": "Background color is used only when input have transparency and Alpha is higher than 0." 
@@ -478,11 +526,28 @@ "object_type": "text" }, { - "type": "hosts-enum", "key": "hosts", - "label": "Hosts", + "label": "Host names", + "type": "hosts-enum", "multiselection": true }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, { "type": "splitter" }, @@ -554,22 +619,28 @@ { "type": "dict", "collapsible": true, - "key": "IntegrateAssetNew", - "label": "IntegrateAssetNew", + "key": "PreIntegrateThumbnails", + "label": "Override Integrate Thumbnail Representations", "is_group": true, + "checkbox_key": "enabled", "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "label", + "label": "Explicitly set if Thumbnail representation should be integrated into DB.
If no matching profile is set, the existing state from the host implementation is kept." + }, { "type": "list", - "key": "template_name_profiles", - "label": "Template name profiles", + "key": "integrate_profiles", + "label": "Integrate profiles", "use_label_wrap": true, "object_type": { "type": "dict", "children": [ - { - "type": "label", - "label": "" - }, { "key": "families", "label": "Families", @@ -588,22 +659,37 @@ "type": "task-types-enum" }, { - "key": "tasks", + "key": "task_names", "label": "Task names", "type": "list", "object_type": "text" }, + { + "key": "subsets", + "label": "Subset names", + "type": "list", + "object_type": "text" + }, { "type": "separator" }, { - "type": "text", - "key": "template_name", - "label": "Template name" + "type": "boolean", + "key": "integrate_thumbnail", + "label": "Integrate thumbnail" } ] } - }, + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateSubsetGroup", + "label": "Integrate Subset Group", + "is_group": true, + "children": [ { "type": "list", "key": "subset_grouping_profiles", @@ -652,6 +738,220 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAssetNew", + "label": "IntegrateAsset (Legacy)", + "is_group": true, + "children": [ + { + "type": "label", + "label": "NOTE: Subset grouping profiles settings were moved to Integrate Subset Group. Please move values there." + }, + { + "type": "list", + "key": "subset_grouping_profiles", + "label": "Subset grouping profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template", + "label": "Template" + } + ] + } + }, + { + "type": "label", + "label": "NOTE: Publish template profiles settings were moved to Tools/Publish/Template name profiles. Please move values there." 
+ }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "label", + "label": "" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "tasks", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name" + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "IntegrateAsset", + "label": "Integrate Asset", + "is_group": true, + "children": [ + { + "type": "list", + "key": "skip_host_families", + "label": "Skip hosts and families", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "hosts-enum", + "key": "host", + "label": "Host" + }, + { + "type": "list", + "key": "families", + "label": "Families", + "object_type": "text" + } + ] + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "IntegrateHeroVersion", + "label": "IntegrateHeroVersion", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "label", + "label": "NOTE: Hero publish template profiles settings were moved to Tools/Publish/Hero template name profiles. Please move values there." 
+ }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles (DEPRECATED)", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name", + "tooltip": "Name of template from Anatomy templates" + } + ] + } + } + ] + }, { "type": "dict", "collapsible": true, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json index f8c9482e5f..962008d476 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_global_tools.json @@ -149,6 +149,11 @@ "type": "boolean", "key": "enabled", "label": "Enabled" + }, + { + "type": "boolean", + "key": "use_last_published_workfile", + "label": "Use last published workfile" } ] } @@ -238,6 +243,31 @@ } ] } + }, + { + "type": "list", + "key": "workfile_lock_profiles", + "label": "Workfile lock profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "hosts-enum", + "key": "host_name", + "label": "Hosts", + "multiselection": true + }, + { + "type": "splitter" + }, + { + "key": "enabled", + "label": "Enabled", + "type": "boolean" + } + ] + } } ] }, @@ -284,6 +314,102 @@ } } ] + }, + { + "type": "dict", + "key": "publish", + "label": "Publish", + "children": [ + { + "type": "label", + "label": "NOTE: For backwards compatibility the value can be empty, in which case the values from IntegrateAssetNew are used. This will change in the future, so please move all values here as soon as possible." 
+ }, + { + "type": "list", + "key": "template_name_profiles", + "label": "Template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name" + } + ] + } + }, + { + "type": "list", + "key": "hero_template_name_profiles", + "label": "Hero template name profiles", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + }, + { + "type": "hosts-enum", + "key": "hosts", + "label": "Hosts", + "multiselection": true + }, + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "template_name", + "label": "Template name", + "tooltip": "Name of template from Anatomy templates" + } + ] + } + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json new file mode 100644 index 0000000000..aa6eaf5164 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_publish.json @@ -0,0 +1,50 @@ +{ + "type": "dict", + "collapsible": true, + "key": "publish", + "label": "Publish plugins", + "children": [ + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ValidateWorkfilePaths", + "label": "Validate Workfile Paths", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "key": "node_types", + "label": "Node types", + "type": "list", + "object_type": "text" + }, + { + "key": "prohibited_vars", + "label": "Prohibited variables", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateContainers", + "label": "ValidateContainers" + } + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json new file mode 100644 index 0000000000..bab9b604b4 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_houdini_scriptshelf.json @@ -0,0 +1,71 @@ +{ + "type": "list", + "key": "shelves", + "label": "Shelves Manager", + "is_group": true, + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "shelf_set_name", + "label": "Shelf Set Name" + }, + { + "type": "path", + "key": "shelf_set_source_path", + "label": "Shelf Set Path (optional)", + "multipath": false, + "multiplatform": true + }, + { + "type": "list", + "key": "shelf_definition", + "label": "Shelves", + "use_label_wrap": true, + "object_type": { + "type": 
"dict", + "children": [ + { + "type": "text", + "key": "shelf_name", + "label": "Shelf Name" + }, + { + "type": "list", + "key": "tools_list", + "label": "Tools", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "label", + "label": "Name" + }, + { + "type": "path", + "key": "script", + "label": "Script" + }, + { + "type": "path", + "key": "icon", + "label": "Icon" + }, + { + "type": "text", + "key": "help", + "label": "Help" + } + ] + } + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_config.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_config.json new file mode 100644 index 0000000000..e7cff969d3 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_config.json @@ -0,0 +1,21 @@ +{ + "key": "ocio_config", + "type": "dict", + "label": "OCIO config", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "path", + "key": "filepath", + "label": "Config path", + "multiplatform": false, + "multipath": true + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_file_rules.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_file_rules.json new file mode 100644 index 0000000000..a171ba1c55 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_imageio_file_rules.json @@ -0,0 +1,41 @@ +{ + "key": "file_rules", + "type": "dict", + "label": "File Rules", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "rules", + "label": "Rules", + "type": "dict-modifiable", + "highlight_content": true, + "collapsible": false, + "object_type": { + "type": "dict", + "children": [ + { + "key": "pattern", + "label": "Regex pattern", + "type": "text" + }, + { + "key": "colorspace", + "label": "Colorspace name", + "type": "text" + }, + { + "key": "ext", + "label": "File extension", + "type": "text" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json index d6b81c8687..62c33f55fc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_capture.json @@ -94,18 +94,6 @@ } ] }, - - { - "type": "dict", - "key": "PanZoom", - "children": [ - { - "type": "boolean", - "key": "pan_zoom", - "label": " Pan Zoom" - } - ] - }, { "type": "splitter" }, @@ -153,19 +141,6 @@ "decimal": 0, "minimum": 0, "maximum": 99999 - }, - { - "type": "number", - "key": "percent", - "label": "percent", - "decimal": 1, - "minimum": 0, - "maximum": 200 - }, - { - "type": "text", - "key": "mode", - "label": "Mode" } ] }, @@ -195,12 +170,46 @@ { "nolights": "No Lights"} ] }, + { + "type": "boolean", + "key": "displayTextures", + "label": "Display Textures" + }, { "type": "number", "key": "textureMaxResolution", "label": "Texture Clamp Resolution", "decimal": 0 }, + { + "type": "splitter" + }, + { + "type":"boolean", + "key": "renderDepthOfField", + "label": "Depth of Field" + }, + { + "type": "splitter" + }, + { + 
"type": "boolean", + "key": "shadows", + "label": "Display Shadows" + }, + { + "type": "boolean", + "key": "twoSidedLighting", + "label": "Two Sided Lighting" + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "lineAAEnable", + "label": "Enable Anti-Aliasing" + }, { "type": "number", "key": "multiSample", @@ -210,142 +219,239 @@ "maximum": 32 }, { - "type": "boolean", - "key": "shadows", - "label": "Display Shadows" - }, - { - "type": "boolean", - "key": "textures", - "label": "Display Textures" - }, - { - "type": "boolean", - "key": "twoSidedLighting", - "label": "Two Sided Lighting" + "type": "splitter" }, { "type": "boolean", "key": "ssaoEnable", "label": "Screen Space Ambient Occlusion" }, + { + "type": "number", + "key": "ssaoAmount", + "label": "SSAO Amount" + }, + { + "type": "number", + "key": "ssaoRadius", + "label": "SSAO Radius" + }, + { + "type": "number", + "key": "ssaoFilterRadius", + "label": "SSAO Filter Radius", + "decimal": 0, + "minimum": 1, + "maximum": 32 + }, + { + "type": "number", + "key": "ssaoSamples", + "label": "SSAO Samples", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, { "type": "splitter" }, + { + "type": "boolean", + "key": "fogging", + "label": "Enable Hardware Fog" + }, + { + "type": "enum", + "key": "hwFogFalloff", + "label": "Hardware Falloff", + "enum_items": [ + { "0": "Linear"}, + { "1": "Exponential"}, + { "2": "Exponential Squared"} + ] + }, + { + "type": "number", + "key": "hwFogDensity", + "label": "Fog Density", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogStart", + "label": "Fog Start" + }, + { + "type": "number", + "key": "hwFogEnd", + "label": "Fog End" + }, + { + "type": "number", + "key": "hwFogAlpha", + "label": "Fog Alpha" + }, + { + "type": "number", + "key": "hwFogColorR", + "label": "Fog Color R", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorG", + "label": "Fog Color G", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "number", + "key": "hwFogColorB", + "label": "Fog Color B", + "decimal": 2, + "minimum": 0, + "maximum": 1 + }, + { + "type": "splitter" + }, + { + "type": "boolean", + "key": "motionBlurEnable", + "label": "Enable Motion Blur" + }, + { + "type": "number", + "key": "motionBlurSampleCount", + "label": "Motion Blur Sample Count", + "decimal": 0, + "minimum": 8, + "maximum": 32 + }, + { + "type": "number", + "key": "motionBlurShutterOpenFraction", + "label": "Shutter Open Fraction", + "decimal": 3, + "minimum": 0.01, + "maximum": 32 + }, + { + "type": "splitter" + }, + { + "type": "label", + "label": "Show" + }, { "type": "boolean", "key": "cameras", - "label": "cameras" + "label": "Cameras" }, { "type": "boolean", "key": "clipGhosts", - "label": "clipGhosts" - }, - { - "type": "boolean", - "key": "controlVertices", - "label": "controlVertices" + "label": "Clip Ghosts" }, { "type": "boolean", "key": "deformers", - "label": "deformers" + "label": "Deformers" }, { "type": "boolean", "key": "dimensions", - "label": "dimensions" + "label": "Dimensions" }, { "type": "boolean", "key": "dynamicConstraints", - "label": "dynamicConstraints" + "label": "Dynamic Constraints" }, { "type": "boolean", "key": "dynamics", - "label": "dynamics" + "label": "Dynamics" }, { "type": "boolean", "key": "fluids", - "label": "fluids" + "label": "Fluids" }, { "type": "boolean", "key": "follicles", - "label": "follicles" + "label": "Follicles" }, { "type": "boolean", "key": "gpuCacheDisplayFilter", - "label": 
"gpuCacheDisplayFilter" + "label": "GPU Cache" }, { "type": "boolean", "key": "greasePencils", - "label": "greasePencils" + "label": "Grease Pencil" }, { "type": "boolean", "key": "grid", - "label": "grid" + "label": "Grid" }, { "type": "boolean", "key": "hairSystems", - "label": "hairSystems" + "label": "Hair Systems" }, { "type": "boolean", "key": "handles", - "label": "handles" + "label": "Handles" }, { "type": "boolean", - "key": "hud", - "label": "hud" - }, - { - "type": "boolean", - "key": "hulls", - "label": "hulls" + "key": "headsUpDisplay", + "label": "HUD" }, { "type": "boolean", "key": "ikHandles", - "label": "ikHandles" + "label": "IK Handles" }, { "type": "boolean", "key": "imagePlane", - "label": "imagePlane" + "label": "Image Planes" }, { "type": "boolean", "key": "joints", - "label": "joints" + "label": "Joints" }, { "type": "boolean", "key": "lights", - "label": "lights" + "label": "Lights" }, { "type": "boolean", "key": "locators", - "label": "locators" + "label": "Locators" }, { "type": "boolean", "key": "manipulators", - "label": "manipulators" + "label": "Manipulators" }, { "type": "boolean", "key": "motionTrails", - "label": "motionTrails" + "label": "Motion Trails" }, { "type": "boolean", @@ -362,50 +468,65 @@ "key": "nRigids", "label": "nRigids" }, + { + "type": "boolean", + "key": "controlVertices", + "label": "NURBS CVs" + }, { "type": "boolean", "key": "nurbsCurves", - "label": "nurbsCurves" + "label": "NURBS Curves" + }, + { + "type": "boolean", + "key": "hulls", + "label": "NURBS Hulls" }, { "type": "boolean", "key": "nurbsSurfaces", - "label": "nurbsSurfaces" + "label": "NURBS Surfaces" }, { "type": "boolean", "key": "particleInstancers", - "label": "particleInstancers" + "label": "Particle Instancers" }, { "type": "boolean", "key": "pivots", - "label": "pivots" + "label": "Pivots" }, { "type": "boolean", "key": "planes", - "label": "planes" + "label": "Planes" }, { "type": "boolean", "key": "pluginShapes", - "label": "pluginShapes" + "label": "Plugin Shapes" }, { "type": "boolean", "key": "polymeshes", - "label": "polymeshes" + "label": "Polygons" }, { "type": "boolean", "key": "strokes", - "label": "strokes" + "label": "Strokes" }, { "type": "boolean", "key": "subdivSurfaces", - "label": "subdivSurfaces" + "label": "Subdiv Surfaces" + }, + { + "type": "boolean", + "key": "textures", + "label": "Texture Placements" } ] }, @@ -418,47 +539,47 @@ { "type": "boolean", "key": "displayGateMask", - "label": "displayGateMask" + "label": "Display Gate Mask" }, { "type": "boolean", "key": "displayResolution", - "label": "displayResolution" + "label": "Display Resolution" }, { "type": "boolean", "key": "displayFilmGate", - "label": "displayFilmGate" + "label": "Display Film Gate" }, { "type": "boolean", "key": "displayFieldChart", - "label": "displayFieldChart" + "label": "Display Field Chart" }, { "type": "boolean", "key": "displaySafeAction", - "label": "displaySafeAction" + "label": "Display Safe Action" }, { "type": "boolean", "key": "displaySafeTitle", - "label": "displaySafeTitle" + "label": "Display Safe Title" }, { "type": "boolean", "key": "displayFilmPivot", - "label": "displayFilmPivot" + "label": "Display Film Pivot" }, { "type": "boolean", "key": "displayFilmOrigin", - "label": "displayFilmOrigin" + "label": "Display Film Origin" }, { "type": "number", "key": "overscan", - "label": "overscan", + "label": "Overscan", "decimal": 1, "minimum": 0, "maximum": 10 diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json 
b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json index 0544b4bab7..e1a3082616 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create.json @@ -29,42 +29,9 @@ } ] }, - { - "type": "dict", - "collapsible": true, - "key": "CreateRender", - "label": "Create Render", - "checkbox_key": "enabled", - "children": [ - { - "type": "boolean", - "key": "enabled", - "label": "Enabled" - }, - { - "type": "list", - "key": "defaults", - "label": "Default Subsets", - "object_type": "text" - }, - { - "key": "aov_separator", - "label": "AOV Separator character", - "type": "enum", - "multiselection": false, - "default": "underscore", - "enum_items": [ - {"dash": "- (dash)"}, - {"underscore": "_ (underscore)"}, - {"dot": ". (dot)"} - ] - }, - { - "type": "text", - "key": "default_render_image_folder", - "label": "Default render image folder" - } - ] + { + "type": "schema", + "name": "schema_maya_create_render" }, { "type": "dict", @@ -98,17 +65,278 @@ ] }, + { + "type": "dict", + "collapsible": true, + "key": "CreateUnrealSkeletalMesh", + "label": "Create Unreal - Skeletal Mesh", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + }, + { + "type": "text", + "key": "joint_hints", + "label": "Joint root hint" + } + ] + + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateMultiverseLook", + "label": "Create Multiverse Look", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "publish_mip_map", + "label": "Publish Mip Maps" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAnimation", + "label": "Create Animation", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateModel", + "label": "Create Model", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreatePointCache", + "label": "Create Point Cache", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateProxyAlembic", + "label": "Create Proxy Alembic", + "checkbox_key": "enabled", + "children": [ + { + "type": 
"boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "write_color_sets", + "label": "Write Color Sets" + }, + { + "type": "boolean", + "key": "write_face_sets", + "label": "Write Face Sets" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CreateAss", + "label": "Create Ass", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + }, + { + "type": "boolean", + "key": "expandProcedurals", + "label": "Expand Procedurals" + }, + { + "type": "boolean", + "key": "motionBlur", + "label": "Motion Blur" + }, + { + "type": "number", + "key": "motionBlurKeys", + "label": "Motion Blur Keys", + "minimum": 0 + }, + { + "type": "number", + "key": "motionBlurLength", + "label": "Motion Blur Length", + "decimal": 3 + }, + { + "type": "boolean", + "key": "maskOptions", + "label": "Mask Options" + }, + { + "type": "boolean", + "key": "maskCamera", + "label": "Mask Camera" + }, + { + "type": "boolean", + "key": "maskLight", + "label": "Mask Light" + }, + { + "type": "boolean", + "key": "maskShape", + "label": "Mask Shape" + }, + { + "type": "boolean", + "key": "maskShader", + "label": "Mask Shader" + }, + { + "type": "boolean", + "key": "maskOverride", + "label": "Mask Override" + }, + { + "type": "boolean", + "key": "maskDriver", + "label": "Mask Driver" + }, + { + "type": "boolean", + "key": "maskFilter", + "label": "Mask Filter" + }, + { + "type": "boolean", + "key": "maskColor_manager", + "label": "Mask Color Manager" + }, + { + "type": "boolean", + "key": "maskOperator", + "label": "Mask Operator" + } + ] + }, { "type": "schema_template", "name": "template_create_plugin", "template_data": [ { - "key": "CreateAnimation", - "label": "Create Animation" + "key": "CreateMultiverseUsd", + "label": "Create Multiverse USD" }, { - "key": "CreateAss", - "label": "Create Ass" + "key": "CreateMultiverseUsdComp", + "label": "Create Multiverse USD Composition" + }, + { + "key": "CreateMultiverseUsdOver", + "label": "Create Multiverse USD Override" }, { "key": "CreateAssembly", @@ -126,14 +354,6 @@ "key": "CreateMayaScene", "label": "Create Maya Scene" }, - { - "key": "CreateModel", - "label": "Create Model" - }, - { - "key": "CreatePointCache", - "label": "Create Cache" - }, { "key": "CreateRenderSetup", "label": "Create Render Setup" diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json new file mode 100644 index 0000000000..68ad7ad63d --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_create_render.json @@ -0,0 +1,20 @@ +{ + "type": "dict", + "collapsible": true, + "key": "CreateRender", + "label": "Create Render", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "list", + "key": "defaults", + "label": "Default Subsets", + "object_type": "text" + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json index 2e5bc64e1c..873bb79c95 100644 --- 
a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_publish.json @@ -21,6 +21,34 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "key": "CollectFbxCamera", + "label": "Collect Camera for FBX export", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "CollectGLTF", + "label": "Collect Assets for GLTF/GLB export", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + } + ] + }, { "type": "splitter" }, @@ -49,12 +77,35 @@ ] }, { - "type": "schema_template", - "name": "template_publish_plugin", - "template_data": [ + "type": "dict", + "collapsible": true, + "key": "ValidateFrameRange", + "label": "Validate Frame Range", + "checkbox_key": "enabled", + "children": [ { - "key": "ValidateFrameRange", - "label": "Validate Frame Range" + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "splitter" + }, + { + "key": "exclude_families", + "label": "Families", + "type": "list", + "object_type": "text" } ] }, @@ -70,6 +121,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Shader name regex can use named capture group asset to validate against current asset name.

Example:
^.*(?P<asset>.+)_SHD

" @@ -122,6 +178,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "whitelist_native_plugins", @@ -209,6 +270,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "validate_mesh", @@ -247,6 +313,45 @@ } ] }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ValidatePluginPathAttributes", + "label": "Plug-in Path Attributes", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + }, + { + "type": "label", + "label": "Fill in the node types and attributes you want to validate.

e.g. AlembicNode.abc_file, the node type is AlembicNode and the node attribute is abc_file" + }, + { + "type": "dict-modifiable", + "collapsible": true, + "key": "attribute", + "label": "File Attribute", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, { "type": "dict", "collapsible": true, @@ -295,6 +400,72 @@ } ] }, + { + "type": "schema_template", + "name": "template_publish_plugin", + "template_data": [ + { + "key": "ValidateCurrentRenderLayerIsRenderable", + "label": "Validate Current Render Layer Has Renderable Camera" + }, + { + "key": "ValidateRenderImageRule", + "label": "Validate Images File Rule (Workspace)" + }, + { + "key": "ValidateRenderNoDefaultCameras", + "label": "Validate No Default Cameras Renderable" + }, + { + "key": "ValidateRenderSingleCamera", + "label": "Validate Render Single Camera" + }, + { + "key": "ValidateRenderLayerAOVs", + "label": "Validate Render Passes / AOVs Are Registered" + }, + { + "key": "ValidateStepSize", + "label": "Validate Step Size" + }, + { + "key": "ValidateVRayDistributedRendering", + "label": "VRay Distributed Rendering" + }, + { + "key": "ValidateVrayReferencedAOVs", + "label": "VRay Referenced AOVs" + }, + { + "key": "ValidateVRayTranslatorEnabled", + "label": "VRay Translator Settings" + }, + { + "key": "ValidateVrayProxy", + "label": "VRay Proxy Settings" + }, + { + "key": "ValidateVrayProxyMembers", + "label": "VRay Proxy Members" + }, + { + "key": "ValidateYetiRenderScriptCallbacks", + "label": "Yeti Render Script Callbacks" + }, + { + "key": "ValidateYetiRigCacheState", + "label": "Yeti Rig Cache State" + }, + { + "key": "ValidateYetiRigInputShapesInInstance", + "label": "Yeti Rig Input Shapes In Instance" + }, + { + "key": "ValidateYetiRigSettings", + "label": "Yeti Rig Settings" + } + ] + }, { "type": "collapsible-wrap", "label": "Model", @@ -379,6 +550,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "label", "label": "Validates transform suffix based on the type of its children shapes." 
@@ -435,6 +611,14 @@ "key": "ValidateMeshNonManifold", "label": "ValidateMeshNonManifold" }, + { + "key": "ValidateMeshNoNegativeScale", + "label": "Validate Mesh No Negative Scale" + }, + { + "key": "ValidateMeshNonZeroEdgeLength", + "label": "Validate Mesh Edge Length Non Zero" + }, { "key": "ValidateMeshNormalsUnlocked", "label": "ValidateMeshNormalsUnlocked" @@ -488,6 +672,81 @@ { "key": "ValidateUniqueNames", "label": "ValidateUniqueNames" + }, + { + "key": "ValidateNoVRayMesh", + "label": "Validate No V-Ray Proxies (VRayMesh)" + }, + { + "key": "ValidateUnrealMeshTriangulated", + "label": "Validate if Mesh is Triangulated" + }, + { + "key": "ValidateAlembicVisibleOnly", + "label": "Validate Alembic visible node" + } + ] + }, + { + "type": "label", + "label": "Extractors" + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractProxyAlembic", + "label": "Extract Proxy Alembic", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractAlembic", + "label": "Extract Alembic", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "key": "families", + "label": "Families", + "type": "list", + "object_type": "text" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "ExtractObj", + "label": "Extract OBJ", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" } ] } @@ -512,6 +771,26 @@ { "key": "ValidateRigControllers", "label": "Validate Rig Controllers" + }, + { + "key": "ValidateAnimationContent", + "label": "Validate Animation Content" + }, + { + "key": "ValidateOutRelatedNodeIds", + "label": "Validate Animation Out Set Related Node Ids" + }, + { + "key": "ValidateRigControllersArnoldAttributes", + "label": "Validate Rig Controllers (Arnold Attributes)" + }, + { + "key": "ValidateSkeletalMeshHierarchy", + "label": "Validate Skeletal Mesh Top Node" + }, + { + "key": "ValidateSkinclusterDeformerSet", + "label": "Validate Skincluster Deformer Relationships" } ] }, @@ -528,6 +807,11 @@ "key": "enabled", "label": "Enabled" }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, { "type": "boolean", "key": "allow_history_only", @@ -550,9 +834,57 @@ "key": "ValidateAssemblyName", "label": "Validate Assembly Name" }, + { + "key": "ValidateAssemblyNamespaces", + "label": "Validate Assembly Namespaces" + }, + { + "key": "ValidateAssemblyModelTransforms", + "label": "Validate Assembly Model Transforms" + }, { "key": "ValidateAssRelativePaths", "label": "ValidateAssRelativePaths" + }, + { + "key": "ValidateInstancerContent", + "label": "Validate Instancer Content" + }, + { + "key": "ValidateInstancerFrameRanges", + "label": "Validate Instancer Cache Frame Ranges" + }, + { + "key": "ValidateNoDefaultCameras", + "label": "Validate No Default Cameras" + }, + { + "key": "ValidateUnrealUpAxis", + "label": "Validate Unreal Up-Axis check" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "ValidateCameraContents", + "label": "Validate Camera Content", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + 
}, + { + "type": "boolean", + "key": "validate_shapes", + "label": "Validate presence of shapes" } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json new file mode 100644 index 0000000000..636dfa114c --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_render_settings.json @@ -0,0 +1,515 @@ +{ + "type": "dict", + "collapsible": true, + "key": "RenderSettings", + "label": "Render Settings", + "children": [ + { + "type": "boolean", + "key": "apply_render_settings", + "label": "Apply Render Settings on creation" + }, + { + "type": "text", + "key": "default_render_image_folder", + "label": "Default render image folder" + }, + { + "type": "boolean", + "key": "enable_all_lights", + "label": "Include all lights in Render Setup Layers by default" + }, + { + "key": "aov_separator", + "label": "AOV Separator character", + "type": "enum", + "multiselection": false, + "default": "underscore", + "enum_items": [ + {"dash": "- (dash)"}, + {"underscore": "_ (underscore)"}, + {"dot": ". (dot)"} + ] + }, + { + "key": "remove_aovs", + "label": "Remove existing AOVs", + "type": "boolean" + }, + { + "key": "reset_current_frame", + "label": "Reset Current Frame", + "type": "boolean" + }, + { + "type": "dict", + "collapsible": true, + "key": "arnold_renderer", + "label": "Arnold Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"jpeg": "jpeg"}, + {"png": "png"}, + {"deepexr": "deep exr"}, + {"tif": "tif"}, + {"exr": "exr"}, + {"maya": "maya"}, + {"mtoa_shaders": "mtoa_shaders"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "tiled", + "label": "Tiled (tif, exr)", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"ID": "ID"}, + {"N": "N"}, + {"P": "P"}, + {"Pref": "Pref"}, + {"RGBA": "RGBA"}, + {"Z": "Z"}, + {"albedo": "albedo"}, + {"background": "background"}, + {"coat": "coat"}, + {"coat_albedo": "coat_albedo"}, + {"coat_direct": "coat_direct"}, + {"coat_indirect": "coat_indirect"}, + {"cputime": "cputime"}, + {"crypto_asset": "crypto_asset"}, + {"crypto_material": "cypto_material"}, + {"crypto_object": "crypto_object"}, + {"diffuse": "diffuse"}, + {"diffuse_albedo": "diffuse_albedo"}, + {"diffuse_direct": "diffuse_direct"}, + {"diffuse_indirect": "diffuse_indirect"}, + {"direct": "direct"}, + {"emission": "emission"}, + {"highlight": "highlight"}, + {"indirect": "indirect"}, + {"motionvector": "motionvector"}, + {"opacity": "opacity"}, + {"raycount": "raycount"}, + {"rim_light": "rim_light"}, + {"shadow": "shadow"}, + {"shadow_diff": "shadow_diff"}, + {"shadow_mask": "shadow_mask"}, + {"shadow_matte": "shadow_matte"}, + {"sheen": "sheen"}, + {"sheen_albedo": "sheen_albedo"}, + {"sheen_direct": "sheen_direct"}, + {"sheen_indirect": "sheen_indirect"}, + {"specular": "specular"}, + {"specular_albedo": "specular_albedo"}, + {"specular_direct": "specular_direct"}, + {"specular_indirect": "specular_indirect"}, + {"sss": "sss"}, + {"sss_albedo": "sss_albedo"}, + {"sss_direct": "sss_direct"}, + 
{"sss_indirect": "sss_indirect"}, + {"transmission": "transmission"}, + {"transmission_albedo": "transmission_albedo"}, + {"transmission_direct": "transmission_direct"}, + {"transmission_indirect": "transmission_indirect"}, + {"volume": "volume"}, + {"volume_Z": "volume_Z"}, + {"volume_albedo": "volume_albedo"}, + {"volume_direct": "volume_direct"}, + {"volume_indirect": "volume_indirect"}, + {"volume_opacity": "volume_opacity"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like defaultArnoldRenderOptions.AASamples = 4" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "vray_renderer", + "label": "V-Ray Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "engine", + "label": "Production Engine", + "type": "enum", + "multiselection": false, + "defaults": "1", + "enum_items": [ + {"1": "V-Ray"}, + {"2": "V-Ray GPU"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"png": "png"}, + {"jpg": "jpg"}, + {"vrimg": "vrimg"}, + {"hdr": "hdr"}, + {"exr": "exr"}, + {"exr (multichannel)": "exr (multichannel)"}, + {"exr (deep)": "exr (deep)"}, + {"tga": "tga"}, + {"bmp": "bmp"}, + {"sgi": "sgi"} + ] + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< empty >"}, + {"atmosphereChannel": "atmosphereChannel"}, + {"backgroundChannel": "backgroundChannel"}, + {"bumpNormalsChannel": "bumpNormalsChannel"}, + {"causticsChannel": "causticsChannel"}, + {"coatFilterChannel": "coatFilterChannel"}, + {"coatGlossinessChannel": "coatGlossinessChannel"}, + {"coatReflectionChannel": "coatReflectionChannel"}, + {"vrayCoatChannel": "vrayCoatChannel"}, + {"CoverageChannel": "CoverageChannel"}, + {"cryptomatteChannel": "cryptomatteChannel"}, + {"customColor": "customColor"}, + {"drBucketChannel": "drBucketChannel"}, + {"denoiserChannel": "denoiserChannel"}, + {"diffuseChannel": "diffuseChannel"}, + {"ExtraTexElement": "ExtraTexElement"}, + {"giChannel": "giChannel"}, + {"LightMixElement": "LightMixElement"}, + {"LightSelectElement": "LightSelectElement"}, + {"lightingChannel": "lightingChannel"}, + {"LightingAnalysisChannel": "LightingAnalysisChannel"}, + {"materialIDChannel": "materialIDChannel"}, + {"MaterialSelectElement": "MaterialSelectElement"}, + {"matteShadowChannel": "matteShadowChannel"}, + {"metalnessChannel": "metalnessChannel"}, + {"MultiMatteElement": "MultiMatteElement"}, + {"multimatteIDChannel": "multimatteIDChannel"}, + {"noiseLevelChannel": "noiseLevelChannel"}, + {"normalsChannel": "normalsChannel"}, + {"nodeIDChannel": "nodeIDChannel"}, + {"objectSelectChannel": "objectSelectChannel"}, + {"rawCoatFilterChannel": "rawCoatFilterChannel"}, + {"rawCoatReflectionChannel": "rawCoatReflectionChannel"}, + {"rawDiffuseFilterChannel": "rawDiffuseFilterChannel"}, + {"rawGiChannel": "rawGiChannel"}, + {"rawLightChannel": "rawLightChannel"}, + {"rawReflectionChannel": "rawReflectionChannel"}, + {"rawReflectionFilterChannel": "rawReflectionFilterChannel"}, + {"rawRefractionChannel": "rawRefractionChannel"}, + {"rawRefractionFilterChannel": 
"rawRefractionFilterChannel"}, + {"rawShadowChannel": "rawShadowChannel"}, + {"rawSheenFilterChannel": "rawSheenFilterChannel"}, + {"rawSheenReflectionChannel": "rawSheenReflectionChannel"}, + {"rawTotalLightChannel": "rawTotalLightChannel"}, + {"reflectIORChannel": "reflectIORChannel"}, + {"reflectChannel": "reflectChannel"}, + {"reflectionFilterChannel": "reflectionFilterChannel"}, + {"reflectGlossinessChannel": "reflectGlossinessChannel"}, + {"refractChannel": "refractChannel"}, + {"refractionFilterChannel": "refractionFilterChannel"}, + {"refractGlossinessChannel": "refractGlossinessChannel"}, + {"renderIDChannel": "renderIDChannel"}, + {"FastSSS2Channel": "FastSSS2Channel"}, + {"sampleRateChannel": "sampleRateChannel"}, + {"samplerInfo": "samplerInfo"}, + {"selfIllumChannel": "selfIllumChannel"}, + {"shadowChannel": "shadowChannel"}, + {"sheenFilterChannel": "sheenFilterChannel"}, + {"sheenGlossinessChannel": "sheenGlossinessChannel"}, + {"sheenReflectionChannel": "sheenReflectionChannel"}, + {"vraySheenChannel": "vraySheenChannel"}, + {"specularChannel": "specularChannel"}, + {"Toon": "Toon"}, + {"toonLightingChannel": "toonLightingChannel"}, + {"toonSpecularChannel": "toonSpecularChannel"}, + {"totalLightChannel": "totalLightChannel"}, + {"unclampedColorChannel": "unclampedColorChannel"}, + {"VRScansPaintMaskChannel": "VRScansPaintMaskChannel"}, + {"VRScansZoneMaskChannel": "VRScansZoneMaskChannel"}, + {"velocityChannel": "velocityChannel"}, + {"zdepthChannel": "zdepthChannel"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like vraySettings.aaFilterSize = 1.5" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "redshift_renderer", + "label": "Redshift Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "primary_gi_engine", + "label": "Primary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"3": "Irradiance Cache"}, + {"4": "Brute Force"} + ] + }, + { + "key": "secondary_gi_engine", + "label": "Secondary GI Engine", + "type": "enum", + "multiselection": false, + "defaults": "0", + "enum_items": [ + {"0": "None"}, + {"2": "Irradiance Point Cloud"}, + {"4": "Brute Force"} + ] + }, + { + "key": "image_format", + "label": "Output Image Format", + "type": "enum", + "multiselection": false, + "defaults": "exr", + "enum_items": [ + {"iff": "Maya IFF"}, + {"exr": "OpenEXR"}, + {"tif": "TIFF"}, + {"png": "PNG"}, + {"tga": "Targa"}, + {"jpg": "JPEG"} + ] + }, + { + "key": "multilayer_exr", + "label": "Multilayer (exr)", + "type": "boolean" + }, + { + "key": "force_combine", + "label": "Force combine beauty and AOVs", + "type": "boolean" + }, + { + "key": "aov_list", + "label": "AOVs to create", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"empty": "< none >"}, + {"Ambient Occlusion": "Ambient Occlusion"}, + {"Background": "Background"}, + {"Beauty": "Beauty"}, + {"Bump Normals": "Bump Normals"}, + {"Caustics": "Caustics"}, + {"Caustics Raw": "Caustics Raw"}, + {"Cryptomatte": "Cryptomatte"}, + {"Custom": "Custom"}, + {"Depth": "Depth"}, + {"Diffuse Filter": "Diffuse Filter"}, + {"Diffuse Lighting": "Diffuse Lighting"}, + {"Diffuse 
Lighting Raw": "Diffuse Lighting Raw"}, + {"Emission": "Emission"}, + {"Global Illumination": "Global Illumination"}, + {"Global Illumination Raw": "Global Illumination Raw"}, + {"Matte": "Matte"}, + {"Motion Vectors": "Motion Vectors"}, + {"Normals": "Normals"}, + {"ObjectID": "ObjectID"}, + {"Object-Space Bump Normals": "Object-Space Bump Normals"}, + {"Object-Space Positions": "Object-Space Positions"}, + {"Puzzle Matte": "Puzzle Matte"}, + {"Reflections": "Reflections"}, + {"Reflections Filter": "Reflections Filter"}, + {"Reflections Raw": "Reflections Raw"}, + {"Refractions": "Refractions"}, + {"Refractions Filter": "Refractions Filter"}, + {"Refractions Raw": "Refractions Filter"}, + {"Shadows": "Shadows"}, + {"SpecularLighting": "Specular Lighting"}, + {"Sub Surface Scatter": "Sub Surface Scatter"}, + {"Sub Surface Scatter Raw": "Sub Surface Scatter Raw"}, + {"Total Diffuse Lighting Raw": "Total Diffuse Lighting Raw"}, + {"Total Translucency Filter": "Total Translucency Filter"}, + {"Translucency Filter": "Translucency Filter"}, + {"Translucency Lighting Raw": "Translucency Lighting Raw"}, + {"Volume Fog Emission": "Volume Fog Emission"}, + {"Volume Fog Tint": "Volume Fog Tint"}, + {"Volume Lighting": "Volume Lighting"}, + {"World Position": "World Position"} + ] + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like redshiftOptions.reflectionMaxTraceDepth = 3" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "renderman_renderer", + "label": "Renderman Renderer", + "is_group": true, + "children": [ + { + "key": "image_prefix", + "label": "Image prefix template", + "type": "text" + }, + { + "key": "image_dir", + "label": "Image Output Directory", + "type": "text" + }, + { + "key": "display_filters", + "label": "Display Filters", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"PxrBackgroundDisplayFilter": "PxrBackgroundDisplayFilter"}, + {"PxrCopyAOVDisplayFilter": "PxrCopyAOVDisplayFilter"}, + {"PxrEdgeDetect":"PxrEdgeDetect"}, + {"PxrFilmicTonemapperDisplayFilter": "PxrFilmicTonemapperDisplayFilter"}, + {"PxrGradeDisplayFilter": "PxrGradeDisplayFilter"}, + {"PxrHalfBufferErrorFilter": "PxrHalfBufferErrorFilter"}, + {"PxrImageDisplayFilter": "PxrImageDisplayFilter"}, + {"PxrLightSaturation": "PxrLightSaturation"}, + {"PxrShadowDisplayFilter": "PxrShadowDisplayFilter"}, + {"PxrStylizedHatching": "PxrStylizedHatching"}, + {"PxrStylizedLines": "PxrStylizedLines"}, + {"PxrStylizedToon": "PxrStylizedToon"}, + {"PxrWhitePointDisplayFilter": "PxrWhitePointDisplayFilter"} + ] + }, + { + "key": "imageDisplay_dir", + "label": "Image Display Filter Directory", + "type": "text" + }, + { + "key": "sample_filters", + "label": "Sample Filters", + "type": "enum", + "multiselection": true, + "defaults": "empty", + "enum_items": [ + {"PxrBackgroundSampleFilter": "PxrBackgroundSampleFilter"}, + {"PxrCopyAOVSampleFilter": "PxrCopyAOVSampleFilter"}, + {"PxrCryptomatte": "PxrCryptomatte"}, + {"PxrFilmicTonemapperSampleFilter": "PxrFilmicTonemapperSampleFilter"}, + {"PxrGradeSampleFilter": "PxrGradeSampleFilter"}, + {"PxrShadowFilter": "PxrShadowFilter"}, + {"PxrWatermarkFilter": "PxrWatermarkFilter"}, + {"PxrWhitePointSampleFilter": "PxrWhitePointSampleFilter"} + ] + }, + { + "key": "cryptomatte_dir", + 
"label": "Cryptomatte Output Directory", + "type": "text" + }, + { + "key": "watermark_dir", + "label": "Watermark Filter Directory", + "type": "text" + }, + { + "type": "label", + "label": "Add additional options - put attribute and value, like Ci" + }, + { + "type": "dict-modifiable", + "store_as_list": true, + "key": "additional_options", + "label": "Additional Renderer Options", + "use_label_wrap": true, + "object_type": { + "type": "text" + } + } + ] + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json new file mode 100644 index 0000000000..1cd6f0e67f --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_imageio.json @@ -0,0 +1,260 @@ +{ + "key": "imageio", + "type": "dict", + "label": "Color Management (ImageIO)", + "collapsible": true, + "is_group": true, + "children": [ + { + "type": "label", + "label": "'Custom OCIO config path' has deprecated.
If you need to set a custom config, enable it and add the path into 'OCIO config'.
Anatomy keys are supported." + }, + { + "type": "schema", + "name": "schema_imageio_config" + }, + { + "type": "schema", + "name": "schema_imageio_file_rules" + }, + { + "key": "viewer", + "type": "dict", + "label": "Viewer", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, + { + "key": "baking", + "type": "dict", + "label": "Extract-review baking profile", + "collapsible": false, + "children": [ + { + "type": "text", + "key": "viewerProcess", + "label": "Viewer Process" + } + ] + }, + { + "key": "workfile", + "type": "dict", + "label": "Workfile", + "collapsible": false, + "children": [ + { + "type": "form", + "children": [ + { + "type": "enum", + "key": "colorManagement", + "label": "color management", + "enum_items": [ + { + "Nuke": "Nuke" + }, + { + "OCIO": "OCIO" + } + ] + }, + { + "type": "enum", + "key": "OCIO_config", + "label": "OpenColorIO Config", + "enum_items": [ + { + "nuke-default": "nuke-default" + }, + { + "spi-vfx": "spi-vfx" + }, + { + "spi-anim": "spi-anim" + }, + { + "aces_0.1.1": "aces_0.1.1" + }, + { + "aces_0.7.1": "aces_0.7.1" + }, + { + "aces_1.0.1": "aces_1.0.1" + }, + { + "aces_1.0.3": "aces_1.0.3" + }, + { + "aces_1.1": "aces_1.1" + }, + { + "aces_1.2": "aces_1.2" + }, + { + "custom": "custom" + } + ] + }, + { + "type": "path", + "key": "customOCIOConfigPath", + "label": "Custom OCIO config path", + "multiplatform": true, + "multipath": true + }, + { + "type": "text", + "key": "workingSpaceLUT", + "label": "Working Space" + }, + { + "type": "text", + "key": "monitorLut", + "label": "monitor" + }, + { + "type": "text", + "key": "int8Lut", + "label": "8-bit files" + }, + { + "type": "text", + "key": "int16Lut", + "label": "16-bit files" + }, + { + "type": "text", + "key": "logLut", + "label": "log files" + }, + { + "type": "text", + "key": "floatLut", + "label": "float files" + } + ] + } + ] + }, + { + "key": "nodes", + "type": "dict", + "label": "Nodes", + "collapsible": true, + "children": [ + { + "key": "requiredNodes", + "type": "list", + "label": "Plugin required", + "object_type": { + "type": "dict", + "children": [ + { + "type": "list", + "key": "plugins", + "label": "Used in plugins", + "object_type": { + "type": "text", + "key": "pluginClass" + } + }, + { + "type": "text", + "key": "nukeNodeClass", + "label": "Nuke Node Class" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs", + "key": "knobs" + } + ] + } + + ] + } + }, + { + "type": "splitter" + }, + { + "type": "list", + "key": "overrideNodes", + "label": "Plugin's node overrides", + "object_type": { + "type": "dict", + "children": [ + { + "type": "list", + "key": "plugins", + "label": "Used in plugins", + "object_type": { + "type": "text", + "key": "pluginClass" + } + }, + { + "type": "text", + "key": "nukeNodeClass", + "label": "Nuke Node Class" + }, + { + "key": "subsets", + "label": "Subsets", + "type": "list", + "object_type": "text" + }, + { + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ + { + "label": "Knobs overrides", + "key": "knobs" + } + ] + } + ] + } + } + ] + }, + { + "key": "regexInputs", + "type": "dict", + "label": "Colorspace on Inputs by regex detection", + "collapsible": true, + "children": [ + { + "type": "list", + "key": "inputs", + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "regex", + "label": "Regex" + }, + { + "type": "text", + "key": "colorspace", 
+ "label": "Colorspace" + } + ] + } + } + ] + } + ] +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json index 5bd8337e4c..805424c632 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_load.json @@ -11,10 +11,52 @@ { "key": "LoadImage", "label": "Image Loader" + } + ] + }, + { + "type": "dict", + "collapsible": true, + "key": "LoadClip", + "label": "Clip Loader", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" }, { - "key": "LoadClip", - "label": "Clip Loader" + "type": "list", + "key": "_representations", + "label": "Representations", + "object_type": "text" + }, + { + "type": "text", + "key": "node_name_template", + "label": "Node name template" + }, + { + "type": "splitter" + }, + { + "type": "dict", + "collapsible": false, + "key": "options_defaults", + "label": "Loader option defaults", + "children": [ + { + "type": "boolean", + "key": "start_at_workfile", + "label": "Start at worfile beggining" + }, + { + "type": "boolean", + "key": "add_retime", + "label": "Add retime" + } + ] } ] } diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json index 1636a8d700..5b9145e7d9 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_publish.json @@ -11,8 +11,8 @@ { "type": "dict", "collapsible": true, - "key": "PreCollectNukeInstances", - "label": "PreCollectNukeInstances", + "key": "CollectInstanceData", + "label": "CollectInstanceData", "is_group": true, "children": [ { @@ -41,6 +41,9 @@ }, { "render": "render" + }, + { + "write": "write" } ] } @@ -58,8 +61,8 @@ "name": "template_publish_plugin", "template_data": [ { - "key": "ValidateInstanceInContext", - "label": "Validate Instance In Context" + "key": "ValidateCorrectAssetName", + "label": "Validate Correct Asset name" } ] }, @@ -132,9 +135,46 @@ "label": "Enabled" }, { - "type": "raw-json", - "key": "nodes", - "label": "Nodes" + "type": "boolean", + "key": "use_rendered", + "label": "Use rendered images" + }, + { + "type": "boolean", + "key": "bake_viewer_process", + "label": "Bake viewer process" + }, + { + "type": "boolean", + "key": "bake_viewer_input_process", + "label": "Bake viewer input process" + }, + { + "type": "collapsible-wrap", + "label": "Nodes", + "collapsible": true, + "children": [ + { + "type": "raw-json", + "key": "nodes", + "label": "Nodes" + } + ] + } + ] + }, + { + "type": "dict", + "collapsible": true, + "checkbox_key": "enabled", + "key": "ExtractReviewData", + "label": "ExtractReviewData", + "is_group": true, + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" } ] }, @@ -197,7 +237,7 @@ "object_type": "text" }, { - "key": "sebsets", + "key": "subsets", "label": "Subsets", "type": "list", "object_type": "text" @@ -208,9 +248,10 @@ "type": "separator" }, { - "type": "text", - "key": "extension", - "label": "File extension" + "type": "boolean", + "key": "read_raw", + "label": "Read colorspace RAW", + "default": false }, { "type": "text", @@ -227,12 +268,6 @@ "key": "bake_viewer_input_process", "label": "Bake Viewer 
Input Process (LUTs)" }, - { - "key": "add_tags", - "label": "Add additional tags to representations", - "type": "list", - "object_type": "text" - }, { "type": "separator" }, @@ -243,110 +278,28 @@ "default": false }, { - "type": "collapsible-wrap", - "label": "Reformat Node Knobs", - "collapsible": true, - "collapsed": false, - "children": [ + "type": "schema_template", + "name": "template_nuke_knob_inputs", + "template_data": [ { - "type": "list", - "key": "reformat_node_config", - "object_type": { - "type": "dict-conditional", - "enum_key": "type", - "enum_label": "Type", - "enum_children": [ - { - "key": "string", - "label": "String", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "text", - "key": "value", - "label": "Value" - } - ] - }, - { - "key": "bool", - "label": "Boolean", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "boolean", - "key": "value", - "label": "Value" - } - ] - }, - { - "key": "number", - "label": "Number", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "list-strict", - "key": "value", - "label": "Value", - "object_types": [ - { - "type": "number", - "key": "number", - "default": 1, - "decimal": 4 - } - ] - } - - ] - }, - { - "key": "list_numbers", - "label": "2 Numbers", - "children": [ - { - "type": "text", - "key": "name", - "label": "Name" - }, - { - "type": "list-strict", - "key": "value", - "label": "Value", - "object_types": [ - { - "type": "number", - "key": "x", - "default": 1, - "decimal": 4 - }, - { - "type": "number", - "key": "y", - "default": 1, - "decimal": 4 - } - ] - } - ] - } - ] - } + "label": "Reformat Node Knobs", + "key": "reformat_node_config" } ] + }, + { + "type": "separator" + }, + { + "type": "text", + "key": "extension", + "label": "Write node file type" + }, + { + "key": "add_custom_tags", + "label": "Add custom tags", + "type": "list", + "object_type": "text" } ] } @@ -365,6 +318,59 @@ "type": "boolean", "key": "viewer_lut_raw", "label": "Viewer LUT raw" + }, + { + "type": "separator" + }, + { + "type": "label", + "label": "Fill specific slate node values with templates. 
Uncheck the checkbox to not change the value.", + "word_wrap": true + }, + { + "type": "dict", + "key": "key_value_mapping", + "children": [ + { + "type": "list-strict", + "key": "f_submission_note", + "label": "Submission Note:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_submitting_for", + "label": "Submission For:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + }, + { + "type": "list-strict", + "key": "f_vfx_scope_of_work", + "label": "VFX Scope Of Work:", + "object_types": [ + { + "type": "boolean" + }, + { + "type": "text" + } + ] + } + ] } ] }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json new file mode 100644 index 0000000000..e4c65177a7 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_nuke_scriptsgizmo.json @@ -0,0 +1,129 @@ +{ + "type": "list", + "key": "gizmo", + "label": "Gizmo Menu", + "is_group": true, + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "toolbar_menu_name", + "label": "Toolbar Menu Name" + }, + { + "type": "path", + "key": "gizmo_source_dir", + "label": "Gizmo directory path", + "multipath": true, + "multiplatform": true + }, + { + "type": "collapsible-wrap", + "label": "Options", + "collapsible": true, + "collapsed": true, + "children": [ + { + "type": "path", + "key": "toolbar_icon_path", + "label": "Toolbar Icon Path", + "multipath": false, + "multiplatform": true + }, + { + "type": "splitter" + }, + { + "type": "list", + "key": "gizmo_definition", + "label": "Gizmo definitions", + "use_label_wrap": true, + "object_type": { + "type": "dict", + "children": [ + { + "type": "text", + "key": "gizmo_toolbar_path", + "label": "Gizmo Menu Path" + }, + { + "type": "list", + "key": "sub_gizmo_list", + "label": "Sub Gizmo List", + "use_label_wrap": true, + "object_type": { + "type": "dict-conditional", + "enum_key": "sourcetype", + "enum_label": "Type of usage", + "enum_children": [ + { + "key": "python", + "label": "Python", + "children": [ + { + "type": "text", + "key": "title", + "label": "Title" + }, + { + "type": "text", + "key": "command", + "label": "Python command" + }, + { + "type": "text", + "key": "icon", + "label": "Icon Path" + }, + { + "type": "text", + "key": "shortcut", + "label": "Hotkey" + } + ] + }, + { + "key": "file", + "label": "File", + "children": [ + { + "type": "text", + "key": "title", + "label": "Title" + }, + { + "type": "text", + "key": "file_name", + "label": "Gizmo file name" + }, + { + "type": "text", + "key": "shortcut", + "label": "Hotkey" + } + ] + }, + { + "key": "separator", + "label": "Separator", + "children": [ + { + "type": "text", + "key": "gizmo_toolbar_path", + "label": "Toolbar path" + } + ] + } + ] + } + } + ] + } + } + ] + } + ] + } +} \ No newline at end of file diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json index 484fbf9d07..a4b28f47bc 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_representation_tags.json @@ -13,6 +13,9 @@ { "ftrackreview": "Add review to Ftrack" }, + { + "shotgridreview": 
"Add review to Shotgrid" + }, { "delete": "Delete output" }, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_scriptsmenu.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_scriptsmenu.json similarity index 100% rename from openpype/settings/entities/schemas/projects_schema/schemas/schema_maya_scriptsmenu.json rename to openpype/settings/entities/schemas/projects_schema/schemas/schema_scriptsmenu.json diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json b/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json new file mode 100644 index 0000000000..b244460bbf --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/schema_templated_workfile_build.json @@ -0,0 +1,42 @@ +{ + "type": "dict", + "collapsible": true, + "key": "templated_workfile_build", + "label": "Templated Workfile Build Settings", + "children": [ + { + "type": "list", + "key": "profiles", + "label": "Profiles", + "object_type": { + "type": "dict", + "children": [ + { + "key": "task_types", + "label": "Task types", + "type": "task-types-enum" + }, + { + "key": "task_names", + "label": "Task names", + "type": "list", + "object_type": "text" + }, + { + "key": "path", + "label": "Path to template", + "type": "path", + "multiplatform": false, + "multipath": false + }, + { + "key": "keep_placeholder", + "label": "Keep placeholders", + "type": "boolean", + "default": true + } + ] + } + } + ] +} diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json deleted file mode 100644 index af8fd9dae4..0000000000 --- a/openpype/settings/entities/schemas/projects_schema/schemas/template_color.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "type": "list-strict", - "key": "{name}", - "label": "{label}", - "object_types": [ - { - "label": "Red", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Green", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - }, - { - "label": "Blue", - "type": "number", - "minimum": 0, - "maximum": 1, - "decimal": 3 - } - ] - } -] diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json new file mode 100644 index 0000000000..c9dee8681a --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_knob_inputs.json @@ -0,0 +1,291 @@ +[ + { + "type": "collapsible-wrap", + "label": "{label}", + "collapsible": true, + "collapsed": true, + "children": [{ + "type": "list", + "key": "{key}", + "object_type": { + "type": "dict-conditional", + "enum_key": "type", + "enum_label": "Type", + "enum_children": [ + { + "key": "text", + "label": "Text", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "expression", + "label": "Expression", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "expression", + "label": "Expression" + } + ] + }, + { + "key": "formatable", + "label": "Formate from template", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "template", + "label": "Template", + "placeholder": 
"{{key}} or {{key}};{{key}}" + }, + { + "type": "enum", + "key": "to_type", + "label": "Knob type", + "enum_items": [ + { + "text": "Text" + }, + { + "number": "Number" + }, + { + "decimal_number": "Decimal number" + }, + { + "2d_vector": "2D vector" + } + ] + } + ] + }, + { + "key": "color_gui", + "label": "Color GUI", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "color", + "key": "value", + "label": "Value", + "use_alpha": false + } + ] + }, + { + "key": "bool", + "label": "Boolean", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "boolean", + "key": "value", + "label": "Value" + } + ] + }, + { + "key": "number", + "label": "Number", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "number", + "key": "value", + "default": 1, + "decimal": 0, + "maximum": 99999999 + } + + ] + }, + { + "key": "decimal_number", + "label": "Decimal number", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "number", + "key": "value", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + + ] + }, + { + "key": "2d_vector", + "label": "2D vector", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "3d_vector", + "label": "3D vector", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "color", + "label": "Color", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "list-strict", + "key": "value", + "label": "Value", + "object_types": [ + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "x", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + }, + { + "type": "number", + "key": "y", + "default": 1, + "decimal": 4, + "maximum": 99999999 + } + ] + } + ] + }, + { + "key": "__legacy__", + "label": "_ Legacy type _", + "children": [ + { + "type": "text", + "key": "name", + "label": "Name" + }, + { + "type": "text", + "key": "value", + "label": "Value" + } + ] + } + ] + } + }] + } +] diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_write_attrs.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_write_attrs.json new file mode 100644 index 0000000000..8be48e669d --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_nuke_write_attrs.json @@ -0,0 +1,19 @@ +[ + { + "key": "instance_attributes", + "label": "Instance attributes", + "type": "enum", + "multiselection": true, + "enum_items": [ + { + "reviewable": "Reviewable" + }, + { + "farm_rendering": "Farm rendering" + }, + { + 
"use_range_limit": "Use range limit" + } + ] + } +] diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_publish_families.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_publish_families.json index f39ad31fbb..43dd74cdf9 100644 --- a/openpype/settings/entities/schemas/projects_schema/schemas/template_publish_families.json +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_publish_families.json @@ -28,6 +28,7 @@ {"nukenodes": "nukenodes"}, {"plate": "plate"}, {"pointcache": "pointcache"}, + {"proxyAbc": "proxyAbc"}, {"prerender": "prerender"}, {"redshiftproxy": "redshiftproxy"}, {"reference": "reference"}, diff --git a/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json new file mode 100644 index 0000000000..b57cad6719 --- /dev/null +++ b/openpype/settings/entities/schemas/projects_schema/schemas/template_validate_plugin.json @@ -0,0 +1,26 @@ +[ + { + "type": "dict", + "collapsible": true, + "key": "{key}", + "label": "{label}", + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "boolean", + "key": "optional", + "label": "Optional" + }, + { + "type": "boolean", + "key": "active", + "label": "Active" + } + ] + } +] diff --git a/openpype/settings/entities/schemas/system_schema/example_schema.json b/openpype/settings/entities/schemas/system_schema/example_schema.json index 6a86dae259..b9747b5f4f 100644 --- a/openpype/settings/entities/schemas/system_schema/example_schema.json +++ b/openpype/settings/entities/schemas/system_schema/example_schema.json @@ -117,19 +117,6 @@ } ] }, - { - "key": "env_group_test", - "label": "EnvGroup Test", - "type": "dict", - "children": [ - { - "key": "key_to_store_in_system_settings", - "label": "Testing environment group", - "type": "raw-json", - "env_group_key": "test_group" - } - ] - }, { "key": "dict_wrapper", "type": "dict", diff --git a/openpype/settings/entities/schemas/system_schema/example_template.json b/openpype/settings/entities/schemas/system_schema/example_template.json index ff78c78e8f..9955cf5651 100644 --- a/openpype/settings/entities/schemas/system_schema/example_template.json +++ b/openpype/settings/entities/schemas/system_schema/example_template.json @@ -7,8 +7,7 @@ { "type": "raw-json", "label": "{host_label} Environments", - "key": "{host_name}_environments", - "env_group_key": "{host_name}" + "key": "{host_name}_environments" }, { "type": "path", diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_3dsmax.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_3dsmax.json new file mode 100644 index 0000000000..f7c57298af --- /dev/null +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_3dsmax.json @@ -0,0 +1,39 @@ +{ + "type": "dict", + "key": "3dsmax", + "label": "Autodesk 3ds Max", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "schema_template", + "name": "template_host_unchangables" + }, + { + "key": "environment", + "label": "Environment", + "type": "raw-json" + }, + { + "type": "dict-modifiable", + "key": "variants", + "collapsible_key": true, + "use_label_wrap": false, + "object_type": { + "type": "dict", + "collapsible": true, + "children": [ + { + "type": "schema_template", 
+ "name": "template_host_variant_items" + } + ] + } + } + ] +} diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json index 334c9aa235..b92a2edf85 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_aftereffects.json @@ -20,31 +20,21 @@ "type": "raw-json" }, { - "type": "dict", + "type": "dict-modifiable", "key": "variants", - "children": [ - { - "type": "schema_template", - "name": "template_host_variant", - "template_data": [ - { - "app_variant_label": "2020", - "app_variant": "2020", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "2021", - "app_variant": "2021", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "2022", - "app_variant": "2022", - "variant_skip_paths": ["use_python_2"] - } - ] - } - ] + "collapsible_key": true, + "use_label_wrap": false, + "object_type": { + "type": "dict", + "collapsible": true, + "children": [ + { + "type": "schema_template", + "name": "template_host_variant_items", + "skip_paths": ["use_python_2"] + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json index 82be15c3b0..b104e3bb82 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_celaction.json @@ -28,8 +28,8 @@ "name": "template_host_variant", "template_data": [ { - "app_variant_label": "Local", - "app_variant": "local" + "app_variant_label": "Current", + "app_variant": "current" } ] } diff --git a/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json b/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json index 69ce7735e8..d5d041d0c2 100644 --- a/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json +++ b/openpype/settings/entities/schemas/system_schema/host_settings/schema_harmony.json @@ -20,31 +20,21 @@ "type": "raw-json" }, { - "type": "dict", + "type": "dict-modifiable", "key": "variants", - "children": [ - { - "type": "schema_template", - "name": "template_host_variant", - "template_data": [ - { - "app_variant_label": "21", - "app_variant": "21", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "20", - "app_variant": "20", - "variant_skip_paths": ["use_python_2"] - }, - { - "app_variant_label": "17", - "app_variant": "17", - "variant_skip_paths": ["use_python_2"] - } - ] - } - ] + "collapsible_key": true, + "use_label_wrap": false, + "object_type": { + "type": "dict", + "collapsible": true, + "children": [ + { + "type": "schema_template", + "name": "template_host_variant_items", + "skip_paths": ["use_python_2"] + } + ] + } } ] } diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json index 654ddf2938..7c5774415c 100644 --- a/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_ftrack.json @@ -50,8 +50,15 @@ "is_group": true, "children": [ { - "type": "label", - "label": "Intent" + 
"type": "boolean", + "key": "allow_empty_intent", + "label": "Allow empty intent" + }, + { + "type": "text", + "key": "empty_intent_label", + "label": "Empty item label", + "placeholder": "< Not set >" }, { "type": "dict-modifiable", @@ -64,7 +71,8 @@ { "key": "default", "type": "text", - "label": "Default Intent" + "label": "Default Intent", + "placeholder": "< First available >" }, { "type": "separator" diff --git a/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json b/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json new file mode 100644 index 0000000000..15a2ccc58d --- /dev/null +++ b/openpype/settings/entities/schemas/system_schema/module_settings/schema_kitsu.json @@ -0,0 +1,23 @@ +{ + "type": "dict", + "key": "kitsu", + "label": "Kitsu", + "collapsible": true, + "require_restart": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "server", + "label": "Server" + }, + { + "type": "splitter" + } + ] +} diff --git a/openpype/settings/entities/schemas/system_schema/schema_applications.json b/openpype/settings/entities/schemas/system_schema/schema_applications.json index 20be33320d..36c5811496 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_applications.json +++ b/openpype/settings/entities/schemas/system_schema/schema_applications.json @@ -9,6 +9,10 @@ "type": "schema", "name": "schema_maya" }, + { + "type": "schema", + "name": "schema_3dsmax" + }, { "type": "schema", "name": "schema_flame" diff --git a/openpype/settings/entities/schemas/system_schema/schema_general.json b/openpype/settings/entities/schemas/system_schema/schema_general.json index 6306317df8..d6c22fe54c 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_general.json +++ b/openpype/settings/entities/schemas/system_schema/schema_general.json @@ -34,12 +34,16 @@ "key": "environment", "label": "Environment", "type": "raw-json", - "env_group_key": "global", "require_restart": true }, { "type": "splitter" }, + { + "type": "boolean", + "key": "log_to_server", + "label": "Log to mongo" + }, { "type": "dict", "key": "disk_mapping", @@ -110,6 +114,17 @@ { "type": "splitter" }, + { + "type": "list", + "key": "local_env_white_list", + "label": "Local overrides of environment variable keys", + "tooltip": "Environment variable keys that can be changed per machine using Local settings UI.\nKey changes are applied only on applications and tools environments.", + "use_label_wrap": true, + "object_type": "text" + }, + { + "type": "splitter" + }, { "type": "collapsible-wrap", "label": "OpenPype deployment control", @@ -131,12 +146,12 @@ "label": "Define explicit OpenPype version that should be used. Keep empty to use latest available version." 
}, { - "type": "production-versions-text", + "type": "versions-text", "key": "production_version", "label": "Production version" }, { - "type": "staging-versions-text", + "type": "versions-text", "key": "staging_version", "label": "Staging version" }, diff --git a/openpype/settings/entities/schemas/system_schema/schema_modules.json b/openpype/settings/entities/schemas/system_schema/schema_modules.json index 52595914ed..952b38040c 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_modules.json +++ b/openpype/settings/entities/schemas/system_schema/schema_modules.json @@ -44,6 +44,64 @@ "type": "schema", "name": "schema_ftrack" }, + { + "type": "schema", + "name": "schema_kitsu" + }, + { + "type": "dict", + "key": "shotgrid", + "label": "Shotgrid", + "collapsible": true, + "checkbox_key": "enabled", + "children": [ + { + "type": "boolean", + "key": "enabled", + "label": "Enabled" + }, + { + "type": "text", + "key": "leecher_manager_url", + "label": "Shotgrid Leecher Manager URL" + }, + { + "type": "text", + "key": "leecher_backend_url", + "label": "Shotgrid Leecher Backend URL" + }, + { + "type": "boolean", + "key": "filter_projects_by_login", + "label": "Filter projects by SG login" + }, + { + "type": "dict-modifiable", + "key": "shotgrid_settings", + "label": "Shotgrid Servers", + "object_type": { + "type": "dict", + "children": [ + { + "key": "shotgrid_url", + "label": "Server URL", + "type": "text" + }, + { + "key": "shotgrid_script_name", + "label": "Script Name", + "type": "text" + }, + { + "key": "shotgrid_script_key", + "label": "Script api key", + "type": "text" + } + ] + } + } + ] + }, { "type": "dict", "key": "timers_manager", diff --git a/openpype/settings/entities/schemas/system_schema/schema_tools.json b/openpype/settings/entities/schemas/system_schema/schema_tools.json index 2346bef36d..7962fdd465 100644 --- a/openpype/settings/entities/schemas/system_schema/schema_tools.json +++ b/openpype/settings/entities/schemas/system_schema/schema_tools.json @@ -25,7 +25,30 @@ "key": "variants", "collapsible_key": true, "object_type": { - "type": "raw-json" + "type": "dict", + "children": [ + { + "key": "host_names", + "label": "Hosts", + "type": "hosts-enum", + "multiselection": true + }, + { + "key": "app_variants", + "label": "Applications", + "type": "apps-enum", + "multiselection": true, + "tooltip": "Applications are not \"live\" and may require to Save and refresh settings UI to update values." + }, + { + "type": "separator" + }, + { + "key": "environment", + "label": "Environments", + "type": "raw-json" + } + ] } } ] diff --git a/openpype/settings/handlers.py b/openpype/settings/handlers.py index 2109b53b09..373029d9df 100644 --- a/openpype/settings/handlers.py +++ b/openpype/settings/handlers.py @@ -7,6 +7,8 @@ from abc import ABCMeta, abstractmethod import six import openpype.version +from openpype.client.mongo import OpenPypeMongoConnection +from openpype.client.entities import get_project_connection, get_project from .constants import ( GLOBAL_SETTINGS_KEY, @@ -20,8 +22,175 @@ from .constants import ( ) +class SettingsStateInfo: + """Helper state information about some settings state. + + Is used to hold information about last saved and last opened UI. Keep + information about the time when that happened and on which machine under + which user and on which openpype version. + + To create currrent machine and time information use 'create_new' method. 
+ """ + + timestamp_format = "%Y-%m-%d %H:%M:%S.%f" + + def __init__( + self, + openpype_version, + settings_type, + project_name, + timestamp, + hostname, + hostip, + username, + system_name, + local_id + ): + self.openpype_version = openpype_version + self.settings_type = settings_type + self.project_name = project_name + + timestamp_obj = None + if timestamp: + timestamp_obj = datetime.datetime.strptime( + timestamp, self.timestamp_format + ) + self.timestamp = timestamp + self.timestamp_obj = timestamp_obj + self.hostname = hostname + self.hostip = hostip + self.username = username + self.system_name = system_name + self.local_id = local_id + + def copy(self): + return self.from_data(self.to_data()) + + @classmethod + def create_new( + cls, openpype_version, settings_type=None, project_name=None + ): + """Create information about this machine for current time.""" + + from openpype.lib.pype_info import get_workstation_info + + now = datetime.datetime.now() + workstation_info = get_workstation_info() + + return cls( + openpype_version, + settings_type, + project_name, + now.strftime(cls.timestamp_format), + workstation_info["hostname"], + workstation_info["hostip"], + workstation_info["username"], + workstation_info["system_name"], + workstation_info["local_id"] + ) + + @classmethod + def from_data(cls, data): + """Create object from data.""" + + return cls( + data["openpype_version"], + data["settings_type"], + data["project_name"], + data["timestamp"], + data["hostname"], + data["hostip"], + data["username"], + data["system_name"], + data["local_id"] + ) + + def to_data(self): + data = self.to_document_data() + data.update({ + "openpype_version": self.openpype_version, + "settings_type": self.settings_type, + "project_name": self.project_name + }) + return data + + @classmethod + def create_new_empty(cls, openpype_version, settings_type=None): + return cls( + openpype_version, + settings_type, + None, + None, + None, + None, + None, + None, + None + ) + + @classmethod + def from_document(cls, openpype_version, settings_type, document): + document = document or {} + project_name = document.get("project_name") + last_saved_info = document.get("last_saved_info") + if last_saved_info: + copy_last_saved_info = copy.deepcopy(last_saved_info) + copy_last_saved_info.update({ + "openpype_version": openpype_version, + "settings_type": settings_type, + "project_name": project_name, + }) + return cls.from_data(copy_last_saved_info) + return cls( + openpype_version, + settings_type, + project_name, + None, + None, + None, + None, + None, + None + ) + + def to_document_data(self): + return { + "timestamp": self.timestamp, + "hostname": self.hostname, + "hostip": self.hostip, + "username": self.username, + "system_name": self.system_name, + "local_id": self.local_id, + } + + def __eq__(self, other): + if not isinstance(other, SettingsStateInfo): + return False + + if other.timestamp_obj != self.timestamp_obj: + return False + + return ( + self.openpype_version == other.openpype_version + and self.hostname == other.hostname + and self.hostip == other.hostip + and self.username == other.username + and self.system_name == other.system_name + and self.local_id == other.local_id + ) + + @six.add_metaclass(ABCMeta) -class SettingsHandler: +class SettingsHandler(object): + global_keys = { + "openpype_path", + "admin_password", + "log_to_server", + "disk_mapping", + "production_version", + "staging_version" + } + @abstractmethod def save_studio_settings(self, data): """Save studio overrides of system 
settings. @@ -168,6 +337,19 @@ class SettingsHandler: """ pass + @abstractmethod + def get_global_settings(self): + """Studio global settings available across versions. + + Output must contain all keys from 'global_keys'. If a value is not set + the output value should be 'None'. + + Returns: + Dict[str, Any]: Global settings same across versions. + """ + + pass + # Clear methods - per version # - clearing may be helpful when version settings were created for # testing purposes @@ -224,7 +406,7 @@ class SettingsHandler: """OpenPype versions that have any studio project anatomy overrides. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ pass @@ -235,7 +417,7 @@ class SettingsHandler: """OpenPype versions that have any studio project settings overrides. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ pass @@ -249,8 +431,87 @@ class SettingsHandler: project_name(str): Name of project. Returns: - list: OpenPype versions strings. + List[str]: OpenPype versions strings. """ + + pass + + @abstractmethod + def get_system_last_saved_info(self): + """State of last system settings overrides at the moment when called. + + This method must provide the most recent data, so cached data must + not be used. + + Returns: + SettingsStateInfo: Information about system settings overrides. + """ + + pass + + @abstractmethod + def get_project_last_saved_info(self, project_name): + """State of last project settings overrides at the moment when called. + + This method must provide the most recent data, so cached data must + not be used. + + Args: + project_name (Union[None, str]): Project name for which state + should be returned. + + Returns: + SettingsStateInfo: Information about project settings overrides. + """ + + pass + + # UI related calls + @abstractmethod + def get_last_opened_info(self): + """Get information about last opened UI. + + Last opened UI is empty if no one has the settings UI opened at + the moment of the call. + + Returns: + Union[None, SettingsStateInfo]: Information about the machine that + had opened the Settings UI. + """ + + pass + + @abstractmethod + def opened_settings_ui(self): + """Callback called when settings UI is opened. + + Information about this machine must be available when + 'get_last_opened_info' is called from anywhere until + 'closed_settings_ui' is called. + + Returns: + SettingsStateInfo: Object representing information about this + machine. Must be passed to 'closed_settings_ui' when finished. + """ + + pass + + @abstractmethod + def closed_settings_ui(self, info_obj): + """Callback called when settings UI is closed. + + From the moment this method is called the information about this + machine is removed and no longer available when 'get_last_opened_info' + is called. + + The callback should validate that this machine is still stored as the + opened UI before changing any value. + + Args: + info_obj (SettingsStateInfo): Object created when + 'opened_settings_ui' was called.
+ """ + pass @@ -283,19 +544,22 @@ class CacheValues: self.data = None self.creation_time = None self.version = None + self.last_saved_info = None def data_copy(self): if not self.data: return {} return copy.deepcopy(self.data) - def update_data(self, data, version=None): + def update_data(self, data, version): self.data = data self.creation_time = datetime.datetime.now() - if version is not None: - self.version = version + self.version = version - def update_from_document(self, document, version=None): + def update_last_saved_info(self, last_saved_info): + self.last_saved_info = last_saved_info + + def update_from_document(self, document, version): data = {} if document: if "data" in document: @@ -304,9 +568,9 @@ class CacheValues: value = document["value"] if value: data = json.loads(value) + self.data = data - if version is not None: - self.version = version + self.version = version def to_json_string(self): return json.dumps(self.data or {}) @@ -318,27 +582,18 @@ class CacheValues: delta = (datetime.datetime.now() - self.creation_time).seconds return delta > self.cache_lifetime + def set_outdated(self): + self.create_time = None + class MongoSettingsHandler(SettingsHandler): """Settings handler that use mongo for storing and loading of settings.""" - global_general_keys = ( - "openpype_path", - "admin_password", - "disk_mapping", - "production_version", - "staging_version" - ) key_suffix = "_versioned" _version_order_key = "versions_order" _all_versions_keys = "all_versions" - _production_versions_key = "production_versions" - _staging_versions_key = "staging_versions" def __init__(self): # Get mongo connection - from openpype.lib import OpenPypeMongoConnection - from avalon.api import AvalonMongoDB - settings_collection = OpenPypeMongoConnection.get_mongo_client() self._anatomy_keys = None @@ -361,8 +616,8 @@ class MongoSettingsHandler(SettingsHandler): self.collection_name = collection_name self.collection = settings_collection[database_name][collection_name] - self.avalon_db = AvalonMongoDB() + self.global_settings_cache = CacheValues() self.system_settings_cache = CacheValues() self.project_settings_cache = collections.defaultdict(CacheValues) self.project_anatomy_cache = collections.defaultdict(CacheValues) @@ -396,6 +651,23 @@ class MongoSettingsHandler(SettingsHandler): self._prepare_project_settings_keys() return self._attribute_keys + def get_global_settings_doc(self): + if self.global_settings_cache.is_outdated: + global_settings_doc = self.collection.find_one({ + "type": GLOBAL_SETTINGS_KEY + }) or {} + self.global_settings_cache.update_data(global_settings_doc, None) + return self.global_settings_cache.data_copy() + + def get_global_settings(self): + global_settings_doc = self.get_global_settings_doc() + global_settings = global_settings_doc.get("data", {}) + return { + key: global_settings[key] + for key in self.global_keys + if key in global_settings + } + def _extract_global_settings(self, data): """Extract global settings data from system settings overrides. 
@@ -412,7 +684,7 @@ class MongoSettingsHandler(SettingsHandler): general_data = data["general"] # Add predefined keys to global settings if they are set - for key in self.global_general_keys: + for key in self.global_keys: if key not in general_data: continue # Pop key from values @@ -456,7 +728,7 @@ class MongoSettingsHandler(SettingsHandler): # Check if data contains any key from predefined keys any_key_found = False if globals_data: - for key in self.global_general_keys: + for key in self.global_keys: if key in globals_data: any_key_found = True break @@ -483,7 +755,7 @@ class MongoSettingsHandler(SettingsHandler): system_settings_data["general"] = system_general overridden_keys = system_general.get(M_OVERRIDDEN_KEY) or [] - for key in self.global_general_keys: + for key in self.global_keys: if key not in globals_data: continue @@ -510,6 +782,14 @@ class MongoSettingsHandler(SettingsHandler): # Update cache self.system_settings_cache.update_data(data, self._current_version) + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + SYSTEM_SETTINGS_KEY + ) + self.system_settings_cache.update_last_saved_info( + last_saved_info + ) + # Get copy of just updated cache system_settings_data = self.system_settings_cache.data_copy() @@ -517,20 +797,33 @@ class MongoSettingsHandler(SettingsHandler): global_settings = self._extract_global_settings( system_settings_data ) + self.global_settings_cache.update_data( + global_settings, + None + ) + + system_settings_doc = self.collection.find_one( + { + "type": self._system_settings_key, + "version": self._current_version + }, + {"_id": True} + ) # Store system settings - self.collection.replace_one( - { - "type": self._system_settings_key, - "version": self._current_version - }, - { - "type": self._system_settings_key, - "data": system_settings_data, - "version": self._current_version - }, - upsert=True - ) + new_system_settings_doc = { + "type": self._system_settings_key, + "version": self._current_version, + "data": system_settings_data, + "last_saved_info": last_saved_info.to_document_data() + } + if not system_settings_doc: + self.collection.insert_one(new_system_settings_doc) + else: + self.collection.update_one( + {"_id": system_settings_doc["_id"]}, + {"$set": new_system_settings_doc} + ) # Store global settings self.collection.replace_one( @@ -563,8 +856,19 @@ class MongoSettingsHandler(SettingsHandler): data_cache = self.project_settings_cache[project_name] data_cache.update_data(overrides, self._current_version) + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + PROJECT_SETTINGS_KEY, + project_name + ) + + data_cache.update_last_saved_info(last_saved_info) + self._save_project_data( - project_name, self._project_settings_key, data_cache + project_name, + self._project_settings_key, + data_cache, + last_saved_info ) def save_project_anatomy(self, project_name, anatomy_data): @@ -582,8 +886,16 @@ self._save_project_anatomy_data(project_name, data_cache) else: + last_saved_info = SettingsStateInfo.create_new( + self._current_version, + PROJECT_ANATOMY_KEY, + project_name + ) self._save_project_data( - project_name, self._project_anatomy_key, data_cache + project_name, + self._project_anatomy_key, + data_cache, + last_saved_info ) @classmethod @@ -606,16 +918,14 @@ class MongoSettingsHandler(SettingsHandler): new_data = data_cache.data_copy() # Prepare avalon project document - collection = self.avalon_db.database[project_name] - project_doc =
collection.find_one({ - "type": "project" - }) + project_doc = get_project(project_name) if not project_doc: raise ValueError(( "Project document of project \"{}\" does not exist." " Create project first." ).format(project_name)) + collection = get_project_connection(project_name) # Project's data update_dict_data = {} project_doc_data = project_doc.get("data") or {} @@ -666,28 +976,39 @@ class MongoSettingsHandler(SettingsHandler): {"$set": update_dict} ) - def _save_project_data(self, project_name, doc_type, data_cache): + def _save_project_data( + self, project_name, doc_type, data_cache, last_saved_info + ): is_default = bool(project_name is None) - replace_filter = { + query_filter = { "type": doc_type, "is_default": is_default, "version": self._current_version } - replace_data = { + + new_project_settings_doc = { "type": doc_type, "data": data_cache.data, "is_default": is_default, - "version": self._current_version + "version": self._current_version, + "last_saved_info": last_saved_info.to_data() } - if not is_default: - replace_filter["project_name"] = project_name - replace_data["project_name"] = project_name - self.collection.replace_one( - replace_filter, - replace_data, - upsert=True + if not is_default: + query_filter["project_name"] = project_name + new_project_settings_doc["project_name"] = project_name + + project_settings_doc = self.collection.find_one( + query_filter, + {"_id": True} ) + if project_settings_doc: + self.collection.update_one( + {"_id": project_settings_doc["_id"]}, + {"$set": new_project_settings_doc} + ) + else: + self.collection.insert_one(new_project_settings_doc) def _get_versions_order_doc(self, projection=None): # TODO cache @@ -710,10 +1031,7 @@ class MongoSettingsHandler(SettingsHandler): return self._version_order_checked = True - from openpype.lib.openpype_version import ( - get_OpenPypeVersion, - is_running_staging - ) + from openpype.lib.openpype_version import get_OpenPypeVersion OpenPypeVersion = get_OpenPypeVersion() # Skip if 'OpenPypeVersion' is not available @@ -725,25 +1043,11 @@ class MongoSettingsHandler(SettingsHandler): if not doc: doc = {"type": self._version_order_key} - if self._production_versions_key not in doc: - doc[self._production_versions_key] = [] - - if self._staging_versions_key not in doc: - doc[self._staging_versions_key] = [] - if self._all_versions_keys not in doc: doc[self._all_versions_keys] = [] - if is_running_staging(): - versions_key = self._staging_versions_key - else: - versions_key = self._production_versions_key - # Skip if current version is already available - if ( - self._current_version in doc[self._all_versions_keys] - and self._current_version in doc[versions_key] - ): + if self._current_version in doc[self._all_versions_keys]: return if self._current_version not in doc[self._all_versions_keys]: @@ -760,18 +1064,6 @@ class MongoSettingsHandler(SettingsHandler): str(version) for version in sorted(all_objected_versions) ] - if self._current_version not in doc[versions_key]: - objected_versions = [ - OpenPypeVersion(version=self._current_version) - ] - for version_str in doc[versions_key]: - objected_versions.append(OpenPypeVersion(version=version_str)) - - # Update versions list and push changes to Mongo - doc[versions_key] = [ - str(version) for version in sorted(objected_versions) - ] - self.collection.replace_one( {"type": self._version_order_key}, doc, @@ -1011,22 +1303,12 @@ class MongoSettingsHandler(SettingsHandler): def get_studio_system_settings_overrides(self, return_version): """Studio
overrides of system settings.""" if self.system_settings_cache.is_outdated: - globals_document = self.collection.find_one({ - "type": GLOBAL_SETTINGS_KEY - }) - document = ( - self._get_studio_system_settings_overrides_for_version() + globals_document = self.get_global_settings_doc() + document, version = self._get_system_settings_overrides_doc() + + last_saved_info = SettingsStateInfo.from_document( + version, SYSTEM_SETTINGS_KEY, document ) - if document is None: - document = self._find_closest_system_settings() - - version = None - if document: - if document["type"] == self._system_settings_key: - version = document["version"] - else: - version = LEGACY_SETTINGS_VERSION - merged_document = self._apply_global_settings( document, globals_document ) @@ -1034,6 +1316,9 @@ class MongoSettingsHandler(SettingsHandler): self.system_settings_cache.update_from_document( merged_document, version ) + self.system_settings_cache.update_last_saved_info( + last_saved_info + ) cache = self.system_settings_cache data = cache.data_copy() @@ -1041,24 +1326,43 @@ class MongoSettingsHandler(SettingsHandler): return data, cache.version return data + def _get_system_settings_overrides_doc(self): + document = ( + self._get_studio_system_settings_overrides_for_version() + ) + if document is None: + document = self._find_closest_system_settings() + + version = None + if document: + if document["type"] == self._system_settings_key: + version = document["version"] + else: + version = LEGACY_SETTINGS_VERSION + + return document, version + + def get_system_last_saved_info(self): + # Make sure settings are recaches + self.system_settings_cache.set_outdated() + self.get_studio_system_settings_overrides(False) + + return self.system_settings_cache.last_saved_info.copy() + def _get_project_settings_overrides(self, project_name, return_version): if self.project_settings_cache[project_name].is_outdated: - document = self._get_project_settings_overrides_for_version( + document, version = self._get_project_settings_overrides_doc( project_name ) - if document is None: - document = self._find_closest_project_settings(project_name) - - version = None - if document: - if document["type"] == self._project_settings_key: - version = document["version"] - else: - version = LEGACY_SETTINGS_VERSION - self.project_settings_cache[project_name].update_from_document( document, version ) + last_saved_info = SettingsStateInfo.from_document( + version, PROJECT_SETTINGS_KEY, document + ) + self.project_settings_cache[project_name].update_last_saved_info( + last_saved_info + ) cache = self.project_settings_cache[project_name] data = cache.data_copy() @@ -1066,6 +1370,29 @@ class MongoSettingsHandler(SettingsHandler): return data, cache.version return data + def _get_project_settings_overrides_doc(self, project_name): + document = self._get_project_settings_overrides_for_version( + project_name + ) + if document is None: + document = self._find_closest_project_settings(project_name) + + version = None + if document: + if document["type"] == self._project_settings_key: + version = document["version"] + else: + version = LEGACY_SETTINGS_VERSION + + return document, version + + def get_project_last_saved_info(self, project_name): + # Make sure settings are recaches + self.project_settings_cache[project_name].set_outdated() + self._get_project_settings_overrides(project_name, False) + + return self.project_settings_cache[project_name].last_saved_info.copy() + def get_studio_project_settings_overrides(self, return_version): """Studio overrides of 
default project settings.""" return self._get_project_settings_overrides(None, return_version) @@ -1143,9 +1470,9 @@ class MongoSettingsHandler(SettingsHandler): self.project_anatomy_cache[project_name].update_from_document( document, version ) + else: - collection = self.avalon_db.database[project_name] - project_doc = collection.find_one({"type": "project"}) + project_doc = get_project(project_name) self.project_anatomy_cache[project_name].update_data( self.project_doc_to_anatomy_data(project_doc), self._current_version @@ -1363,6 +1690,64 @@ class MongoSettingsHandler(SettingsHandler): return output return self._sort_versions(output) + def get_last_opened_info(self): + doc = self.collection.find_one({ + "type": "last_opened_settings_ui", + "version": self._current_version + }) or {} + info_data = doc.get("info") + if not info_data: + return None + + # Fill not available information + info_data["openpype_version"] = self._current_version + info_data["settings_type"] = None + info_data["project_name"] = None + return SettingsStateInfo.from_data(info_data) + + def opened_settings_ui(self): + doc_filter = { + "type": "last_opened_settings_ui", + "version": self._current_version + } + + opened_info = SettingsStateInfo.create_new(self._current_version) + new_doc_data = copy.deepcopy(doc_filter) + new_doc_data["info"] = opened_info.to_document_data() + + doc = self.collection.find_one( + doc_filter, + {"_id": True} + ) + if doc: + self.collection.update_one( + {"_id": doc["_id"]}, + {"$set": new_doc_data} + ) + else: + self.collection.insert_one(new_doc_data) + return opened_info + + def closed_settings_ui(self, info_obj): + doc_filter = { + "type": "last_opened_settings_ui", + "version": self._current_version + } + doc = self.collection.find_one(doc_filter) or {} + info_data = doc.get("info") + if not info_data: + return + + info_data["openpype_version"] = self._current_version + info_data["settings_type"] = None + info_data["project_name"] = None + current_info = SettingsStateInfo.from_data(info_data) + if current_info == info_obj: + self.collection.update_one( + {"_id": doc["_id"]}, + {"$set": {"info": None}} + ) + class MongoLocalSettingsHandler(LocalSettingsHandler): """Settings handler that use mongo for store and load local settings. 
@@ -1409,7 +1794,7 @@ class MongoLocalSettingsHandler(LocalSettingsHandler): """ data = data or {} - self.local_settings_cache.update_data(data) + self.local_settings_cache.update_data(data, None) self.collection.replace_one( { @@ -1432,6 +1817,6 @@ class MongoLocalSettingsHandler(LocalSettingsHandler): "site_id": self.local_site_id }) - self.local_settings_cache.update_from_document(document) + self.local_settings_cache.update_from_document(document, None) return self.local_settings_cache.data_copy() diff --git a/openpype/settings/lib.py b/openpype/settings/lib.py index 1d303564d5..796eaeda01 100644 --- a/openpype/settings/lib.py +++ b/openpype/settings/lib.py @@ -9,7 +9,6 @@ from .exceptions import ( ) from .constants import ( M_OVERRIDDEN_KEY, - M_ENVIRONMENT_KEY, METADATA_KEYS, @@ -92,6 +91,31 @@ def calculate_changes(old_value, new_value): return changes +@require_handler +def get_system_last_saved_info(): + return _SETTINGS_HANDLER.get_system_last_saved_info() + + +@require_handler +def get_project_last_saved_info(project_name): + return _SETTINGS_HANDLER.get_project_last_saved_info(project_name) + + +@require_handler +def get_last_opened_info(): + return _SETTINGS_HANDLER.get_last_opened_info() + + +@require_handler +def opened_settings_ui(): + return _SETTINGS_HANDLER.opened_settings_ui() + + +@require_handler +def closed_settings_ui(info_obj): + return _SETTINGS_HANDLER.closed_settings_ui(info_obj) + + @require_handler def save_studio_settings(data): """Save studio overrides of system settings. @@ -114,8 +138,7 @@ def save_studio_settings(data): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener old_data = get_system_settings() default_values = get_default_settings()[SYSTEM_SETTINGS_KEY] @@ -162,8 +185,7 @@ def save_project_settings(project_name, overrides): SaveWarningExc: If any module raises the exception. """ # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_SETTINGS_KEY] if project_name: @@ -224,8 +246,7 @@ def save_project_anatomy(project_name, anatomy_data): SaveWarningExc: If any module raises the exception. 
""" # Notify Pype modules - from openpype.modules import ModulesManager - from openpype_interfaces import ISettingsChangeListener + from openpype.modules import ModulesManager, ISettingsChangeListener default_values = get_default_settings()[PROJECT_ANATOMY_KEY] if project_name: @@ -265,11 +286,59 @@ def save_project_anatomy(project_name, anatomy_data): raise SaveWarningExc(warnings) +def _system_settings_backwards_compatible_conversion(studio_overrides): + # Backwards compatibility of tools 3.9.1 - 3.9.2 to keep + # "tools" environments + if ( + "tools" in studio_overrides + and "tool_groups" in studio_overrides["tools"] + ): + tool_groups = studio_overrides["tools"]["tool_groups"] + for tool_group, group_value in tool_groups.items(): + if tool_group in METADATA_KEYS: + continue + + variants = group_value.get("variants") + if not variants: + continue + + for key in set(variants.keys()): + if key in METADATA_KEYS: + continue + + variant_value = variants[key] + if "environment" not in variant_value: + variants[key] = { + "environment": variant_value + } + + +def _project_anatomy_backwards_compatible_conversion(project_anatomy): + # Backwards compatibility of node settings in Nuke 3.9.x - 3.10.0 + # - source PR - https://github.com/pypeclub/OpenPype/pull/3143 + value = project_anatomy + for key in ("imageio", "nuke", "nodes", "requiredNodes"): + if key not in value: + return + value = value[key] + + for item in value: + for node in item.get("knobs") or []: + if "type" in node: + break + node["type"] = "__legacy__" + + @require_handler def get_studio_system_settings_overrides(return_version=False): - return _SETTINGS_HANDLER.get_studio_system_settings_overrides( + output = _SETTINGS_HANDLER.get_studio_system_settings_overrides( return_version ) + value = output + if return_version: + value, version = output + _system_settings_backwards_compatible_conversion(value) + return output @require_handler @@ -295,7 +364,9 @@ def get_project_settings_overrides(project_name, return_version=False): @require_handler def get_project_anatomy_overrides(project_name): - return _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name) + output = _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name) + _project_anatomy_backwards_compatible_conversion(output) + return output @require_handler @@ -425,24 +496,6 @@ def get_local_settings(): return _LOCAL_SETTINGS_HANDLER.get_local_settings() -class DuplicatedEnvGroups(Exception): - def __init__(self, duplicated): - self.origin_duplicated = duplicated - self.duplicated = {} - for key, items in duplicated.items(): - self.duplicated[key] = [] - for item in items: - self.duplicated[key].append("/".join(item["parents"])) - - msg = "Duplicated environment group keys. {}".format( - ", ".join([ - "\"{}\"".format(env_key) for env_key in self.duplicated.keys() - ]) - ) - - super(DuplicatedEnvGroups, self).__init__(msg) - - def load_openpype_default_settings(): """Load openpype default settings.""" return load_jsons_from_dir(DEFAULTS_DIR) @@ -528,7 +581,7 @@ def load_jsons_from_dir(path, *args, **kwargs): Data are loaded recursively from a directory and recreate the hierarchy as a dictionary. - Entered path hiearchy: + Entered path hierarchy: |_ folder1 | |_ data1.json |_ folder2 @@ -592,69 +645,6 @@ def load_jsons_from_dir(path, *args, **kwargs): return output -def find_environments(data, with_items=False, parents=None): - """ Find environemnt values from system settings by it's metadata. 
- - Args: - data(dict): System settings data or dictionary which may contain - environments metadata. - - Returns: - dict: Key as Environment key and value for `acre` module. - """ - if not data or not isinstance(data, dict): - return {} - - output = {} - if parents is None: - parents = [] - - if M_ENVIRONMENT_KEY in data: - metadata = data.get(M_ENVIRONMENT_KEY) - for env_group_key, env_keys in metadata.items(): - if env_group_key not in output: - output[env_group_key] = [] - - _env_values = {} - for key in env_keys: - _env_values[key] = data[key] - - item = { - "env": _env_values, - "parents": parents[:-1] - } - output[env_group_key].append(item) - - for key, value in data.items(): - _parents = copy.deepcopy(parents) - _parents.append(key) - result = find_environments(value, True, _parents) - if not result: - continue - - for env_group_key, env_values in result.items(): - if env_group_key not in output: - output[env_group_key] = [] - - for env_values_item in env_values: - output[env_group_key].append(env_values_item) - - if with_items: - return output - - duplicated_env_groups = {} - final_output = {} - for key, value_in_list in output.items(): - if len(value_in_list) > 1: - duplicated_env_groups[key] = value_in_list - else: - final_output[key] = value_in_list[0]["env"] - - if duplicated_env_groups: - raise DuplicatedEnvGroups(duplicated_env_groups) - return final_output - - def subkey_merge(_dict, value, keys): key = keys.pop(0) if not keys: @@ -1050,17 +1040,15 @@ def get_current_project_settings(): return get_project_settings(project_name) -def get_environments(): - """Calculated environment based on defaults and system settings. - - Any default environment also found in the system settings will be fully - overridden by the one from the system settings. - - Returns: - dict: Output should be ready for `acre` module. 
- """ - - return find_environments(get_system_settings(False)) +@require_handler +def get_global_settings(): + default_settings = load_openpype_default_settings() + default_values = default_settings["system_settings"]["general"] + studio_values = _SETTINGS_HANDLER.get_global_settings() + return { + key: studio_values.get(key, default_values.get(key)) + for key in _SETTINGS_HANDLER.global_keys + } def get_general_environments(): @@ -1081,6 +1069,14 @@ def get_general_environments(): clear_metadata_from_settings(environments) + whitelist_envs = result["general"].get("local_env_white_list") + if whitelist_envs: + local_settings = get_local_settings() + local_envs = local_settings.get("environments") or {} + for key, value in local_envs.items(): + if key in whitelist_envs and key in environments: + environments[key] = value + return environments diff --git a/openpype/style/__init__.py b/openpype/style/__init__.py index b2a1a4ce6c..48e943beb1 100644 --- a/openpype/style/__init__.py +++ b/openpype/style/__init__.py @@ -1,4 +1,5 @@ import os +import copy import json import collections import six @@ -19,6 +20,9 @@ class _Cache: disabled_entity_icon_color = None deprecated_entity_font_color = None + colors_data = None + objected_colors = None + def get_style_image_path(image_name): # All filenames are lowered @@ -46,8 +50,11 @@ def _get_colors_raw_data(): def get_colors_data(): """Only color data from stylesheet data.""" - data = _get_colors_raw_data() - return data.get("color") or {} + if _Cache.colors_data is None: + data = _get_colors_raw_data() + color_data = data.get("color") or {} + _Cache.colors_data = color_data + return copy.deepcopy(_Cache.colors_data) def _convert_color_values_to_objects(value): @@ -75,17 +82,38 @@ def _convert_color_values_to_objects(value): return parse_color(value) -def get_objected_colors(): +def get_objected_colors(*keys): """Colors parsed from stylesheet data into color definitions. + You can pass multiple arguments to get a key from the data dict's colors. + Because this functions returns a deep copy of the cached data this allows + a much smaller dataset to be copied and thus result in a faster function. + It is however a micro-optimization in the area of 0.001s and smaller. + + For example: + >>> get_colors_data() # copy of full colors dict + >>> get_colors_data("font") + >>> get_colors_data("loader", "asset-view") + + Args: + *keys: Each key argument will return a key nested deeper in the + objected colors data. + Returns: - dict: Parsed color objects by keys in data. + Any: Parsed color objects by keys in data. 
""" - colors_data = get_colors_data() - output = {} - for key, value in colors_data.items(): - output[key] = _convert_color_values_to_objects(value) - return output + if _Cache.objected_colors is None: + colors_data = get_colors_data() + output = {} + for key, value in colors_data.items(): + output[key] = _convert_color_values_to_objects(value) + + _Cache.objected_colors = output + + output = _Cache.objected_colors + for key in keys: + output = output[key] + return copy.deepcopy(output) def _load_stylesheet(): @@ -129,7 +157,7 @@ def _load_stylesheet(): def _load_font(): """Load and register fonts into Qt application.""" - from Qt import QtGui + from qtpy import QtGui # Check if font ids are still loaded if _Cache.font_ids is not None: diff --git a/openpype/style/color_defs.py b/openpype/style/color_defs.py index 0f4e145ca0..69703583c4 100644 --- a/openpype/style/color_defs.py +++ b/openpype/style/color_defs.py @@ -47,7 +47,7 @@ def create_qcolor(*args): *args (tuple): It is possible to pass initialization arguments for Qcolor. """ - from Qt import QtGui + from qtpy import QtGui return QtGui.QColor(*args) @@ -296,7 +296,7 @@ class HSLColor: if "%" in sat_str: sat = float(sat_str.rstrip("%")) / 100 else: - sat = float(sat) + sat = float(sat_str) if "%" in light_str: light = float(light_str.rstrip("%")) / 100 @@ -337,8 +337,8 @@ class HSLAColor: as float (0-1 range). Examples: - "hsl(27, 0.7, 0.3)" - "hsl(27, 70%, 30%)" + "hsla(27, 0.7, 0.3, 0.5)" + "hsla(27, 70%, 30%, 0.5)" """ def __init__(self, value): modified_color = value.lower().strip() @@ -350,7 +350,7 @@ class HSLAColor: if "%" in sat_str: sat = float(sat_str.rstrip("%")) / 100 else: - sat = float(sat) + sat = float(sat_str) if "%" in light_str: light = float(light_str.rstrip("%")) / 100 diff --git a/openpype/style/data.json b/openpype/style/data.json index a76a77015b..404ca6944c 100644 --- a/openpype/style/data.json +++ b/openpype/style/data.json @@ -20,14 +20,14 @@ "color": { "font": "#D3D8DE", "font-hover": "#F0F2F5", - "font-disabled": "#99A3B2", + "font-disabled": "#5b6779", "font-view-selection": "#ffffff", "font-view-hover": "#F0F2F5", "bg": "#2C313A", "bg-inputs": "#21252B", "bg-buttons": "#434a56", - "bg-button-hover": "rgba(168, 175, 189, 0.3)", + "bg-button-hover": "rgb(81, 86, 97)", "bg-inputs-disabled": "#2C313A", "bg-buttons-disabled": "#434a56", @@ -61,7 +61,13 @@ "icon-entity-default": "#bfccd6", "icon-entity-disabled": "#808080", "font-entity-deprecated": "#666666", - + "overlay-messages": { + "close-btn": "#D3D8DE", + "bg-success": "#458056", + "bg-success-hover": "#55a066", + "bg-error": "#AD2E2E", + "bg-error-hover": "#C93636" + }, "tab-widget": { "bg": "#21252B", "bg-selected": "#434a56", @@ -85,8 +91,10 @@ }, "publisher": { "error": "#AA5050", + "crash": "#FF6432", "success": "#458056", "warning": "#ffc671", + "tab-bg": "#16191d", "list-view-group": { "bg": "#434a56", "bg-hover": "rgba(168, 175, 189, 0.3)", diff --git a/openpype/style/pyside6_resources.py b/openpype/style/pyside6_resources.py new file mode 100644 index 0000000000..125fcb64fa --- /dev/null +++ b/openpype/style/pyside6_resources.py @@ -0,0 +1,1522 @@ +# Resource object code (Python 3) +# Created by: object code +# Created by: The Resource Compiler for Qt version 6.4.1 +# WARNING! All changes made in this file will be lost! 
+ +from PySide6 import QtCore + +qt_resource_data = b"\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1f \xb9\ +\x8dw\xe9\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x06\xe6|```B0\xa1\x1c\x08\x93\x81\x81\x09\xc1\ +d``b`H\x11@\xe2 s\x19\x90\x8d@\x02\ +\x00#\xed\x08\xafd\x9f\x0f\x15\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x04\x12\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x03\xc4IDATh\x81\xed\ +\x9a_\x88\x94U\x18\xc6\x7f3;\x1a\x0b\x19\x15f\x17\ +\xca\x03IPM\x09J7Q^D)&f\x05[\ +\xb9\xd2\x82\xb1P\x17\x91$t\x11\x08z\xa1D\x17]\ +T\x94\x04\xd6E\xe1\xa2\x15L\xb1\x18\x99\xfd%\xd8\x08\ +\x82nj]\xa4\x22\xe2\x81\xa8\x96\xd5(\xe8\x9f\xae\xd5\ +\xc5\xf9\xb6\xc6\xd9\xf9\xbesf\xc6vf\xc0\xdf\xdd\x9c\ +\xef=\xefy\x9fs\xe6\x9c\xf3~\xdf9%2Fj\ +SK\x81\xdd\xc0Z\xe0:`\x11\xbd\xc5i`\x12\x98\ +\x00\xf6\x8c\x0dUg\x00J\x00#\xb5\xa9[\x80C\xc0\ +\xb2\xae\x85\xd7\x1a\xd3\xc0\xd6\xb1\xa1\xea\x07\xa5\xac\xe7\x8f\ +\xd1?\xc1\xcf1\x0d\x5c[&\xfcm\xfa-x\x081\ +\xef\xaa\x10\xfe\xf3\x8d\x9cY\xe0`R\x19h\xf8\xbd\xb6\ +B\x98\xb0\xf5\x9c\x19\x1b\xaaV\x16(\xa0\x96\x18\xa9M\ +\xcdr\xb6\x88Uezo\xb5i\x85E\xe5nG\xd0\ +)\xe7\x05t\x9b\xf3\x02\xfe\x0fl\xdfc\xfb\x90\xed\xf5\ +1\xdb\x9e\x13`\xfb\x11\xe05`\x188j{\x9f\xed\ +\xdc\x95\xb2\xa7\x04\xd8\x1e\x06\x9e\xaa+*\x01\x0f\x01/\ +\xe4\xd5\xe9\x19\x01\xb6\xd7\x01/\x93%\x98\x0dl\xb3\xfd\ +h\xb3z=!\xc0\xf6\xf5\xc0\xeb\xc0\xe2\x02\xb3\x8d\xcd\ +\x0a\xbb.\xc0\xf6\x95\xc0[\xc0\x92\x88\xe9\x8b\xcd\x0a\xbb\ +*\xc0\xf6\xe5\xc0Q\xe2\xd9\xf0\x93\x92^i\xf6\xa0k\ +\x02l/\x01\x8e\x00+#\xa6\x07\x80\xc7\xf2\x1evE\ +\x80\xed\xc5\xc0\x1b\xc0\x9a\x88\xe9\xdb\xc0\xa8\xa4\xbf\xf3\x0c\ +\x16\x5c\x80\xed2\xa1Wo\x8d\x98~\x0a\xdc-i\xb6\ +\xc8\xa8\x1b#\xf04po\xc4\xe6K`\x93\xa4_c\ +\xce\x92\x04\xd8.e=\xd7\x11\xb6w\x02\xdb#f\xdf\ +\x03\x1b$\xcd\xa4\xf8,\x0c*\x0b|\x07\xf0;0i\ +{UR\xa4\xcd}\x8d\x02\x8fG\xcc~\x06n\x93\xf4\ +m\xaa\xdf\x5c\x01Y\xfe1N\xd8\xda/\x00\xae\x01\xde\ +\xb3}U\xaa\xf3:_\x9b\x81\xfd\x11\xb3?\x81;%\ +}\xde\x8a\xef\xa2\x11x\x0e\xd8\xdcP\xb6\x0cx\xdf\xf6\ +\x15\xa9\x0d\xd8\xbe\x11x\x95\xf9/\xe4\xf5\xfc\x05\xdc'\ +\xe9\xa3T\xbfs4\x15\x90\xf5\xd8\x839u\x96\x13F\ +by\xcc\xb9\xed*p\x18\x18\x8c\x98>,\xa9\x16\xf3\ +\xd7\x8c\xbc\x11\xb84Ro%A\xc4ey\x06\xb6W\ +\x10v\xd9\x98\xaf\xbd\x92\x9e\x8f\xd8\xe4\x92'\xe0 a\ +\x1d.\xe2j\xe0\x1d\xdb\x177>\xb0}\x09!\xf8\x15\ +\x11\x1f\xfb%\xed\x8eFY@S\x01\x92N\x13\xb2\xbf\ +/\x22\xf5W\x03Gl_8W`{\x90\xf0\xb7\xa9\ +F\xea\x8e\x13r\xfd\x8e\xc8\x9d\xc4\x92N\x02\xeb\x81\xaf\ +\x22>n\x00\x0e\xdb\x1e\xb4=@\x98\xb07E\xeaL\ +\x00\xc3\x92:\xfe\x02X\xb8\x0fH\xfa\x11X\x078\xe2\ +\xe7f\xa0FX*\x1bW\xaeF&\x81;$\xfd\x91\ +\x18c!\xd1\xddU\x92\x09y\xcb\x0f\x11\xd3\x8d\xc0h\ +\xc4\xc6\x84\x8d\xea\xa7\xb4\xf0\xe2$\xa5\x07\x92\xbe&\x8c\ +\xc4\x89\x0e\xda:IH\x11\xbe\xeb\xc0\xc7<\x92\xf3\x1b\ +I\xc7\x80\x0d\xc0/m\xb4\xf3\x1bp\xbb\xa4\xe3m\xd4\ +-\xa4\xa5\x04M\xd2g\xc0\xa6,\xa0Tf\x81-\x92\ +>i\xa5\xadTZ\xce0%M\x00w\x11r\x97\x14\ +\x1e\x90\xf4f\xab\xed\xa4\xd2V\x8a,\xe9]`\x0b\xa1\ +w\x8b\xd8)\xe9\xa5v\xdaH\xa5\xed\x1c_\xd28\xb0\ +\x8d\x90\x885\xe3YIO\xb4\xeb?\x95\x8e^R$\ +\x1d\x04\xeeg\xfe\x9c\xd8\x07\xec\xe8\xc4w*\x1d\x1f%\ 
+I:`\xfbc\xc2\x06v\x11\xf0a6O\x16\x84s\ +r\x16&\xe9\x1b\xe0\x99s\xe1\xabU\xca\x84\x13\xf0~\ +\xe5T\x85\x90\x9b\xd4\x7f\x9f\x19\xc8N\x03{\x91\xc6\xb7\ +\xba\xc9\x0a!3l\xfc\xc0T\xf4\xfa\xd7KL\x94\x81\ +=\x84c\xfb~c\x1a\xd8[\xcen}l\xa5\xbfD\ +\xcc]\xf6\x98\xf9\xf70!\xbb\xf4\xb1\x8b\xff\xae\xdb\x14\ +}\xab\xef\x06\xa78\xfb\xba\xcd\x09\x80\x7f\x00\xc4\x1e\x10\ +)3[\x85\xf7\x00\x00\x00\x00IEND\xaeB`\ +\x82\ +\x00\x00\x01\xef\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\xa1IDATh\x81\xed\ +\x9a\xbfN\xc2P\x14\x87\xbf\x96\xe2\xa4\x9bq\xbc\x8b\x1b\ +\xea\xc0\xe2D\x1c\x8c\x83\x83\xd1\x81\x89\x84\xd1\x17\xf0\x01\ +p\xc0\x07\xf0\x05\x1cI\x9c\xba\xa8#q0<\x02v\ +\x92\xe5\x8e\x04'\xe3\xc2\x9f\xc4\xa1m\x04B\x8b\x95\xc2\ +\xa1\xe4~[{\xee\xf0\xfb\x9a{o\x9a\x9cc\x11P\ +u\xbd]\xe0\x16(\x01\x87@\x9e\xf5b\x00\xb4\x81\x16\ +Po\x94\x0b=\x00\x0b\xa0\xeaz\xa7\xc0#\xb0'\x16\ +/\x19]\xa0\xd2(\x17^\xad\xe0\xcb\xbf\x93\x9d\xf0!\ +]\xe0\xc0\xc6\xdf6Y\x0b\x0f~\xe6\x9a\x83\xbf\xe7\xa7\ +\x19\xad8\xcc_\xc9M=\x97\x1c\xfc\x03;\xce\xa8Q\ +.8+\x0a\x94\x88\xaa\xeb\x0d\x99\x948\xb2Y\xbf\xdb\ +&\x09y[:\xc1\xa2\x18\x01i\x8c\x804F@\x1a\ +# \x8d\x11\x90\xc6\x08H3\xf7\xb7Yk}\x06\x1c\ +\x03\xdb\xcb\x8f3\xc1\x10\xf0\x80g\xa5\xd4w\xd4\xa2H\ +\x01\xad\xb5\x03\xb8\xc0e\xfa\xd9\x12\xd1\xd1Z\x9f+\xa5\ +>f\x15\xe3\xb6\xd0\x0d\xf2\xe1\x01\xf6\x81\x87\xa8b\x9c\ +\xc0E\xfaY\xfe\xcd\x89\xd6zgV!\xf3\x878N\ +\xe0ee)\xe6\xf3\xa6\x94\xfa\x9aU\x88\x13\xb8\x07\x9e\ +\x96\x93'\x11\x1d\xe0:\xaa\x18y\x0b)\xa5\x86\xc0U\ +f\xaf\xd1\x10\xa5T\x13h\xa6\x18,U6\xfa\x10g\ +\x02# \x8d\x11\x90\xc6\x08Hc\x04\xa41\x02\xd2l\ +\x84\xc0@:\xc4\x02\xf4\x1d\xfc\xf6}q\xece.\xe8\ +\x06\xae#\xd3m\xd6\xb6\x83?{P\x9c\xb3p]i\ +\xd9@\x1d\xbfm\x9f5\xba\xc0\x9d\x1dL}T\xc8\x96\ +D8\xec\xd1\xb3\xc27\xc1\xd0G\x8d\xdfq\x9b-\xa1\ +pQ\xf4\x99\x1c\xb7\xf9\x04\xf8\x01o\xedXc-\xfd\ +\xb2Y\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15;\xdc\ +;\x0c\x9b\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0cs> \x0b\xa4\x08020 \x0b\xa6\ +\x08000B\x98\x10\xc1\x14\x01\x14\x13P\xb5\xa3\x01\ +\x00\xc6\xb9\x07\x90]f\x1f\x83\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x00\xa5\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x02\x04m\ +\x98\x1bi\x00\x00\x00)IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0c\xff\xcf\xa3\x08\x18220 \x0b2\x1a\ +200B\x98\x10AFC\x14\x13P\xb5\xa3\x01\x00\ +\xd6\x10\x07\xd2/H\xdfJ\x00\x00\x00\x00IEND\ +\xaeB`\x82\ +\x00\x00\x00\xa5\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x02\x04m\ +\x98\x1bi\x00\x00\x00)IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0c\xff\xcf\xa3\x08\x18220 \x0b2\x1a\ 
+200B\x98\x10AFC\x14\x13P\xb5\xa3\x01\x00\ +\xd6\x10\x07\xd2/H\xdfJ\x00\x00\x00\x00IEND\ +\xaeB`\x82\ +\x00\x00\x00\xa5\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\x9cS4\xfc]\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x0b\x02\x04m\ +\x98\x1bi\x00\x00\x00)IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0c\xff\xcf\xa3\x08\x18220 \x0b2\x1a\ +200B\x98\x10AFC\x14\x13P\xb5\xa3\x01\x00\ +\xd6\x10\x07\xd2/H\xdfJ\x00\x00\x00\x00IEND\ +\xaeB`\x82\ +\x00\x00\x00\xa0\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1c\x1f$\ +\xc6\x09\x17\x00\x00\x00$IDAT\x08\xd7c`@\ +\x05\xff\xcf\xc3XL\xc8\x5c&dY&d\xc5p\x0e\ +\xa3!\x9c\xc3h\x88a\x1a\x0a\x00\x00m\x84\x09u7\ +\x9e\xd9#\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15;\xdc\ +;\x0c\x9b\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0cs> \x0b\xa4\x08020 \x0b\xa6\ +\x08000B\x98\x10\xc1\x14\x01\x14\x13P\xb5\xa3\x01\ +\x00\xc6\xb9\x07\x90]f\x1f\x83\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x01i\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\x1bIDATh\x81\xed\ +\xda\xb1m\xc2@\x14\x87\xf1\xcf\xc7\x91\x09P\x86pB\ +AO\xc5\x0a\xae\x90\xbc\x0a)\xc8*\x96Ry\x05*\ +F \x1e\xc2\x82\x05H\x90R\xdcY\x01KQbE\ +\xe2\xef\x93\xde\xaf\xb3E\xf1>\xcb\xa6\xb9\x97\x11\x95u\ +3\x03^\x80%\xf0\x0cL\x19\x97\x0f\xe0\x00\xec\x81m\ +U\xe4G\x80\x0c\xa0\xac\x9b\x15\xf0\x06<\xca\xc6\x1b\xa6\ +\x05\xd6U\x91\xef\xb2\xf8\xe4\xdfIg\xf8N\x0b<9\ +\xc2k\x93\xda\xf0\x10f\xdex\xc2;\xdfw\xb9\xf30\ +\x7f5\xe9]/=\xe1\x83\xbdv\xa9\x8a\xdc\xdfi\xa0\ +A\xca\xba\xf9\xe46b\xee\x18\xdf\xbf\xcd\x10S\xa7\x9e\ +\xe0\xbf,@\xcd\x02\xd4,@\xcd\x02\xd4,@\xcd\x02\ +\xd4,@\xcd\x02\xd4,@\xcd\x02\xd4,@\xcd\x02\xd4\ +,@\xcd\x02\xd4,@\xcd\x02\xd4,@\xcd\x11N\xc0\ +Su\xf6\x84\xe3\xfb\xc5\xd5\xcdI<\x0d\x1c\xa3\xfe1\ +\xeb\xc1\x13v\x0f\x16\xbf\xfcp\xac\xf6\x0e\xd8\x12\x8e\xed\ +S\xd3\x02\xaf.n}\xacI+\xa2[\xf68f\xdd\ +\x9d\xb8\xf4\xb1\xe1{\xdd\xe6A4\xdcO\xce\xdc\xae\xdb\ +\x9c\x00\xbe\x00\x9f\xf64>6O7\x81\x00\x00\x00\x00\ +IEND\xaeB`\x82\ +\x00\x00\x07\x06\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x0a\x00\x00\x00\x07\x08\x06\x00\x00\x001\xac\xdcc\ +\x00\x00\x04\xb0iTXtXML:com.\ +adobe.xmp\x00\x00\x00\x00\x00\x0a\x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a\x0a\x85\x9d\x9f\x08\x00\x00\x01\x83\ +iCCPsRGB IEC6196\ +6-2.1\x00\x00(\x91u\x91\xcf+DQ\x14\ +\xc7?fh\xfc\x18\x8dba1e\x12\x16B\x83\x12\ +\x1b\x8b\x99\x18\x0a\x8b\x99Q~mf\x9ey3j\xde\ +x\xbd7\xd2d\xabl\xa7(\xb1\xf1k\xc1_\xc0V\ +Y+E\xa4d\xa7\xac\x89\x0dz\xce\x9bQ#\x99s\ +;\xf7|\xee\xf7\xdes\xba\xf7\x5cpD\xd3\x8afV\ +\xfaA\xcbd\x8dp(\xe0\x9b\x99\x9d\xf3\xb9\x9e\xa8\xa2\ 
+\x85\x1a:\xf1\xc6\x14S\x9f\x8c\x8cF)k\xef\xb7T\ +\xd8\xf1\xba\xdb\xaeU\xfe\xdc\xbfV\xb7\x980\x15\xa8\xa8\ +\x16\x1eVt#+<&<\xb1\x9a\xd5m\xde\x12n\ +RR\xb1E\xe1\x13\xe1.C.(|c\xeb\xf1\x22\ +?\xdb\x9c,\xf2\xa7\xcdF4\x1c\x04G\x83\xb0/\xf9\ +\x8b\xe3\xbfXI\x19\x9a\xb0\xbc\x9c6-\xbd\xa2\xfc\xdc\ +\xc7~\x89;\x91\x99\x8eHl\x15\xf7b\x12&D\x00\ +\x1f\xe3\x8c\x10d\x80^\x86d\x1e\xa0\x9b>zdE\ +\x99|\x7f!\x7f\x8ae\xc9Ud\xd6\xc9a\xb0D\x92\ +\x14Y\xbaD]\x91\xea\x09\x89\xaa\xe8\x09\x19irv\ +\xff\xff\xf6\xd5T\xfb\xfb\x8a\xd5\xdd\x01\xa8z\xb4\xac\xd7\ +vpm\xc2W\xde\xb2>\x0e,\xeb\xeb\x10\x9c\x0fp\ +\x9e)\xe5/\xef\xc3\xe0\x9b\xe8\xf9\x92\xd6\xb6\x07\x9eu\ +8\xbd(i\xf1m8\xdb\x80\xe6{=f\xc4\x0a\x92\ +S\xdc\xa1\xaa\xf0r\x0c\xf5\xb3\xd0x\x05\xb5\xf3\xc5\x9e\ +\xfd\xecst\x07\xd15\xf9\xaaK\xd8\xd9\x85\x0e9\xef\ +Y\xf8\x06\x8e\xfdg\xf8\xfd\x8a\x18\x97\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00mIDAT\x18\x95u\xcf\xc1\x09\xc2P\ +\x10\x84\xe1\xd7\x85\x07\x9b\xd0C@\xd2\x82x\x14{0\ +W!\x8d\x84`?bKzH\xcc\x97\x83\xfb0\x04\ +\xdf\x9c\x86\x7fg\x99\xdd\x84\x0d\xaaT\x10jl\x13\x1e\ +\xbe\xba\xfe\x0951{\xe6\x8d\x0f&\x1c\x17\xa1S\xb0\ +\x11\x87\x0c/\x01\x07\xec\xb0\x0f?\xe1\xbc\xaei\xa3\xe6\ +\x85w\xf8[\xe9\xf0\xbb\x9f\xfa\xd2\x839\xdc\xa3[\xf3\ +\x19.\xa8\x89\xb50\xf7C\xa0\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1d\x00\xb0\ +\xd55\xa3\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x06\xfe\x9fg``B0\xa1\x1c\x08\x93\x81\x81\x09\xc1\ +d``b``4D\xe2 s\x19\x90\x8d@\x02\ +\x00d@\x09u\x86\xb3\xad\x9c\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x07\xad\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x07\x00\x00\x00\x0a\x08\x06\x00\x00\x00x\xccD\x0d\ +\x00\x00\x05RiTXtXML:com.\ +adobe.xmp\x00\x00\x00\x00\x00\x0a\x0a \x0a \x0a \x0a \ +\x0a branch_close<\ +/rdf:li>\x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a <\ +/rdf:RDF>\x0a\x0a$\xe15\x97\x00\x00\ +\x01\x83iCCPsRGB IEC61\ +966-2.1\x00\x00(\x91u\x91\xcf+D\ +Q\x14\xc7?fh\xfc\x18\x8dba1e\x12\x16B\ +\x83\x12\x1b\x8b\x99\x18\x0a\x8b\x99Q~mf\x9ey3\ +j\xdex\xbd7\xd2d\xabl\xa7(\xb1\xf1k\xc1_\ +\xc0VY+E\xa4d\xa7\xac\x89\x0dz\xce\x9bQ#\ +\x99s;\xf7|\xee\xf7\xdes\xba\xf7\x5cpD\xd3\x8a\ +fV\xfaA\xcbd\x8dp(\xe0\x9b\x99\x9d\xf3\xb9\x9e\ +\xa8\xa2\x85\x1a:\xf1\xc6\x14S\x9f\x8c\x8cF)k\xef\ +\xb7T\xd8\xf1\xba\xdb\xaeU\xfe\xdc\xbfV\xb7\x980\x15\ +\xa8\xa8\x16\x1eVt#+<&<\xb1\x9a\xd5m\xde\ +\x12nRR\xb1E\xe1\x13\xe1.C.(|c\xeb\ +\xf1\x22?\xdb\x9c,\xf2\xa7\xcdF4\x1c\x04G\x83\xb0\ +/\xf9\x8b\xe3\xbfXI\x19\x9a\xb0\xbc\x9c6-\xbd\xa2\ +\xfc\xdc\xc7~\x89;\x91\x99\x8eHl\x15\xf7b\x12&\ +D\x00\x1f\xe3\x8c\x10d\x80^\x86d\x1e\xa0\x9b>z\ +dE\x99|\x7f!\x7f\x8ae\xc9Ud\xd6\xc9a\xb0\ +D\x92\x14Y\xbaD]\x91\xea\x09\x89\xaa\xe8\x09\x19i\ +rv\xff\xff\xf6\xd5T\xfb\xfb\x8a\xd5\xdd\x01\xa8z\xb4\ +\xac\xd7vpm\xc2W\xde\xb2>\x0e,\xeb\xeb\x10\x9c\ +\x0fp\x9e)\xe5/\xef\xc3\xe0\x9b\xe8\xf9\x92\xd6\xb6\x07\ +\x9eu8\xbd(i\xf1m8\xdb\x80\xe6{=f\xc4\ +\x0a\x92S\xdc\xa1\xaa\xf0r\x0c\xf5\xb3\xd0x\x05\xb5\xf3\ +\xc5\x9e\xfd\xecst\x07\xd15\xf9\xaaK\xd8\xd9\x85\x0e\ +9\xefY\xf8\x06\x8e\xfdg\xf8\xfd\x8a\x18\x97\x00\x00\x00\ +\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\ +\x9c\x18\x00\x00\x00rIDAT\x18\x95m\xcf1\x0a\ +\xc2P\x14D\xd1\xe8\x02\xb4W\x08\xd6Ia\x99JC\ 
+t\x15\x82\xabI6(\xee@\x04\xdb\xa8\x95Xx,\ +\xf2\x09\xe1\xf3\x07\xa6\x9a\xfb\xe0\xbe\x0c\x1b\xb4Xdq\ +p0\xe4\x82U\x0a8\xe3\x8b\x1b\x8a\x14p\xc4\x1b=\ +v)`\x8b\x07>\xa8\xe6\xd1\xfe\x0b\x9d\x85\x8eW\x0d\ +^x\xa2\x9e\x0e\xa7 tG9\x1d\xf6\xe1\x95+\xd6\ +\xb1D\x8e\x0e\xcbX\xf0\x0fR\x8ay\x18\xdc\xe2\x02p\ +\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\x9f\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x14\x1f\xf9\ +#\xd9\x0b\x00\x00\x00#IDAT\x08\xd7c`\xc0\ +\x0d\xe6|\x80\xb1\x18\x91\x05R\x04\xe0B\x08\x15)\x02\ +\x0c\x0c\x8c\xc8\x02\x08\x95h\x00\x00\xac\xac\x07\x90Ne\ +4\xac\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x070\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x0a\x00\x00\x00\x07\x08\x06\x00\x00\x001\xac\xdcc\ +\x00\x00\x04\xb0iTXtXML:com.\ +adobe.xmp\x00\x00\x00\x00\x00\x0a\x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a\x0aH\x8b[^\x00\x00\x01\x83\ +iCCPsRGB IEC6196\ +6-2.1\x00\x00(\x91u\x91\xcf+DQ\x14\ +\xc7?fh\xfc\x18\x8dba1e\x12\x16B\x83\x12\ +\x1b\x8b\x99\x18\x0a\x8b\x99Q~mf\x9ey3j\xde\ +x\xbd7\xd2d\xabl\xa7(\xb1\xf1k\xc1_\xc0V\ +Y+E\xa4d\xa7\xac\x89\x0dz\xce\x9bQ#\x99s\ +;\xf7|\xee\xf7\xdes\xba\xf7\x5cpD\xd3\x8afV\ +\xfaA\xcbd\x8dp(\xe0\x9b\x99\x9d\xf3\xb9\x9e\xa8\xa2\ +\x85\x1a:\xf1\xc6\x14S\x9f\x8c\x8cF)k\xef\xb7T\ +\xd8\xf1\xba\xdb\xaeU\xfe\xdc\xbfV\xb7\x980\x15\xa8\xa8\ +\x16\x1eVt#+<&<\xb1\x9a\xd5m\xde\x12n\ +RR\xb1E\xe1\x13\xe1.C.(|c\xeb\xf1\x22\ +?\xdb\x9c,\xf2\xa7\xcdF4\x1c\x04G\x83\xb0/\xf9\ +\x8b\xe3\xbfXI\x19\x9a\xb0\xbc\x9c6-\xbd\xa2\xfc\xdc\ +\xc7~\x89;\x91\x99\x8eHl\x15\xf7b\x12&D\x00\ +\x1f\xe3\x8c\x10d\x80^\x86d\x1e\xa0\x9b>zdE\ +\x99|\x7f!\x7f\x8ae\xc9Ud\xd6\xc9a\xb0D\x92\ +\x14Y\xbaD]\x91\xea\x09\x89\xaa\xe8\x09\x19irv\ +\xff\xff\xf6\xd5T\xfb\xfb\x8a\xd5\xdd\x01\xa8z\xb4\xac\xd7\ +vpm\xc2W\xde\xb2>\x0e,\xeb\xeb\x10\x9c\x0fp\ +\x9e)\xe5/\xef\xc3\xe0\x9b\xe8\xf9\x92\xd6\xb6\x07\x9eu\ +8\xbd(i\xf1m8\xdb\x80\xe6{=f\xc4\x0a\x92\ +S\xdc\xa1\xaa\xf0r\x0c\xf5\xb3\xd0x\x05\xb5\xf3\xc5\x9e\ +\xfd\xecst\x07\xd15\xf9\xaaK\xd8\xd9\x85\x0e9\xef\ +Y\xf8\x06\x8e\xfdg\xf8\xfd\x8a\x18\x97\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x97IDAT\x18\x95m\xcf\xb1j\x02A\ +\x14\x85\xe1o\xb7\xb6\xd0'H=Vi\x03\xb1\xb4H\ +;l\xa5\xf19\xf6Y\x02VB\xbaa\x0a\x0b;\x1b\ +\x1bkA\x18\x02)m\xe3\xbe\x82\xcd\x06\x16\xd9\xdb\xdd\ +\x9f\xff\x5c\xee\xa9b*\x13Ls\x13nF&\xa6\xf2\ +\x82\xaeF\x8b\xdf\x98\xca\xfb\x88\xb4\xc0\x0f\xda\x1a[t\ +\xd8\xc7T\xc2@\x9ac\x8f?|U=|\xc5\x09w\ +\xbc\xa1\xc2\x193,r\x13.\xd5\xe0\xc2\x12\x07\x5cQ\ +#\xe0#7\xe1\xa8O\x0e\x7f\xda`\xd7\xaf\x9f\xb9\x09\ +\xdfc\x05\xff\xe5uLe\xf5\xcc\x1f\x0d3,\x83\xb6\ +\x06D\x83\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x03\xff\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x03\xb1IDATh\x81\xed\ +\x9aOh\x1eE\x18\xc6\x7f\x9b&\x85\x82\x15\x15\xabB\ +\xcb\x03\x06\x05\xa9\x0a\x8a\xb7R<\xd4\x96\xaa\xb5\xe0A\ +\xad\xc5C%\xa0\x07Q\xccM(\xb4\x07E\x80\xae9\x1f\xc0\xff\ +\x81\xed\xbbm\xbff{W\xca6\x0b!\x84BY\xa7\ +\xdb\xa8\xedG\x81#\xf9\xcf\x00\x1c\x05&%-\x94l\ +\xa3\xc35\x03\xb6\xef\x05\x9e\xe9)\xca\x80\x87\x80\x17\xab\ +\xda\x0c\xcd\x81e{'\xf0\x0aQt\x91\x03\xb6\xbf*\ 
+k7\x143`\xfb&\xe0M`}\x8d\xd9me\x85\ +\x9d\x07`\xfb*\xe0]`c\xc2\xf4\xa5\xb2\xc2N\x03\ +\xb0}9\xf0>\xe9l\xf8iI\xaf\x97Ut\x16\x80\ +\xed\x8d\xc0\x140\x9e0}\x15x\xac\xaa\xb2\x93\x00l\ +\xaf\x07\xde\x02nL\x98\xbe\x07LH*n\xf5K\xac\ +z\x00\xb6G\x88\xa3zK\xc2\xf4\x0b\xe0.I\x8bu\ +F]\xcc\xc0\x11\xe0\x9e\x84\xcd\xb7\xc0\x1eI\xbf\xa5\x9c\ +5\x0a\xc0v\x96\x8f\xdc@\xd8>\x08<\x920\xfb\x11\ +\xd8-i.a\x07$\x02\xc8\x85O\x02\x7f\x003\xb6\ +\xafo\xa4\xb4\xdc\xd7\x04\xf0d\xc2\xec\x17\xe0VI\xdf\ +7\xf5[\x99\x0b\xd9\x1e\x03\x8e\x03{{\xeaf\x81\x9b\ +%}\xd3\xb4\x03\x00\xdb{\x89\x8b\xb6x\xa7\xed\xe5,\ +q\xe4?\xab2\xe87\x17z\x8e\xe5\xe2!\xee\xd7\x1f\ +\xdb\xbe\xb2Vq\x0f\xb6\xb7\x01o\x14;.\xf0\x17p\ +_\x9d\xf8*J\x03\xc8G\xec\xc1\x8a6\x9b\x81\x8fl\ +oN9\xb7\xbd\x15x\x1b\xd8\x900}X\xd2\xf1\x94\ +\xbf2\xaaf\xe0\x92D\xbbqb\x10\x9b\xaa\x0clo\ +!\x9e\xb2)_OH:\x9a\xb0\xa9\xa4*\x80c\xc4\ +}\xb8\x8ek\x80\x0fl_T\xac\xb0}1Q\xfc\x96\ +\x84\x8f\x17$\x1dN\xaa\xac\xa14\x00I\x0b\xc4\xec\xaf\ +4\x85\xed\xe1\x06`\xca\xf6\x05\xff\x14\xd8\xde@|l\ +\xb6&\xda\x9e \xe6\xfa\x03Q{#\xcb\x93\xad\x93\xc0\ +\xd5\x09?\x9f\x02\xb7\x03\xf3\xc4\xdd\xa6\xb8\xf8\x8bL\x03\ +\xbb$\xfd\xd9\x8f\xd8\xb2](y\xa5\xb4-b\x10J\ +\xf8\x9f\x22\x1eB\x13\x09\xbb\x19\xe2V\xfcs#\xd5=\ +\xb4\x0a\x00\x96r\xf6\x93\xc0\x15\xfdvZ\xc0\xc06I\ +?\xb4i\xdc\xfaN,\xe9;`'p\xa6M\xc79\ +?\x11\x0f\xaaV\xe2\xabh\x9c\xdfH:\x05\xec\x06~\ +m\xd1\xcf\xef\xc0\x1d\x92\xben\xd1\xb6\x96\xbe\x124I\ +_\x02{rAMY\x04\xf6I\xfa\xbc\x9f\xbe\x9a\xd2\ +w\x86)i\x1a\xb8\x93\x98\xbb4\xe1\x01I\xef\xf4\xdb\ +OSZ\xa5\xc8\x92>\x04\xf6\x11G\xb7\x8e\x83\x92^\ +n\xd3GSZ\xe7\xf8\x92N\x00\x07\x88\x89X\x19\xcf\ +Jz\xaa\xad\xff\xa6\x0ctI\x91t\x0c\xb8\x9f\xff\xae\ +\x89\xe7\x81\xc9A|7eE\xde\x8d\xda\x1e'\x9e\xbe\ +\x17\x02\x9f\xe4\xebd\xc5i}\x90\x0d\x0bU\x07\xd9B\ +7rV\x84\xf9Qbn\xd2\xfb~f]\x1e\xe90\ +R\xbc\xd5\xcd\x8c\x123\xc3\xe2\x0b\xa6\xba\xeb\xdf01\ +\xbd\xf6?5\xc8\xbf\xfa\xd8\x9f\x17\xac\x15f\x81\xfdY\ +\x96\xcd-\xfd\x99\x90\xcf\xc4!\xfe\xfd\xdc\xa6\xee]}\ +\x17\xcc\xb3\xfcs\x9b3\x00\x7f\x03\xd9\x1a\xfb\xdb\xbb\xa7\ +\x8f\x07\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x01[\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\x0dIDATh\x81\xed\ +\xda\xb1m\x02A\x10F\xe1w\xc7\xe2\x0a,\x87\xd3\x00\ +8 'r\x17\x14\x83\x037\xe3.\x1cQ\x0240\ +!\xc2\x0d`\x90\x1c\xec\x9e\x0c'Y\xf6\x09\x89\xffV\ +\x9a/cE0\x0f\x0e\x92\x9d\x86\xc2\xdd\x1f\x81W`\ +\x09\xcc\x81)\xe3\xf2\x05l\x81\x0d\xf0ff\x07\x80\x06\ +\xc0\xdd_\x80w\xe0I6\xde0{`ef\x1fM\ +\xf9\xe4w\xd43|g\x0f\xccZ\xf2cS\xdb\xf0\x90\ +g^'\xf23\xdfw\xbe\xf30\xff5\xe9\xbd^&\ +\xf2\x0f\xf6\xd2\xd9\xcc\xd2\x9d\x06\x1a\xc4\xddO\x5cG<\ +\xb7\x8c\xef\xdff\x88i\xab\x9e\xe0V\x11\xa0\x16\x01j\ +\x11\xa0\x16\x01j\x11\xa0\x16\x01j\x11\xa0\x16\x01j\x11\ +\xa0\x16\x01j\x11\xa0\x16\x01j\x11\xa0\x16\x01j\x11\xa0\ +\x16\x01j\x11\xa0\xd6\x92o\xc0kuL\xe4\xeb\xfb\xc5\ +\xc5\xe1\xa4\xdc\x06\x8eQ\xff\x9au\x9b\xc8\xbb\x07\x8b?\ +\xde8V\x9b\xfaW\x0d\xca\xd6\xc7\xaa\x1c\xd4\xa2[\xf6\ +84\xddI\xf9&\xd6\xfc\xac\xdb<\x88\x86\xfb\xcd\x91\ +\xebu\x9bO\x80oV\x016\x1ew\x0d\xa5B\x00\x00\ +\x00\x00IEND\xaeB`\x82\ +\x00\x00\x05~\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00\x00\x00\x1f\x15\xc4\x89\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x05\x17iTXtXML\ +:com.adobe.xmp\x00\x00\ +\x00\x00\x00 \ + \x07b\x0c\x81\x00\x00\x00\x0dIDAT\ 
+\x08\x1dc\xf8\xff\xff?\x03\x00\x08\xfc\x02\xfe\xe6\x0c\xff\ +\xab\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15;\xdc\ +;\x0c\x9b\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x00\x8c\x0c\x0cs> \x0b\xa4\x08020 \x0b\xa6\ +\x08000B\x98\x10\xc1\x14\x01\x14\x13P\xb5\xa3\x01\ +\x00\xc6\xb9\x07\x90]f\x1f\x83\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x043\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x03\xe5IDATh\x81\xed\ +\x9aM\x88\x1cE\x14\xc7\x7f3;\x89.\x18\xf13\x1e\ +\x92\x8b\x8b\xa2&\x06\x14/\xa29\x88\x15\x89\xa9\x18\x15\ +\xd1\xca\x06\x0f\x91\x05=\x88bnB\xc0\x1c\x12\xc4\x83\ +\x07\x15\x0dB\x14\x89\x04\xa2\x96DY\x22/F}\x8a\ +\xb0\x22\x08\x1e4\xbb\x22F\x8c,\x88\xba\xc4\x88\x82_\ +\xc9F=T\x0f\x8c\xbd\xdd]\xdd\xd3a\xa7\x07\xfc\xdd\ +\xa6\xfa\xd5\xab\xf7\xea\xf3\xdf\xd5\xd3\x22\xc1Xw\x11\xb0\ +\x03X\x0b\x5c\x0d,\xa1Y\x9c\x02\xa6\x81)`\xa7\x8a\ +?\x0e\xd0\x020\xd6\xdd\x0c\xbc\x02,\x1fXx\xd5\x98\ +\x03\xb6\xa8\xf8\xf7[I\xcf\xcf0<\xc1w\x99\x03V\ +\xb7\x09\xd3f\xd8\x82\x87\x10\xf3c\x1d\xc2\x9cOsz\ +\x91\x83)\xcbH\xea\xf7\xda\x0ea\xc1\xf6rZ\xc5w\ +\x16)\xa0J\x18\xeb\xe6\xf9o\x12k\xda4o\xb7\xa9\ +\xc2\x92\xf6\xa0#\xa8\xcb\xff\x09\x0c\x9a\xa1O\xa0\xa9\xbb\ +\xcd=\xc0]\xc0K*\xfe\xdd\x22\xdb\xc6\x8d\x80\xb1\xee\ +\x11\xc0\x03\xe3\xc0ac\xddnc]\xeeN\xd9\xa8\x04\ +\x8cu\xe3\xc0S=E-\xe0A\xe0\x85\xbc:\x8d\x99\ +B\xc6\xbau\xc0\xcb$\x023\xc5Vc\xdd\x91\xacz\ +\x8d\x18\x01c\xddu\xc0\x1b\xc0\xd2\x02\xb3\x0dY\x85\x03\ +O\xc0Xw\x19 \xc0\xb2\x88\xe9\x8bY\x85\x03M\xc0\ +Xw\x09p\x98\xb8\x1a~R\xc5\xbf\x9a\xf5``\x09\ +\x18\xeb\x96\x01\x87\x80\xb1\x88\xe9>\xe0\xd1\xbc\x87\x03I\ +\xc0X\xb7\x14x\x13\xb86b\xfa60\xa1\xe2\xff\xc9\ +3X\xf4\x04\x8cumB\xaf\x9a\x88\xe9'\xc0\xdd*\ +~\xbe\xc8h\x10#\xf04\xe0\x226_\x01\x1bU\xfc\ +o1g\xa5\x120\xd6\xb5\x92\x9e\xab\x85\xb1n;\xf0\ +p\xc4\xec{`}\xf7\xd6!FaPI\xe0\xdb\x80\ +?\x80ic\xdd\x9aR\x91f\xfb\x9a\x00\x1e\x8f\x98\xfd\ +\x02\xdc\xaa\xe2\xbf-\xeb77\x81D\x7fL\x12\x8e\xf6\ +\xb3\x80\xab\x80\xf7\x8cuW\x94u\xde\xe3k\x13\xb0'\ +b\xf6\x17p\x87\x8a\xff\xbc\x8a\xef\xa2\x11x\x0e\xd8\x94\ +*[\x0e\xa8\xb1\xee\xd2\xb2\x0d\x18\xebn\x00^c\xe1\ +\x0by/\x7f\x03\xf7\xaa\xf8\x0f\xcb\xfa\xed\x92\x99@\xd2\ +c\x0f\xe4\xd4YA\x18\x89\x151\xe7\xc6\xbaU\xc0A\ +`4b\xfa\x90\x8a?\x10\xf3\x97E\xde\x08\x5c\x10\xa9\ +7FH\xe2\xe2<\x03c\xddJ\xc2)\x1b\xf3\xb5K\ +\xc5?\x1f\xb1\xc9%/\x81\xfd\x84}\xb8\x88+\x81w\ +\x8cu\xe7\xa5\x1f\x18\xeb\xce'\x04\xbf2\xe2c\x8f\x8a\ +\xdf\x11\x8d\xb2\x80\xcc\x04T\xfc)\x82\xfa\xcb\x94\xb0=\ +\x5c\x03\x1c2\xd6\x9d\xd3-0\xd6\x8d\x12\xa6\xcd\xaaH\ +\xddI\x82\xd6\xafE\xee\x22V\xf1'\x80[\x80\xa3\x11\ +\x1f\xd7\x03\x07\x8du\xa3\xc6\xba\x11\xc2\x82\xbd1Rg\ +\x0a\x18W\xf1\xb5o\x00\x0b\xcf\x01\x15\xff#\xb0\x0e\x98\ +\x8d\xf8\xb9\x098@\xd8*\xd3;W\x9ai\xe0v\x15\ +\xffg\xc9\x18\x0b\x89\x9e\xae*~\x96\xa0[~\x88\x98\ +n\x00&\x226\xb3\x84\x83\xea\xe7r\xe1\xc5)%\x0f\ +T\xfc\xd7\x84\x91\xf8\xa9F['\x08\x12\xe1\xbb\x1a>\ +\x16PZ\xdf\xa8\xf8\x19`=\xf0k\x1f\xed\xfc\x0e\xdc\ +\xa6\xe2\xbf\xec\xa3n!\x95\x04\x9a\x8a\xff\x14\xd8\x98\x04\ +T\x96y`\xb3\x8a\xff\xb8J[e\xa9\xac0U\xfc\ +\x14p'A\xbb\x94\xe1~\x15\xffV\xd5v\xca\xd2\x97\ 
+DNn\xcb6\x13z\xb7\x88\xed*~o?m\x94\ +\xa5o\x8d\xaf\xe2'\x81\xad\x04!\x96\xc5\xb3*\xfe\x89\ +~\xfd\x97\xa5\xd6K\x8a\x8a\xdf\x0f\xdc\xc7\xc25\xb1\x1b\ +\xd8V\xc7wYj\xdf\xcc\xa9\xf8}\xc6\xba\x8f\x08\x07\ +\xd8\xb9\xc0\x07\xc9:Y\x14\xce\xc8\xd5\xa2\x8a\xff\x06x\ +\xe6L\xf8\xaaJ\x9b\xf0\x05|X9\xd9!h\x93\xde\ +\xfb\x99\x91\xe4k`\x13I\xbf\xd5Mw\x08\xca0}\ +\xc1T\xf4\xfa\xd7$\xa6\xda\xc0N\xc2g\xfbac\x0e\ +\xd8\xd5N\xee_\xb60\x5cIt\xff\xecq|\x04\xe0\ +\xd8\xd1\x99cc\x97\xaf\xde\x0b\x9cM\xf8\xf0}!\xcd\ +\x9bF'\x81\xcf\x80\xd7\x01\xa7\xe2\xbf\x00\xf8\x17]\x81\ +\x0b8\xb3\xfa \x9c\x00\x00\x00\x00IEND\xaeB\ +`\x82\ +\x00\x00\x00\xa0\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1c\x1f$\ +\xc6\x09\x17\x00\x00\x00$IDAT\x08\xd7c`@\ +\x05\xff\xcf\xc3XL\xc8\x5c&dY&d\xc5p\x0e\ +\xa3!\x9c\xc3h\x88a\x1a\x0a\x00\x00m\x84\x09u7\ +\x9e\xd9#\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x01\xdc\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\x8eIDATh\x81\xed\ +\x9a\xafN\xc3P\x14\x87\xbfn\x1d\x0a\x1cA\x1e\x83\x04\ +\xc4\x0cjA\x10\x04\x82\x80\x9e\xe7\x05x\x80!x\x01\ +^\x00\x8f\xc2\x00rA\x90=\xc2@1s\xe42\x14\ +\xc1\xecO\x82h\x1b\xb6e\xed(\xebv\xda\xe5~\xae\ +\xf7\x5c\xf1\xfb\xda{o\x9a\xdc\xe3\x11\xa2\xaa\xdb\xc05\ +P\x03\xf6\x81\x0a\xf9b\x00\xb4\x81\x16p#\x22=\x00\ +\x0f@U\x8f\x81{`\xc7,^:\xba@]D^\ +\xbc\xf0\xcd\xbfQ\x9c\xf0\x11]`\xafD\xb0l\x8a\x16\ +\x1e\x82\xcc\x0d\x9f`\xcdO3Zq\x98\xbfR\x9ez\ +\xae\xf9\x04\x1bv\x9c\x91\x88\xf8+\x0a\x94\x0aU\x1d2\ +)qP\x22\x7f\xa7M\x1a*%\xeb\x04\x8b\xe2\x04\xac\ +q\x02\xd68\x01k\x9c\x805N\xc0\x1a'`\xcd\xdc\ +\xdffU=\x01\x0e\x81\xcd\xe5\xc7\x99`\x08\xbc\x03O\ +\x22\xf2\x1d7)V@U}\xe0\x018\xcf>[*\ +:\xaaz*\x22\x1f\xb3\x8aIK\xe8\x0a\xfb\xf0\x00\xbb\ +\xc0]\x5c1I\xe0,\xfb,\xff\xe6HU\xb7f\x15\ +\x0a\xbf\x89\x93\x04\x9eW\x96b>\xaf\x22\xf25\xab\x90\ +$p\x0b<.'O*:\xc0e\x5c1\xf6\x14\x12\ +\x91!pQ\xd8c4BD\x9a@3\xc3`\x99\xb2\ +\xd6\x9b\xb8\x108\x01k\x9c\x805N\xc0\x1a'`\x8d\ +\x13\xb0f-\x04\x06\xd6!\x16\xa0\xef\x13\x5c\xdfW\xc7\ +\x06\xcb\xe1m`\x1e\x99\xbefm\xfb\x04\xbd\x07\xd59\ +\x13\xf3J\xab\xf8\xad\x06a\xd7G=\x1c(\x0aQ\xb3\ +G\xcf\x8bF\xc2/\xd1\xe0\xb7\xddf\xc3(\x5c\x1c}\ +&\xdbm>\x01~\x00%\xf8ZCUN:\x7f\x00\ +\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x01\xfc\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\xaeIDATh\x81\xed\ +\x9a\xbdJ\x03A\x14FO6\x1b\xb0\xd0J\xf1\x01\x14\ +\xabh\x91\xc6*X\xb8\x16\xb2\x88v\x0b\xe9}\x01\x1f\ +@\x8b\xf8\x00\xbe\x80\x85\x9d0b\xa32U\xc6B\xf2\ +\x02B\x92F\x83}H'6\xf9\x01\x8b\xdd@\x12\xb2\ +\x89k~f7\xcc\xe9v\xef\x14\xdfY\xee\x0c\x0bw\ +R\x048\xae\xb7\x01\x5c\x01y`\x17\xc8\x10/\xda@\ +\x05(\x03E%E\x13 \x05\xe0\xb8\xde!p\x0fl\ +j\x8b\x17\x8d\x06PPR\xbc\xa6\x82/_%9\xe1\ +{4\x80\xac\x85\xdf6I\x0b\x0f~\xe6K\x1b\xbf\xe7\ +\x87\xe9.8\xcc_I\x0f=\xe7m\xfc\x0d\xdbOW\ +Ia/(P$\x1c\xd7\xeb0(\xb1g\x11\xbf\xd3\ +&\x0a\x19Kw\x82i1\x02\xba1\x02\xba1\x02\xba\ +1\x02\xba1\x02\xba1\x02\xba\x99\xf8\xdb\xec\xb8\xde\x11\ +\xb0\x0f\xac\xce?\xce\x00\x1d\xa0\x06<+)~\xc2\x16\ 
+\x85\x0a8\xaeg\x03\x8f\xc0\xe9\xec\xb3E\xa2\xee\xb8\xde\ +\xb1\x92\xe2sTq\x5c\x0b]\xa0?<\xc06p\x1b\ +V\x1c'p2\xfb,\xff\xe6\xc0q\xbd\xb5Q\x85\xc4\ +o\xe2q\x02/\x0bK1\x997%\xc5\xf7\xa8\xc28\ +\x81\x1b\xe0i>y\x22Q\x07\xce\xc3\x8a\xa1\xa7\x90\x92\ +\xa2\x03\x9c%\xf6\x18\xed\xa1\xa4(\x01\xa5\x19\x06\x9b)\ +K\xbd\x89\x13\x81\x11\xd0\x8d\x11\xd0\x8d\x11\xd0\x8d\x11\xd0\ +\x8d\x11\xd0\xcdR\x08\xb4u\x87\x98\x82\x96\x8d?\xbe\xcf\ +\xf5\xbdL\x07\xd3\xc082\xaa_[\ +;\xd9;`\x05\x7f\xf0\xbdN\xfc\xda\xa8\x05\xbc\x03\x0f\ +\x80\xa7\xa4\xa8\x01\xfc\x02Q\xab\x5c\x8a?\xde\xe3Y\x00\ +\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\x9e\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15\x0f\xfd\ +\x8f\xf8.\x00\x00\x00\x22IDAT\x08\xd7c`\xc0\ +\x0d\xfe\x9f\x87\xb1\x18\x91\x05\x18\x0d\xe1BH*\x0c\x19\ +\x18\x18\x91\x05\x10*\xd1\x00\x00\xca\xb5\x07\xd2v\xbb\xb2\ +\xc5\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\xa6\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1d\x00\xb0\ +\xd55\xa3\x00\x00\x00*IDAT\x08\xd7c`\xc0\ +\x06\xfe\x9fg``B0\xa1\x1c\x08\x93\x81\x81\x09\xc1\ +d``b``4D\xe2 s\x19\x90\x8d@\x02\ +\x00d@\x09u\x86\xb3\xad\x9c\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +\x00\x00\x00\x9e\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce|N\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x08\x15\x0f\xfd\ +\x8f\xf8.\x00\x00\x00\x22IDAT\x08\xd7c`\xc0\ +\x0d\xfe\x9f\x87\xb1\x18\x91\x05\x18\x0d\xe1BH*\x0c\x19\ +\x18\x18\x91\x05\x10*\xd1\x00\x00\xca\xb5\x07\xd2v\xbb\xb2\ +\xc5\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x07\xdd\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x07\x00\x00\x00\x0a\x08\x06\x00\x00\x00x\xccD\x0d\ +\x00\x00\x05RiTXtXML:com.\ +adobe.xmp\x00\x00\x00\x00\x00\x0a\x0a \x0a \x0a \x0a \ +\x0a branch_close<\ +/rdf:li>\x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a \x0a <\ +/rdf:RDF>\x0a\x0aX\xad\xf2\x80\x00\x00\ +\x01\x83iCCPsRGB IEC61\ +966-2.1\x00\x00(\x91u\x91\xcf+D\ +Q\x14\xc7?fh\xfc\x18\x8dba1e\x12\x16B\ +\x83\x12\x1b\x8b\x99\x18\x0a\x8b\x99Q~mf\x9ey3\ +j\xdex\xbd7\xd2d\xabl\xa7(\xb1\xf1k\xc1_\ +\xc0VY+E\xa4d\xa7\xac\x89\x0dz\xce\x9bQ#\ +\x99s;\xf7|\xee\xf7\xdes\xba\xf7\x5cpD\xd3\x8a\ +fV\xfaA\xcbd\x8dp(\xe0\x9b\x99\x9d\xf3\xb9\x9e\ +\xa8\xa2\x85\x1a:\xf1\xc6\x14S\x9f\x8c\x8cF)k\xef\ +\xb7T\xd8\xf1\xba\xdb\xaeU\xfe\xdc\xbfV\xb7\x980\x15\ +\xa8\xa8\x16\x1eVt#+<&<\xb1\x9a\xd5m\xde\ +\x12nRR\xb1E\xe1\x13\xe1.C.(|c\xeb\ +\xf1\x22?\xdb\x9c,\xf2\xa7\xcdF4\x1c\x04G\x83\xb0\ +/\xf9\x8b\xe3\xbfXI\x19\x9a\xb0\xbc\x9c6-\xbd\xa2\ +\xfc\xdc\xc7~\x89;\x91\x99\x8eHl\x15\xf7b\x12&\ +D\x00\x1f\xe3\x8c\x10d\x80^\x86d\x1e\xa0\x9b>z\ +dE\x99|\x7f!\x7f\x8ae\xc9Ud\xd6\xc9a\xb0\ +D\x92\x14Y\xbaD]\x91\xea\x09\x89\xaa\xe8\x09\x19i\ +rv\xff\xff\xf6\xd5T\xfb\xfb\x8a\xd5\xdd\x01\xa8z\xb4\ +\xac\xd7vpm\xc2W\xde\xb2>\x0e,\xeb\xeb\x10\x9c\ 
+\x0fp\x9e)\xe5/\xef\xc3\xe0\x9b\xe8\xf9\x92\xd6\xb6\x07\ +\x9eu8\xbd(i\xf1m8\xdb\x80\xe6{=f\xc4\ +\x0a\x92S\xdc\xa1\xaa\xf0r\x0c\xf5\xb3\xd0x\x05\xb5\xf3\ +\xc5\x9e\xfd\xecst\x07\xd15\xf9\xaaK\xd8\xd9\x85\x0e\ +9\xefY\xf8\x06\x8e\xfdg\xf8\xfd\x8a\x18\x97\x00\x00\x00\ +\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\ +\x9c\x18\x00\x00\x00\xa2IDAT\x18\x95U\xcf\xb1J\ +\xc31\x00\xc4\xe1/\xff\xb9\x93\xa3\x93\xb8\xa5\x8b\x0f \ +UD\x10\x5c:\x84,\x1d\x5c|\x0f\xb7\x8e>J\x88\ +\xa3\xb8\x08m\x05\xbbw\xc8\xea\xe2\x0bto\xe9\xd2B\ +zpp\xf0\xe3\x0e.\xa4\xd2\xae\xf0\x8a\xf7\x9a\xe3V\ +\xa7\x01\xd7x\xc32\x95vy\x06k\x8e\xdfx\xc1\x18\ +\xbf\xa9\xb4\xf1\x09\x86SH\xa5=\xe2\x03;Lk\x8e\ +\xab\xd0\xcf\xa4\xd2n\xf0\x89\x0b\xdc\x0f\xce\xb5?: \ +\x0c]\xeb\x01?\x18\xe1\xa9\xe6\xb8\x1e\x8e`\x86/l\ +q[s\x5c@H\xa5\xdda\x81\x0d\x9ek\x8e\xff\xfd\ +\xcf?\xcc1\xe9\x01\x1c\x00sR-q\xe4J\x1bi\ +\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x03\xfb\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x03\xadIDATh\x81\xed\ +\x9aO\xa8\x15U\x1c\xc7?\xf7\xbe\xab\xf2 \xa3\x22m\ +\xa1|!\x09*K(\xdaD\xb9\x88RL\xccjQ\ +\xf9\xa4\x85\xf1\xa0\x16Q\xe4.\x10tQD\x8b\x16\x15\ +%\x81\xb5(\x04\xad\xc0\xe2ad\xf6\x97\xe0E\x10\xb4\ +\xa9'DE\xc4\x17\xa2\x125\x0a*\xff<\xab\xc5\x99\ +[\xd7y3s\xce\xdcko\xee\x05?\xbb9\xf3;\ +\xbf\xf3\xfb\x9d3\xe7\x9c\xef\x9c\x99\x16\x19\xb6/\x06v\ +\x00\xab\x81\xab\x81\x05\x0c\x17\xa7\x80\x19`\x1axL\xd2\ +\x11\x80\x16\x80\xed\x9b\x81\xbd\xc0\xd2\xc6\xc2\xab\xc7a`\ +\xb3\xa4\x0f[Y\xcf\x1fbt\x82\xefr\x18\xb8\xaaM\ +xlF-x\x081o\xef\x10\x9e\xf9<\xa7\xe79\ +\x98T\xc6r\xd7\xab;\x84\x09\xdb\xcbiI\x9dy\x0a\ +\xa8\x16\xb6g93\x89Um\x86o\xb5\xa9\xc3\x82v\ +\xd3\x11\x0c\xca\xb9\x04\x9a\xe6\x5c\x02\xff\x07\xb6\xef\xb6\xbd\ +\xd7\xf6\xda\x98\xed\xd0%`\xfb\x11\xe0u`\x028h\ +{\xa7\xed\xd2\x95r\xa8\x12\xb0=\x01<\xddS\xd4\x02\ +\x1e\x04^,\xab34\x1b\x96\xed5\xc0+d\x023\ +\xc7\x16\xdb_\x16\xd5\x1b\x8a\x11\xb0}\x1d\xf0\x06\xb0\xb0\ +\xc2l}Qa\xe3\x09\xd8\xbe\x0cx\x1bX\x1c1}\ +\xa9\xa8\xb0\xd1\x04l_\x02\x1c$\xae\x86\x9f\x92\xf4j\ +\xd1\x8d\xc6\x12\xb0\xbd\x188\x00\xac\x88\x98\xee\x06\x1e-\ +\xbb\xd9H\x02\xb6\x17\x02o\x02\xd7FL\xdf\x01&%\ +\xfd]f0\xef\x09\xd8n\x13z\xf5\x96\x88\xe9g\xc0\ +]\x92f\xab\x8c\x9a\x18\x81g\x80{\x226_\x03\x1b\ +$\xfd\x1es\x96\x94\x80\xedV\xd6s\x03a{\x1b\xf0\ +p\xc4\xecG`]\xf7\xd4!FePY\xe0[\x81\ +?\x81\x19\xdb\xab\x92\x22-\xf65\x09<\x111\xfb\x15\ +\xb8U\xd2\xf7\xa9~K\x13\xc8\xf4\xc7\x14ak_\x04\ +\x5c\x09\xbco\xfb\xf2T\xe7=\xbe6\x02\xbb\x22f'\ +\x80;$}Q\xc7w\xd5\x08<\x0fl\xcc\x95-\x05\ +>\xb0}ij\x03\xb6o\x00^c\xee\x0by/\x7f\ +\x01\xf7J\xfa8\xd5o\x97\xc2\x04\xb2\x1e{\xa0\xa4\xce\ +2\xc2H,\x8b9\xb7\xbd\x12\xd8\x0f\x8cGL\x1f\x92\ +\xb4/\xe6\xaf\x88\xb2\x11\xb8(Ro\x05!\x89%e\ +\x06\xb6\x97\x13v\xd9\x98\xaf\xc7%\xbd\x10\xb1)\xa5,\ +\x81=\x84u\xb8\x8a+\x80wm_\x90\xbfa\xfbB\ +B\xf0\xcb#>vI\xda\x11\x8d\xb2\x82\xc2\x04$\x9d\ +\x22\xa8\xbfB\x09\xdb\xc35\xc0\x01\xdb\xe7u\x0bl\x8f\ +\x13\x1e\x9b\x95\x91\xbaS\x04\xad?\x10\xa5\x93X\xd21\ +`-\xf0M\xc4\xc7\xf5\xc0~\xdb\xe3\xb6\xc7\x08\x13\xf6\ +\xc6H\x9di`B\xd2\xc0'\x80\x95\xfb\x80\xa4\x9f\x81\ +5\x80#~n\x02\xf6\x11\x96\xca\xfc\xca\x95g\x06\xb8\ +]\xd2\xf1\xc4\x18+\x89\xee\xae\x92L\xd0-?EL\ +\xd7\x03\x93\x11\x1b\x136\xaa_\xd2\xc2\x8b\x93$\x0f$\ +}K\x18\x89\xa3\x03\xb4u\x8c \x11~\x18\xc0\xc7\x1c\ +\x92\xf5\x8d\xa4C\xc0:\xe0\xb7>\xda\xf9\x03\xb8M\xd2\ +W}\xd4\xad\xa4\x96@\x93\xf49\xb0!\x0b(\x95Y\ +`\x93\xa4O\xeb\xb4\x95Jm\x85)i\x1a\xb8\x93\xa0\ 
+]R\xb8_\xd2[u\xdbI\xa5/\x89,\xe9=`\ +\x13\xa1w\xab\xd8&\xe9\xe5~\xdaH\xa5o\x8d/i\ +\x0a\xd8B\x10bE<'\xe9\xc9~\xfd\xa72\xd0K\ +\x8a\xa4=\xc0}\xcc\x9d\x13;\x81\xad\x83\xf8Ne\xe0\ +\x939I\xbbm\x7fB\xd8\xc0\xce\x07>\xca\xe6\xc9\xbc\ +pV\x8e\x16%}\x07<{6|\xd5\xa5M\xf8\x02\ +>\xaa\x9c\xec\x10\xb4I\xef\xf9\xccX\xf65p\x18\xc9\ +\xbf\xd5\xcdt\x08\xca0\x7f\xc0T\xf5\xfa7LL\x8f\ +\xfe\xaf\x06\xd9\xf9\xcb\xe6\xac`T\xe8\xfe\xecq\xe4\xdf\ +\x8f\x09\xd9Hl\xe7\xbf\xdfm\xaa\xce\xea\x9b\xe0$g\ +\xfens\x14\xe0\x1f\x0aC\x12kO\xfd?\x13\x00\x00\ +\x00\x00IEND\xaeB`\x82\ +\x00\x00\x00\xa0\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\ +\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09p\ +HYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\ +\x00\x00\x00\x07tIME\x07\xdc\x08\x17\x14\x1f\x0d\xfc\ +R+\x9c\x00\x00\x00$IDAT\x08\xd7c`@\ +\x05s>\xc0XL\xc8\x5c&dY&d\xc5pN\ +\x8a\x00\x9c\x93\x22\x80a\x1a\x0a\x00\x00)\x95\x08\xaf\x88\ +\xac\xba4\x00\x00\x00\x00IEND\xaeB`\x82\ +\x00\x00\x01\xe1\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\x93IDATh\x81\xed\ +\x9a;N\xc3@\x10\x86\xbfq\x1c*\xe8\x10\xe56\x94\ +\xd0\xa4\xa1\x8a(\x22\x0a\x0aD\x9f\x9e\x0bp\x80Pp\ +\x01.\xc0\x15h\x80\x13\xa0\x1c!P\x91f\xbbD\xa1\ +B4yh(l\x1e\xb1\xfcH\x08\xc9\xda\xd2~\x9d\ +w\x5c\xfc\x9f\xb3\x1e9\xda\x11bTu\x17\xb8\x02\x9a\ +\xc0!P\xa7\x5cL\x80\x1e\xd0\x05\xaeEd\xf4]Q\ +\xd5\x96\xaa\x0e\xb4:\x0cT\xb5\x05 \x1a=\xf9g`\ +\xcf\xc5c]\x81!p\x10\x10m\x9b\xaa\x85\x87(s\ +'$\xda\xf3If\x1b\x0e\xb3(\xb5\xc4uSTu\ +\xcc\xfc\x0b;\x13\x91p\x83\xa1\x16FU\xa7\xccKL\ +\x02\xca\xd7m\x96\xa1\x1e\xb8N\xb0*^\xc05^\xc0\ +5^\xc05^\xc05^\xc05^\xc05\x85\x9f\xcd\ +\xd6\xda\x13\xe0\x08\xd8^\x7f\x9c9\xa6\xc0\x0b\xf0`\x8c\ +\xf9\xc8\xbaITU\x13k3\x11\x09\xad\xb5!p\x07\ +\x9c\xaf1\xe4\x22\xf4\x81Sc\xcck\xca\xff\x81\xdc-\ +t\x89\xfb\xf0\x00\xfb\xc0mV1O\xe0\xec\xff\xb3\xfc\ +\x99ck\xedNZ\xa1\xf2/q\x9e\xc0\xe3\xc6R\x14\ +\xf3d\x8cyO+\xe4\x09\xdc\x00\xf7\xeb\xc9\xb3\x14}\ +\xe0\x22\xab\x98\xd9\x85\xbe.\xca\xd4F\xd3\xbaP\xa1@\ +\x99X\xb6\x8dV\x02/\xe0\x1a/\xe0\x1a/\xe0\x1a/\ +\xe0\x1a/\xe0\x1a/\xe0\x9a\x80\xe8\x04\xbc\xaa\x8cC\xa2\ +\xe3\xfb\xc6\xaf\xc5Z\xfc\xd9ZF\x92\xc7\xac\xbd\x90h\ +\xf6\xa0QpcY\xe9V\x7f\xd4 \x9e\xfah\xc7\x0b\ +Ua\x08\xb4Ed$_+\xf1/\xd1\xe1g\xdcf\ +\xcbQ\xb8,\xc6\xcc\x8f\xdb\xbc\x01|\x02mw#\xb3\ +\xd4\x95Sv\x00\x00\x00\x00IEND\xaeB`\x82\ +\ +\x00\x00\x01W\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01\x09IDATh\x81\xed\ +\xda\xcdm\xc2@\x14E\xe1\xf3\x8cI\x05Q\x9aH6\ +\xecY\xd1\x05\xc5\x90Ej\xa3\x04R\x04\x884`\x82\ +n\x163\xf9\xb1\xa5(DH\x5c[z\xdf\x8e\xc1\x8b\ +w\x8c\xcdf&\xa8$\xdd\x03\xcf\xc0\x12x\x02\xe6\x8c\ +\xcb\x09\xd8\x01[\xe0%\x22\x8e_\xdfHZI\xdak\ +:\xf6\x92V\x00\xa1r\xe7_\x81\x07\xc7m\xbd\xc2\x01\ +xl(\x8f\xcd\xd4\x86\x872\xf3\xa6\xa5<\xf3C\xe7\ +\x1b\x0fs\xa9\xd9\xe0\xf32$u\xf4_\xd8sD\xb4\ +7\x1c\xeab\x92\xde\xe9G\x9c\x1a\xc6\xf7o\xf3\x1f\xf3\ +\xc6=\xc1\xb52\xc0-\x03\xdc2\xc0-\x03\xdc2\xc0\ +-\x03\xdc2\xc0-\x03\xdc2\xc0-\x03\xdc2\xc0-\ +\x03\xdc2\xc0-\x03\xdc2\xc0-\x03\xdc2\xc0\xad\xa1\ +\xec\x80OU\xd7R\xb6\xef\x17?\x16gu7p\x8c\ +\x86\xdb\xac\xbb\x96r\xf6`\xf1\xc7\x85c\xb5\x9d\xfeQ\ 
+\x83z\xeac]\x17\xa6\xe2\x00\xac#\xe2\x18\x9f+\xf5\ +\x97\xd8\xf0}\xdc\xe6\xce4\xdco:\xfa\xc7m\xde\x00\ +>\x00G\xd7\xea\xb1\xadi\xe1\xd6\x00\x00\x00\x00IE\ +ND\xaeB`\x82\ +\x00\x00\x01v\ +\x89\ +PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\ +\x00\x000\x00\x00\x000\x08\x06\x00\x00\x00W\x02\xf9\x87\ +\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\ +\x01\x00\x9a\x9c\x18\x00\x00\x01(IDATh\x81\xed\ +\xda\xb1J\xc3P\x14\x87\xf1/7\xb7\xe0\xae\xf8\x00\x82\ +Su\xe8\xde\xc9ly\x80@\x1fF\x87\xfa\x22nB\ +\xdc\xb3\xc5\xa9/ \xb4]:t\x0f}\x82j\xc1\xe1\ +\xa6P\xb3h\x10\xfa\xcf\x85\xf3\xdbR:\x9c\xaf\xdcf\ +97\xa1\x95\xe5\xc5\x15\xf0\x04L\x81;`\xc4\xb0|\ +\x02K`\x01\xcc\xeb\xaa\xdc\x01$\x00Y^<\x00\xaf\ +\xc0\xb5l\xbc~\x1a`VW\xe5{\xd2\xfe\xf2+\xe2\ +\x19\xfe\xa8\x01\xc6\x8eplb\x1b\x1e\xc2\xcc\x8f\x9ep\ +\xe6\xbb\x0eg\x1e\xe6\xaf\xd2\xce\xf3\xd4\x13\xfe\xb0\xa7\x0e\ +uU\xfa3\x0d\xd4K\x96\x17_\xfc\x8c\xb8w\x0c\xef\ +m\xd3\xc7\xc8\xa9'\xf8/\x0bP\xb3\x005\x0bP\xb3\ +\x005\x0bP\xb3\x005\x0bP\xb3\x005\x0bP\xb3\x00\ +5\x0bP\xb3\x005\x0bP\xb3\x005\x0bP\xb3\x005\ +\x0bPs\x84\x0dx\xac\xf6\x9e\xb0\xbe\x9f\x9c|\x98\xb6\ +\xdb\xc0!\xea\xaeY\x97\x9ep\xf7`\xf2\xcb\x17\x87j\ +\xe1\x809am\x1f\x9b\x06xv\xed\xad\x8f\x19qE\ +\x1c/{\xecR\x80\xedf\xb5\xbd\xb9\x1d\xbf\x00\x17\x84\ +\xc5\xf7%\xc3;F{\xe0\x03x\x03\x8a\xba*\xd7\x00\ +\xdf\xa4\xb56\xa2\xca\x99tG\x00\x00\x00\x00IEN\ +D\xaeB`\x82\ +" + +qt_resource_name = b"\ +\x00\x08\ +\x06\xc5\x8e\xa5\ +\x00o\ +\x00p\x00e\x00n\x00p\x00y\x00p\x00e\ +\x00\x06\ +\x07\x03}\xc3\ +\x00i\ +\x00m\x00a\x00g\x00e\x00s\ +\x00\x17\ +\x0ce\xce\x07\ +\x00l\ +\x00e\x00f\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\ +\x00e\x00d\x00.\x00p\x00n\x00g\ +\x00\x1a\ +\x03\x0e\xe4\x87\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\ +\x00h\x00o\x00v\x00e\x00r\x00.\x00p\x00n\x00g\ +\x00 \ +\x0f\xd4\x1b\xc7\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\ +\x00i\x00n\x00a\x00t\x00e\x00_\x00h\x00o\x00v\x00e\x00r\x00.\x00p\x00n\x00g\ +\x00\x15\ +\x03'rg\ +\x00c\ +\x00o\x00m\x00b\x00o\x00b\x00o\x00x\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00o\x00n\ +\x00.\x00p\x00n\x00g\ +\x00\x11\ +\x01\x1f\xc3\x87\ +\x00d\ +\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00o\x00n\x00.\x00p\x00n\x00g\ +\ +\x00\x0e\ +\x04\xa2\xfc\xa7\ +\x00d\ +\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\ +\x00\x1b\ +\x03Z2'\ +\x00c\ +\x00o\x00m\x00b\x00o\x00b\x00o\x00x\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\ +\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\ +\x00\x0f\ +\x02\x9f\x05\x87\ +\x00r\ +\x00i\x00g\x00h\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\ +\x00\x12\ +\x01.\x03'\ +\x00c\ +\x00o\x00m\x00b\x00o\x00b\x00o\x00x\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\ +\x00g\ +\x00\x1c\ +\x0e<\xde\x07\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\ +\x00d\x00_\x00h\x00o\x00v\x00e\x00r\x00.\x00p\x00n\x00g\ +\x00\x0f\ +\x06S%\xa7\ +\x00b\ +\x00r\x00a\x00n\x00c\x00h\x00_\x00o\x00p\x00e\x00n\x00.\x00p\x00n\x00g\ +\x00\x0e\ +\x0e\xde\xfa\xc7\ +\x00l\ +\x00e\x00f\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\ +\x00\x11\ +\x0b\xda0\xa7\ +\x00b\ +\x00r\x00a\x00n\x00c\x00h\x00_\x00c\x00l\x00o\x00s\x00e\x00d\x00.\x00p\x00n\x00g\ +\ +\x00\x15\ +\x0f\xf3\xc0\x07\ +\x00u\ +\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\ +\x00.\x00p\x00n\x00g\ +\x00\x12\ +\x05\x8f\x9d\x07\ +\x00b\ 
+\x00r\x00a\x00n\x00c\x00h\x00_\x00o\x00p\x00e\x00n\x00_\x00o\x00n\x00.\x00p\x00n\ +\x00g\ +\x00\x1a\ +\x05\x11\xe0\xe7\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\ +\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\ +\x00\x16\ +\x01u\xcc\x87\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\ +\x00d\x00.\x00p\x00n\x00g\ +\x00\x0f\ +\x0c\xe2hg\ +\x00t\ +\x00r\x00a\x00n\x00s\x00p\x00a\x00r\x00e\x00n\x00t\x00.\x00p\x00n\x00g\ +\x00\x17\ +\x0c\xabQ\x07\ +\x00d\ +\x00o\x00w\x00n\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\x00l\ +\x00e\x00d\x00.\x00p\x00n\x00g\ +\x00\x1d\ +\x09\x07\x81\x07\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00_\ +\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\ +\x00\x12\ +\x03\x8d\x04G\ +\x00r\ +\x00i\x00g\x00h\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00o\x00n\x00.\x00p\x00n\ +\x00g\ +\x00\x1a\ +\x01\x87\xaeg\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\ +\x00i\x00n\x00a\x00t\x00e\x00.\x00p\x00n\x00g\ +\x00#\ +\x06\xf2\x1aG\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\ +\x00i\x00n\x00a\x00t\x00e\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\ +\x00n\x00g\ +\x00\x0c\ +\x06\xe6\xe6g\ +\x00u\ +\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00.\x00p\x00n\x00g\ +\x00\x11\ +\x00\xb8\x8c\x07\ +\x00l\ +\x00e\x00f\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00o\x00n\x00.\x00p\x00n\x00g\ +\ +\x00\x0f\ +\x01s\x8b\x07\ +\x00u\ +\x00p\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00o\x00n\x00.\x00p\x00n\x00g\ +\x00\x14\ +\x04^-\xa7\ +\x00b\ +\x00r\x00a\x00n\x00c\x00h\x00_\x00c\x00l\x00o\x00s\x00e\x00d\x00_\x00o\x00n\x00.\ +\x00p\x00n\x00g\ +\x00\x14\ +\x07\xec\xd1\xc7\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00c\x00h\x00e\x00c\x00k\x00e\x00d\x00.\ +\x00p\x00n\x00g\ +\x00\x18\ +\x03\x8e\xdeg\ +\x00r\ +\x00i\x00g\x00h\x00t\x00_\x00a\x00r\x00r\x00o\x00w\x00_\x00d\x00i\x00s\x00a\x00b\ +\x00l\x00e\x00d\x00.\x00p\x00n\x00g\ +\x00 \ +\x09\xd7\x1f\xa7\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00i\x00n\x00d\x00e\x00t\x00e\x00r\x00m\ +\x00i\x00n\x00a\x00t\x00e\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\ +\x00\x1c\ +\x08?\xdag\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\ +\x00d\x00_\x00f\x00o\x00c\x00u\x00s\x00.\x00p\x00n\x00g\ +\x00\x1f\ +\x0a\xae'G\ +\x00c\ +\x00h\x00e\x00c\x00k\x00b\x00o\x00x\x00_\x00u\x00n\x00c\x00h\x00e\x00c\x00k\x00e\ +\x00d\x00_\x00d\x00i\x00s\x00a\x00b\x00l\x00e\x00d\x00.\x00p\x00n\x00g\ +" + +qt_resource_struct = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x16\x00\x02\x00\x00\x00 \x00\x00\x00\x03\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x04\xb8\x00\x00\x00\x00\x00\x01\x00\x008:\ +\x00\x00\x01{\xe9xF\xdd\ +\x00\x00\x01\x0c\x00\x00\x00\x00\x00\x01\x00\x00\x07]\ +\x00\x00\x01{\xe9xF\xdb\ +\x00\x00\x01\xb6\x00\x00\x00\x00\x00\x01\x00\x00\x09\xfc\ +\x00\x00\x01{\xe9xF\xd9\ +\x00\x00\x04\xe0\x00\x00\x00\x00\x00\x01\x00\x008\xe4\ +\x00\x00\x01{\xe9xF\xe0\ +\x00\x00\x03 \x00\x00\x00\x00\x00\x01\x00\x00'R\ +\x00\x00\x01}\x0f$Y\x81\ +\x00\x00\x04\x14\x00\x00\x00\x00\x00\x01\x00\x003\xb8\ +\x00\x00\x01}\x0f$Y~\ +\x00\x00\x01\x92\x00\x00\x00\x00\x00\x01\x00\x00\x09X\ +\x00\x00\x01{\xe9xF\xdd\ 
+\x00\x00\x00\x5c\x00\x00\x00\x00\x00\x01\x00\x00\x00\xaa\ +\x00\x00\x01}\x0f$Y~\ +\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x06\xb3\ +\x00\x00\x01{\xe9xF\xda\ +\x00\x00\x01V\x00\x00\x00\x00\x00\x01\x00\x00\x08\xaf\ +\x00\x00\x01{\xe9xF\xd9\ +\x00\x00\x03\xea\x00\x00\x00\x00\x00\x01\x00\x003\x14\ +\x00\x00\x01{\xe9xF\xde\ +\x00\x00\x05`\x00\x00\x00\x00\x00\x01\x00\x00Ef\ +\x00\x00\x01{\xe9xF\xde\ +\x00\x00\x05\x04\x00\x00\x00\x00\x00\x01\x00\x009\x86\ +\x00\x00\x01{\xe9xF\xd7\ +\x00\x00\x014\x00\x00\x00\x00\x00\x01\x00\x00\x08\x06\ +\x00\x00\x01{\xe9xF\xda\ +\x00\x00\x02\xe6\x00\x00\x00\x00\x00\x01\x00\x00#O\ +\x00\x00\x01}\x0f$Y}\ +\x00\x00\x02\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x1c\x1b\ +\x00\x00\x01{\xe9xF\xd8\ +\x00\x00\x02\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x13\ +\x00\x00\x01{\xe9xF\xd8\ +\x00\x00\x04\x9a\x00\x00\x00\x00\x00\x01\x00\x007\x98\ +\x00\x00\x01{\xe9xF\xdf\ +\x00\x00\x04N\x00\x00\x00\x00\x00\x01\x00\x005\x98\ +\x00\x00\x01}\x0f$Y\x7f\ +\x00\x00\x052\x00\x00\x00\x00\x00\x01\x00\x00Ag\ +\x00\x00\x01}\x0f$Y|\ +\x00\x00\x05\xdc\x00\x00\x00\x00\x00\x01\x00\x00G\xef\ +\x00\x00\x01}\x0f$Y\x82\ +\x00\x00\x03\xaa\x00\x00\x00\x00\x00\x01\x00\x00.\xdd\ +\x00\x00\x01}\x0f$Y}\ +\x00\x00\x05\x96\x00\x00\x00\x00\x00\x01\x00\x00F\x0a\ +\x00\x00\x01}\x0f$Y\x80\ +\x00\x00\x06\x1a\x00\x00\x00\x00\x00\x01\x00\x00IJ\ +\x00\x00\x01}\x0f$Y\x81\ +\x00\x00\x02d\x00\x00\x00\x00\x00\x01\x00\x00\x13\xc7\ +\x00\x00\x01{\xe9xF\xd7\ +\x00\x00\x00(\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x01{\xe9xF\xdc\ +\x00\x00\x03v\x00\x00\x00\x00\x00\x01\x00\x00.3\ +\x00\x00\x01{\xe9xF\xdb\ +\x00\x00\x03R\x00\x00\x00\x00\x00\x01\x00\x00(\xb1\ +\x00\x00\x01}\x0f$k\xb6\ +\x00\x00\x01\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x0a\xa6\ +\x00\x00\x01}\x0f$Y\x82\ +\x00\x00\x02B\x00\x00\x00\x00\x00\x01\x00\x00\x13\x1d\ +\x00\x00\x01{\xe9xF\xdc\ +\x00\x00\x00\x96\x00\x00\x00\x00\x00\x01\x00\x00\x04\xc0\ +\x00\x00\x01}\x0f$Y\x80\ +\x00\x00\x02\x8c\x00\x00\x00\x00\x00\x01\x00\x00\x1bx\ +\x00\x00\x01{\xe9xF\xdf\ +" + + +def qInitResources(): + QtCore.qRegisterResourceData( + 0x03, qt_resource_struct, qt_resource_name, qt_resource_data) + + +def qCleanupResources(): + QtCore.qUnregisterResourceData( + 0x03, qt_resource_struct, qt_resource_name, qt_resource_data) diff --git a/openpype/style/qrc_resources.py b/openpype/style/qrc_resources.py index a9e219c9ad..85f912228d 100644 --- a/openpype/style/qrc_resources.py +++ b/openpype/style/qrc_resources.py @@ -1,11 +1,13 @@ -import Qt +import qtpy initialized = False resources = None -if Qt.__binding__ == "PySide2": +if qtpy.API == "pyside6": + from . import pyside6_resources as resources +elif qtpy.API == "pyside2": from . import pyside2_resources as resources -elif Qt.__binding__ == "PyQt5": +elif qtpy.API == "pyqt5": from . 
import pyqt5_resources as resources diff --git a/openpype/style/style.css b/openpype/style/style.css index df83600973..da477eeefa 100644 --- a/openpype/style/style.css +++ b/openpype/style/style.css @@ -148,6 +148,10 @@ QPushButton::menu-indicator { padding-right: 5px; } +QPushButton[state="error"] { + background: {color:publisher:error}; +} + QToolButton { border: 0px solid transparent; background: {color:bg-buttons}; @@ -687,6 +691,27 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: none; } +/* Messages overlay */ +OverlayMessageWidget { + border-radius: 0.2em; + background: {color:overlay-messages:bg-success}; +} + +OverlayMessageWidget:hover { + background: {color:overlay-messages:bg-success-hover}; +} + +OverlayMessageWidget[type="error"] { + background: {color:overlay-messages:bg-error}; +} +OverlayMessageWidget[type="error"]:hover { + background: {color:overlay-messages:bg-error-hover}; +} + +OverlayMessageWidget QWidget { + background: transparent; +} + /* Password dialog*/ #PasswordBtn { border: none; @@ -836,23 +861,90 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* New Create/Publish UI */ +PublisherTabsWidget { + background: {color:publisher:tab-bg}; +} + +PublisherTabBtn { + border-radius: 0px; + background: {color:bg-inputs}; + font-size: 9pt; + font-weight: regular; + padding: 0.5em 1em 0.5em 1em; +} + +PublisherTabBtn:disabled { + background: {color:bg-inputs}; +} + +PublisherTabBtn:hover { + background: {color:bg-buttons}; +} + +PublisherTabBtn[active="1"] { + background: {color:bg}; +} +PublisherTabBtn[active="1"]:hover { + background: {color:bg}; +} + +PixmapButton{ + border: 0px solid transparent; + border-radius: 0.2em; + background: {color:bg-buttons}; +} +PixmapButton:hover { + background: {color:bg-button-hover}; +} +PixmapButton:disabled { + background: {color:bg-buttons-disabled}; +} + +#ThumbnailPixmapHoverButton { + font-size: 11pt; + background: {color:bg-view}; +} +#ThumbnailPixmapHoverButton:hover { + background: {color:bg-button-hover}; +} + +#CreatorDetailedDescription { + padding-left: 5px; + padding-right: 5px; + padding-top: 5px; + background: transparent; + border: 1px solid {color:border}; +} + #CreateDialogHelpButton { - background: rgba(255, 255, 255, 31); + background: {color:bg-buttons}; + border-top-left-radius: 0.2em; + border-bottom-left-radius: 0.2em; border-top-right-radius: 0; border-bottom-right-radius: 0; - font-size: 10pt; font-weight: bold; - padding: 3px 3px 3px 3px; } #CreateDialogHelpButton:hover { - background: rgba(255, 255, 255, 63); + background: {color:bg-button-hover}; +} +#CreateDialogHelpButton QWidget { + background: transparent; } #PublishLogConsole { font-family: "Noto Sans Mono"; } - +#VariantInputsWidget QLineEdit { + border-bottom-right-radius: 0px; + border-top-right-radius: 0px; +} +#VariantInputsWidget QToolButton { + border-bottom-left-radius: 0px; + border-top-left-radius: 0px; + padding-top: 0.5em; + padding-bottom: 0.5em; +} #VariantInput[state="new"], #VariantInput[state="new"]:focus, #VariantInput[state="new"]:hover { border-color: {color:publisher:success}; } @@ -902,38 +994,26 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { color: {color:publisher:error}; } -#PublishFrame { - background: rgba(0, 0, 0, 127); -} -#PublishFrame[state="1"] { - background: rgb(22, 25, 29); -} -#PublishFrame[state="2"] { - background: {color:bg}; -} - #PublishInfoFrame { background: {color:bg}; - border: 2px solid black; border-radius: 0.3em; } - 
-#PublishInfoFrame[state="-1"] { - background: rgb(194, 226, 236); -} - #PublishInfoFrame[state="0"] { - background: {color:publisher:error}; + background: {color:publisher:success}; } #PublishInfoFrame[state="1"] { - background: {color:publisher:success}; + background: {color:publisher:crash}; } #PublishInfoFrame[state="2"] { background: {color:publisher:warning}; } +#PublishInfoFrame[state="3"], #PublishInfoFrame[state="4"] { + background: rgb(194, 226, 236); +} + #PublishInfoFrame QLabel { color: black; font-style: bold; @@ -947,6 +1027,11 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { font-size: 13pt; } +ValidationArtistMessage QLabel { + font-size: 20pt; + font-weight: bold; +} + #ValidationActionButton { border-radius: 0.2em; padding: 4px 6px 4px 6px; @@ -963,17 +1048,16 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } #ValidationErrorTitleFrame { - background: {color:bg-inputs}; - border-left: 4px solid transparent; + border-radius: 0.2em; + background: {color:bg-buttons}; } #ValidationErrorTitleFrame:hover { - border-left-color: {color:border}; + background: {color:bg-buttons-hover}; } #ValidationErrorTitleFrame[selected="1"] { - background: {color:bg}; - border-left-color: {palette:blue-light}; + background: {color:bg-view-selection}; } #ValidationErrorInstanceList { @@ -985,11 +1069,48 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { border-left: 1px solid {color:border}; } -#TasksCombobox[state="invalid"], #AssetNameInput[state="invalid"] { +#AssetNameInputWidget { + background: {color:bg-inputs}; + border: 1px solid {color:border}; + border-radius: 0.3em; +} + +#AssetNameInputWidget QWidget { + background: transparent; +} + +#AssetNameInputButton { + border-bottom-left-radius: 0px; + border-top-left-radius: 0px; + padding: 0px; + qproperty-iconSize: 11px 11px; + border-left: 1px solid {color:border}; + border-right: none; + border-top: none; + border-bottom: none; +} + +#AssetNameInput { + border-bottom-right-radius: 0px; + border-top-right-radius: 0px; + border: none; +} + +#AssetNameInputWidget:hover { + border-color: {color:border-hover}; +} +#AssetNameInputWidget:focus{ + border-color: {color:border-focus}; +} +#AssetNameInputWidget:disabled { + background: {color:bg-inputs-disabled}; +} + +#TasksCombobox[state="invalid"], #AssetNameInputWidget[state="invalid"], #AssetNameInputButton[state="invalid"] { border-color: {color:publisher:error}; } -#PublishProgressBar[state="0"]::chunk { +#PublishProgressBar[state="1"]::chunk, #PublishProgressBar[state="4"]::chunk { background: {color:bg-buttons}; } @@ -1009,6 +1130,10 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: transparent; } +CreateNextPageOverlay { + font-size: 32pt; +} + /* Settings - NOT USED YET - we need to define font family for settings UI */ @@ -1269,6 +1394,14 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { background: #21252B; } +/* Workfiles */ +#WorkfilesPublishedContextSelect { + background: rgba(0, 0, 0, 127); +} +#WorkfilesPublishedContextSelect QLabel { + font-size: 17pt; +} + /* Tray */ #TrayRestartButton { background: {color:restart-btn-bg}; @@ -1287,6 +1420,13 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical { } /* Globally used names */ +#ValidatedLineEdit[state="valid"], #ValidatedLineEdit[state="valid"]:focus, #ValidatedLineEdit[state="valid"]:hover { + border-color: {color:publisher:success}; +} +#ValidatedLineEdit[state="invalid"], #ValidatedLineEdit[state="invalid"]:focus, 
#ValidatedLineEdit[state="invalid"]:hover {
+    border-color: {color:publisher:error};
+}
+
 #Separator {
     background: {color:bg-menu-separator};
 }
@@ -1323,3 +1463,14 @@ QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {
 #LikeDisabledInput:focus {
     border-color: {color:border};
 }
+
+/* Attribute Definition widgets */
+InViewButton, InViewButton:disabled {
+    background: transparent;
+}
+InViewButton:hover {
+    background: rgba(255, 255, 255, 37);
+}
+SupportLabel {
+    color: {color:font-disabled};
+}
diff --git a/openpype/tests/lib.py b/openpype/tests/lib.py
index 85b9032836..1fa5fb8054 100644
--- a/openpype/tests/lib.py
+++ b/openpype/tests/lib.py
@@ -78,3 +78,12 @@ def tempdir():
         yield tempdir
     finally:
         shutil.rmtree(tempdir)
+
+
+def is_in_tests():
+    """Return whether the process is running in automatic tests mode.
+
+    In tests mode a different source DB is used and some plugins might be
+    disabled, etc.
+    """
+    return os.environ.get("IS_TEST") == '1'
diff --git a/openpype/tests/test_avalon_plugin_presets.py b/openpype/tests/test_avalon_plugin_presets.py
index f1b1a94713..464c216d6f 100644
--- a/openpype/tests/test_avalon_plugin_presets.py
+++ b/openpype/tests/test_avalon_plugin_presets.py
@@ -1,6 +1,9 @@
-import avalon.api as api
-import openpype
-from openpype.pipeline import LegacyCreator
+from openpype.pipeline import (
+    install_host,
+    LegacyCreator,
+    register_creator_plugin,
+    discover_creator_plugins,
+)
 
 
 class MyTestCreator(LegacyCreator):
@@ -19,16 +22,15 @@ class Test:
     __name__ = "test"
     ls = len
 
-    def __call__(self):
-        pass
+    @staticmethod
+    def install():
+        register_creator_plugin(MyTestCreator)
 
 
 def test_avalon_plugin_presets(monkeypatch, printer):
+    install_host(Test)
 
-    openpype.install()
-    api.register_host(Test())
-    api.register_plugin(LegacyCreator, MyTestCreator)
-    plugins = api.discover(LegacyCreator)
+    plugins = discover_creator_plugins()
     printer("Test if we got our test plugin")
     assert MyTestCreator in plugins
     for p in plugins:
diff --git a/openpype/tests/test_lib_restructuralization.py b/openpype/tests/test_lib_restructuralization.py
index 94080e550d..669706d470 100644
--- a/openpype/tests/test_lib_restructuralization.py
+++ b/openpype/tests/test_lib_restructuralization.py
@@ -5,11 +5,9 @@ def test_backward_compatibility(printer):
     printer("Test if imports still work")
 
     try:
-        from openpype.lib import filter_pyblish_plugins
         from openpype.lib import execute_hook
         from openpype.lib import PypeHook
 
-        from openpype.lib import get_latest_version
         from openpype.lib import ApplicationLaunchFailed
 
         from openpype.lib import get_ffmpeg_tool_path
@@ -18,12 +16,6 @@
         from openpype.lib import get_version_from_path
         from openpype.lib import version_up
 
-        from openpype.lib import is_latest
-        from openpype.lib import any_outdated
-        from openpype.lib import get_asset
-        from openpype.lib import get_hierarchy
-        from openpype.lib import get_linked_assets
-        from openpype.lib import get_latest_version
         from openpype.lib import get_ffprobe_streams
 
         from openpype.hosts.fusion.lib import switch_item
diff --git a/openpype/tests/test_pyblish_filter.py b/openpype/tests/test_pyblish_filter.py
index ea23da26e4..b74784145f 100644
--- a/openpype/tests/test_pyblish_filter.py
+++ b/openpype/tests/test_pyblish_filter.py
@@ -1,9 +1,9 @@
-from . import lib
+import os
 import pyblish.api
 import pyblish.util
 import pyblish.plugin
-from openpype.lib import filter_pyblish_plugins
-import os
+from openpype.pipeline.publish.lib import filter_pyblish_plugins
+from . 
import lib def test_pyblish_plugin_filter_modifier(printer, monkeypatch): diff --git a/openpype/tools/adobe_webserver/app.py b/openpype/tools/adobe_webserver/app.py index b79d6c6c60..3911baf7ac 100644 --- a/openpype/tools/adobe_webserver/app.py +++ b/openpype/tools/adobe_webserver/app.py @@ -16,7 +16,7 @@ from wsrpc_aiohttp import ( WSRPCClient ) -from avalon import api +from openpype.pipeline import legacy_io log = logging.getLogger(__name__) @@ -80,9 +80,9 @@ class WebServerTool: loop=asyncio.get_event_loop()) await client.connect() - project = api.Session["AVALON_PROJECT"] - asset = api.Session["AVALON_ASSET"] - task = api.Session["AVALON_TASK"] + project = legacy_io.Session["AVALON_PROJECT"] + asset = legacy_io.Session["AVALON_ASSET"] + task = legacy_io.Session["AVALON_TASK"] log.info("Sending context change to {}-{}-{}".format(project, asset, task)) diff --git a/openpype/tools/assetlinks/widgets.py b/openpype/tools/assetlinks/widgets.py index 22e8848a60..7b05eef2d7 100644 --- a/openpype/tools/assetlinks/widgets.py +++ b/openpype/tools/assetlinks/widgets.py @@ -1,10 +1,16 @@ +import collections +from openpype.client import ( + get_versions, + get_subsets, + get_assets, + get_output_link_versions, +) -from Qt import QtWidgets +from qtpy import QtWidgets class SimpleLinkView(QtWidgets.QWidget): - - def __init__(self, dbcon, parent=None): + def __init__(self, dbcon, parent): super(SimpleLinkView, self).__init__(parent=parent) self.dbcon = dbcon @@ -24,6 +30,11 @@ class SimpleLinkView(QtWidgets.QWidget): self._in_view = in_view self._out_view = out_view + self._version_doc_to_process = None + + @property + def project_name(self): + return self.dbcon.current_project() def clear(self): self._in_view.clear() @@ -31,60 +42,114 @@ class SimpleLinkView(QtWidgets.QWidget): def set_version(self, version_doc): self.clear() - if not version_doc or not self.isVisible(): - return + self._version_doc_to_process = version_doc + if version_doc and self.isVisible(): + self._fill_values() - # inputs - # + def showEvent(self, event): + super(SimpleLinkView, self).showEvent(event) + self._fill_values() + + def _fill_values(self): + if self._version_doc_to_process is None: + return + version_doc = self._version_doc_to_process + self._version_doc_to_process = None + self._fill_inputs(version_doc) + self._fill_outputs(version_doc) + + def _fill_inputs(self, version_doc): + version_ids = set() for link in version_doc["data"].get("inputLinks", []): # Backwards compatibility for "input" key used as "id" if "id" not in link: link_id = link["input"] else: link_id = link["id"] - version = self.dbcon.find_one( - {"_id": link_id, "type": "version"}, - projection={"name": 1, "parent": 1} - ) - if not version: - continue - subset = self.dbcon.find_one( - {"_id": version["parent"], "type": "subset"}, - projection={"name": 1, "parent": 1} - ) - if not subset: - continue - asset = self.dbcon.find_one( - {"_id": subset["parent"], "type": "asset"}, - projection={"name": 1} - ) + version_ids.add(link_id) - self._in_view.addItem("{asset} {subset} v{version:0>3}".format( - asset=asset["name"], - subset=subset["name"], - version=version["name"], + version_docs = list(get_versions( + self.project_name, + version_ids=version_ids, + fields=["name", "parent"] + )) + + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + 
self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] )) - # outputs - # - outputs = self.dbcon.find( - {"type": "version", "data.inputLinks.input": version_doc["_id"]}, - projection={"name": 1, "parent": 1} - ) - for version in outputs or []: - subset = self.dbcon.find_one( - {"_id": version["parent"], "type": "subset"}, - projection={"name": 1, "parent": 1} - ) - if not subset: - continue - asset = self.dbcon.find_one( - {"_id": subset["parent"], "type": "asset"}, - projection={"name": 1} - ) + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) - self._out_view.addItem("{asset} {subset} v{version:0>3}".format( - asset=asset["name"], - subset=subset["name"], - version=version["name"], + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._in_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) + + def _fill_outputs(self, version_doc): + version_docs = list(get_output_link_versions( + self.project_name, + version_doc["_id"], + fields=["name", "parent"] + )) + versions_by_subset_id = collections.defaultdict(list) + for version_doc in version_docs: + subset_id = version_doc["parent"] + versions_by_subset_id[subset_id].append(version_doc) + + subset_docs = [] + if versions_by_subset_id: + subset_docs = list(get_subsets( + self.project_name, + subset_ids=versions_by_subset_id.keys(), + fields=["_id", "name", "parent"] + )) + + asset_docs = [] + subsets_by_asset_id = collections.defaultdict(list) + if subset_docs: + for subset_doc in subset_docs: + asset_id = subset_doc["parent"] + subsets_by_asset_id[asset_id].append(subset_doc) + + asset_docs = list(get_assets( + self.project_name, + asset_ids=subsets_by_asset_id.keys(), + fields=["_id", "name"] + )) + + for asset_doc in asset_docs: + asset_id = asset_doc["_id"] + for subset_doc in subsets_by_asset_id[asset_id]: + subset_id = subset_doc["_id"] + for version_doc in versions_by_subset_id[subset_id]: + self._out_view.addItem("{} {} v{:0>3}".format( + asset_doc["name"], + subset_doc["name"], + version_doc["name"], + )) diff --git a/openpype/tools/attribute_defs/__init__.py b/openpype/tools/attribute_defs/__init__.py new file mode 100644 index 0000000000..f991fdec3d --- /dev/null +++ b/openpype/tools/attribute_defs/__init__.py @@ -0,0 +1,16 @@ +from .widgets import ( + create_widget_for_attr_def, + AttributeDefinitionsWidget, +) + +from .dialog import ( + AttributeDefinitionsDialog, +) + + +__all__ = ( + "create_widget_for_attr_def", + "AttributeDefinitionsWidget", + + "AttributeDefinitionsDialog", +) diff --git a/openpype/tools/attribute_defs/dialog.py b/openpype/tools/attribute_defs/dialog.py new file mode 100644 index 0000000000..ef717d576a --- /dev/null +++ b/openpype/tools/attribute_defs/dialog.py @@ -0,0 +1,33 @@ +from qtpy import QtWidgets + +from .widgets import AttributeDefinitionsWidget + + +class AttributeDefinitionsDialog(QtWidgets.QDialog): + def __init__(self, attr_defs, parent=None): + super(AttributeDefinitionsDialog, self).__init__(parent) + + attrs_widget = AttributeDefinitionsWidget(attr_defs, self) + + 
btns_widget = QtWidgets.QWidget(self) + ok_btn = QtWidgets.QPushButton("OK", btns_widget) + cancel_btn = QtWidgets.QPushButton("Cancel", btns_widget) + + btns_layout = QtWidgets.QHBoxLayout(btns_widget) + btns_layout.setContentsMargins(0, 0, 0, 0) + btns_layout.addStretch(1) + btns_layout.addWidget(ok_btn, 0) + btns_layout.addWidget(cancel_btn, 0) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.addWidget(attrs_widget, 0) + main_layout.addStretch(1) + main_layout.addWidget(btns_widget, 0) + + ok_btn.clicked.connect(self.accept) + cancel_btn.clicked.connect(self.reject) + + self._attrs_widget = attrs_widget + + def get_values(self): + return self._attrs_widget.current_value() diff --git a/openpype/tools/attribute_defs/files_widget.py b/openpype/tools/attribute_defs/files_widget.py new file mode 100644 index 0000000000..067866035f --- /dev/null +++ b/openpype/tools/attribute_defs/files_widget.py @@ -0,0 +1,1011 @@ +import os +import collections +import uuid +import json + +from qtpy import QtWidgets, QtCore, QtGui + +from openpype.lib import FileDefItem +from openpype.tools.utils import ( + paint_image_with_color, + ClickableLabel, +) +# TODO change imports +from openpype.tools.resources import get_image +from openpype.tools.utils import ( + IconButton, + PixmapLabel +) + +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 +ITEM_LABEL_ROLE = QtCore.Qt.UserRole + 2 +ITEM_ICON_ROLE = QtCore.Qt.UserRole + 3 +FILENAMES_ROLE = QtCore.Qt.UserRole + 4 +DIRPATH_ROLE = QtCore.Qt.UserRole + 5 +IS_DIR_ROLE = QtCore.Qt.UserRole + 6 +IS_SEQUENCE_ROLE = QtCore.Qt.UserRole + 7 +EXT_ROLE = QtCore.Qt.UserRole + 8 + + +def convert_bytes_to_json(bytes_value): + if isinstance(bytes_value, QtCore.QByteArray): + # Raw data are already QByteArray and we don't have to load them + encoded_data = bytes_value + else: + encoded_data = QtCore.QByteArray.fromRawData(bytes_value) + stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly) + text = stream.readQString() + try: + return json.loads(text) + except Exception: + return None + + +def convert_data_to_bytes(data): + bytes_value = QtCore.QByteArray() + stream = QtCore.QDataStream(bytes_value, QtCore.QIODevice.WriteOnly) + stream.writeQString(json.dumps(data)) + return bytes_value + + +class SupportLabel(QtWidgets.QLabel): + pass + + +class DropEmpty(QtWidgets.QWidget): + _empty_extensions = "Any file" + + def __init__(self, single_item, allow_sequences, extensions_label, parent): + super(DropEmpty, self).__init__(parent) + + drop_label_widget = QtWidgets.QLabel("Drag & Drop files here", self) + + items_label_widget = SupportLabel(self) + items_label_widget.setWordWrap(True) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addSpacing(20) + layout.addWidget( + drop_label_widget, 0, alignment=QtCore.Qt.AlignCenter + ) + layout.addSpacing(30) + layout.addStretch(1) + layout.addWidget( + items_label_widget, 0, alignment=QtCore.Qt.AlignCenter + ) + layout.addSpacing(10) + + for widget in ( + drop_label_widget, + items_label_widget, + ): + widget.setAlignment(QtCore.Qt.AlignCenter) + widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + update_size_timer = QtCore.QTimer() + update_size_timer.setInterval(10) + update_size_timer.setSingleShot(True) + + update_size_timer.timeout.connect(self._on_update_size_timer) + + self._update_size_timer = update_size_timer + + if extensions_label and not extensions_label.startswith(" "): + extensions_label = " " + extensions_label + + self._single_item = single_item + 
self._extensions_label = extensions_label
+        self._allow_sequences = allow_sequences
+        self._allowed_extensions = set()
+        self._allow_folders = None
+
+        self._drop_label_widget = drop_label_widget
+        self._items_label_widget = items_label_widget
+
+        self.set_allow_folders(False)
+
+    def set_extensions(self, extensions):
+        if extensions:
+            extensions = {
+                ext.replace(".", "")
+                for ext in extensions
+            }
+        if extensions == self._allowed_extensions:
+            return
+        self._allowed_extensions = extensions
+
+        self._update_items_label()
+
+    def set_allow_folders(self, allowed):
+        if self._allow_folders == allowed:
+            return
+
+        self._allow_folders = allowed
+        self._update_items_label()
+
+    def _update_items_label(self):
+        allowed_items = []
+        if self._allow_folders:
+            allowed_items.append("folder")
+
+        if self._allowed_extensions:
+            allowed_items.append("file")
+            if self._allow_sequences:
+                allowed_items.append("sequence")
+
+        if not self._single_item:
+            allowed_items = [item + "s" for item in allowed_items]
+
+        if not allowed_items:
+            self._drop_label_widget.setVisible(False)
+            self._items_label_widget.setText(
+                "It is not allowed to add anything here!"
+            )
+            return
+
+        self._drop_label_widget.setVisible(True)
+        items_label = "Multiple "
+        if self._single_item:
+            items_label = "Single "
+
+        if len(allowed_items) == 1:
+            extensions_label = allowed_items[0]
+        elif len(allowed_items) == 2:
+            extensions_label = " or ".join(allowed_items)
+        else:
+            last_item = allowed_items.pop(-1)
+            new_last_item = " or ".join([last_item, allowed_items.pop(-1)])
+            allowed_items.append(new_last_item)
+            extensions_label = ", ".join(allowed_items)
+
+        allowed_items_label = extensions_label
+
+        items_label += allowed_items_label
+        label_tooltip = None
+        if self._allowed_extensions:
+            items_label += " of\n{}".format(
+                ", ".join(sorted(self._allowed_extensions))
+            )
+
+        if self._extensions_label:
+            label_tooltip = items_label
+            items_label = self._extensions_label
+
+        if self._items_label_widget.text() == items_label:
+            return
+
+        self._items_label_widget.setToolTip(label_tooltip)
+        self._items_label_widget.setText(items_label)
+        self._update_size_timer.start()
+
+    def resizeEvent(self, event):
+        super(DropEmpty, self).resizeEvent(event)
+        self._update_size_timer.start()
+
+    def _on_update_size_timer(self):
+        """Recalculate height of the label with extensions.
+
+        A dynamic QLabel with word wrap does not properly handle its
+        sizeHint calculations on show, so the height is recalculated here.
+        It is good practice to trigger this method with a small delay
+        using '_update_size_timer'.
+        """
+
+        width = self._items_label_widget.width()
+        height = self._items_label_widget.heightForWidth(width)
+        self._items_label_widget.setMinimumHeight(height)
+        self._items_label_widget.updateGeometry()
+
+    def paintEvent(self, event):
+        super(DropEmpty, self).paintEvent(event)
+        painter = QtGui.QPainter(self)
+        pen = QtGui.QPen()
+        pen.setWidth(1)
+        pen.setBrush(QtCore.Qt.darkGray)
+        pen.setStyle(QtCore.Qt.DashLine)
+        painter.setPen(pen)
+        content_margins = self.layout().contentsMargins()
+
+        left_m = content_margins.left()
+        top_m = content_margins.top()
+        rect = QtCore.QRect(
+            left_m,
+            top_m,
+            (
+                self.rect().width()
+                - (left_m + content_margins.right() + pen.width())
+            ),
+            (
+                self.rect().height()
+                - (top_m + content_margins.bottom() + pen.width())
+            )
+        )
+        painter.drawRect(rect)
+
+
+class FilesModel(QtGui.QStandardItemModel):
+    def __init__(self, single_item, allow_sequences):
+        super(FilesModel, self).__init__()
+
+        self._id = str(uuid.uuid4())
+        self._single_item = single_item
+        self._multivalue = False
+        self._allow_sequences = allow_sequences
+
+        self._items_by_id = {}
+        self._file_items_by_id = {}
+        self._filenames_by_dirpath = collections.defaultdict(set)
+        self._items_by_dirpath = collections.defaultdict(list)
+
+        self.rowsAboutToBeRemoved.connect(self._on_about_to_be_removed)
+        self.rowsInserted.connect(self._on_insert)
+
+    @property
+    def id(self):
+        return self._id
+
+    def _on_about_to_be_removed(self, parent_index, start, end):
+        """Make sure removed items are removed from the items mapping.
+
+        Connected with '_on_insert'. When the user drags an item and drops
+        it onto the same view, the item is actually removed and created
+        again, but that happens in inner calls of Qt.
+        """
+
+        for row in range(start, end + 1):
+            index = self.index(row, 0, parent_index)
+            item_id = index.data(ITEM_ID_ROLE)
+            if item_id is not None:
+                self._items_by_id.pop(item_id, None)
+
+    def _on_insert(self, parent_index, start, end):
+        """Make sure newly added items are stored in the items mapping.
+
+        Connected to '_on_about_to_be_removed'. Some items are not created
+        using '_create_item' but are recreated by Qt, so the item is missing
+        from the mapping; even if it were there, it would not point to the
+        same item.
+ """ + + for row in range(start, end + 1): + index = self.index(start, end, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id not in self._items_by_id: + self._items_by_id[item_id] = self.item(row) + + def set_multivalue(self, multivalue): + """Disable filtering.""" + + if self._multivalue == multivalue: + return + self._multivalue = multivalue + + def add_filepaths(self, items): + if not items: + return + + if self._multivalue: + _items = [] + for item in items: + if isinstance(item, (tuple, list, set)): + _items.extend(item) + else: + _items.append(item) + items = _items + + file_items = FileDefItem.from_value(items, self._allow_sequences) + if not file_items: + return + + if not self._multivalue and self._single_item: + file_items = [file_items[0]] + current_ids = list(self._file_items_by_id.keys()) + if current_ids: + self.remove_item_by_ids(current_ids) + + new_model_items = [] + for file_item in file_items: + item_id, model_item = self._create_item(file_item) + new_model_items.append(model_item) + self._file_items_by_id[item_id] = file_item + self._items_by_id[item_id] = model_item + + if new_model_items: + roow_item = self.invisibleRootItem() + roow_item.appendRows(new_model_items) + + def remove_item_by_ids(self, item_ids): + if not item_ids: + return + + items = [] + for item_id in set(item_ids): + if item_id not in self._items_by_id: + continue + item = self._items_by_id.pop(item_id) + self._file_items_by_id.pop(item_id) + items.append(item) + + if items: + for item in items: + self.removeRows(item.row(), 1) + + def get_file_item_by_id(self, item_id): + return self._file_items_by_id.get(item_id) + + def _create_item(self, file_item): + if file_item.is_dir: + icon_pixmap = paint_image_with_color( + get_image(filename="folder.png"), QtCore.Qt.white + ) + else: + icon_pixmap = paint_image_with_color( + get_image(filename="file.png"), QtCore.Qt.white + ) + + item = QtGui.QStandardItem() + item_id = str(uuid.uuid4()) + item.setData(item_id, ITEM_ID_ROLE) + item.setData(file_item.label or "< empty >", ITEM_LABEL_ROLE) + item.setData(file_item.filenames, FILENAMES_ROLE) + item.setData(file_item.directory, DIRPATH_ROLE) + item.setData(icon_pixmap, ITEM_ICON_ROLE) + item.setData(file_item.lower_ext, EXT_ROLE) + item.setData(file_item.is_dir, IS_DIR_ROLE) + item.setData(file_item.is_sequence, IS_SEQUENCE_ROLE) + + return item_id, item + + def mimeData(self, indexes): + item_ids = [ + index.data(ITEM_ID_ROLE) + for index in indexes + ] + + item_ids_data = convert_data_to_bytes(item_ids) + mime_data = super(FilesModel, self).mimeData(indexes) + mime_data.setData("files_widget/internal_move", item_ids_data) + + file_items = [] + for item_id in item_ids: + file_item = self.get_file_item_by_id(item_id) + if file_item: + file_items.append(file_item.to_dict()) + + full_item_data = convert_data_to_bytes({ + "items": file_items, + "id": self._id + }) + mime_data.setData("files_widget/full_data", full_item_data) + return mime_data + + def dropMimeData(self, mime_data, action, row, col, index): + item_ids = convert_bytes_to_json( + mime_data.data("files_widget/internal_move") + ) + if item_ids is None: + return False + + # Find matching item after which will be items moved + # - store item before moved items are removed + root = self.invisibleRootItem() + if row >= 0: + src_item = self.item(row) + else: + src_item_id = index.data(ITEM_ID_ROLE) + src_item = self._items_by_id.get(src_item_id) + + src_row = None + if src_item: + src_row = src_item.row() + + # Take out items that should be 
+class FilesProxyModel(QtCore.QSortFilterProxyModel):
+    def __init__(self, *args, **kwargs):
+        super(FilesProxyModel, self).__init__(*args, **kwargs)
+        self._allow_folders = False
+        self._allowed_extensions = None
+        self._multivalue = False
+
+    def set_multivalue(self, multivalue):
+        """Disable filtering."""
+
+        if self._multivalue == multivalue:
+            return
+        self._multivalue = multivalue
+        self.invalidateFilter()
+
+    def set_allow_folders(self, allow=None):
+        if allow is None:
+            allow = not self._allow_folders
+
+        if allow == self._allow_folders:
+            return
+        self._allow_folders = allow
+        self.invalidateFilter()
+
+    def set_allowed_extensions(self, extensions=None):
+        if extensions is not None:
+            _extensions = set()
+            for ext in set(extensions):
+                if not ext.startswith("."):
+                    ext = ".{}".format(ext)
+                _extensions.add(ext.lower())
+            extensions = _extensions
+
+        if self._allowed_extensions != extensions:
+            self._allowed_extensions = extensions
+            self.invalidateFilter()
+
+    def are_valid_files(self, filepaths):
+        for filepath in filepaths:
+            if os.path.isfile(filepath):
+                _, ext = os.path.splitext(filepath)
+                # Guard against extensions not being set yet
+                if (
+                    self._allowed_extensions
+                    and ext.lower() in self._allowed_extensions
+                ):
+                    return True
+
+            elif self._allow_folders:
+                return True
+        return False
+
+    def filter_valid_files(self, filepaths):
+        filtered_paths = []
+        for filepath in filepaths:
+            if os.path.isfile(filepath):
+                _, ext = os.path.splitext(filepath)
+                # Guard against extensions not being set yet
+                if (
+                    self._allowed_extensions
+                    and ext.lower() in self._allowed_extensions
+                ):
+                    filtered_paths.append(filepath)
+
+            elif self._allow_folders:
+                filtered_paths.append(filepath)
+        return filtered_paths
+
+    def filterAcceptsRow(self, row, parent_index):
+        # Skip filtering if multivalue is set
+        if self._multivalue:
+            return True
+
+        model = self.sourceModel()
+        index = model.index(row, self.filterKeyColumn(), parent_index)
+        # First check if the item is a folder and if folders are enabled
+        if index.data(IS_DIR_ROLE):
+            return self._allow_folders
+
+        # Check if there are any allowed extensions
+        if self._allowed_extensions is None:
+            return False
+        return index.data(EXT_ROLE) in self._allowed_extensions
+
+    def lessThan(self, left, right):
+        left_comparison = left.data(DIRPATH_ROLE)
+        right_comparison = right.data(DIRPATH_ROLE)
+        if left_comparison == right_comparison:
+            left_comparison = left.data(ITEM_LABEL_ROLE)
+            right_comparison = right.data(ITEM_LABEL_ROLE)
+
+        # Strict comparison so equal values are not reported as "less"
+        return left_comparison < right_comparison
+
+
+class ItemWidget(QtWidgets.QWidget):
+    context_menu_requested = QtCore.Signal(QtCore.QPoint)
+
+    def __init__(
+        self, item_id, label, pixmap_icon, is_sequence, multivalue, parent=None
+    ):
+        self._item_id = item_id
+
+        super(ItemWidget, self).__init__(parent)
+
+        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
+
+        icon_widget = PixmapLabel(pixmap_icon, self)
+        label_widget = QtWidgets.QLabel(label, self)
+
+        label_size_hint = label_widget.sizeHint()
+        height = label_size_hint.height()
+        actions_menu_pix = paint_image_with_color(
+            get_image(filename="menu.png"), QtCore.Qt.white
+        )
+
+        split_btn = ClickableLabel(self)
+        split_btn.setFixedSize(height, height)
+        split_btn.setPixmap(actions_menu_pix)
+        if multivalue:
+            split_btn.setVisible(False)
+        else:
+            split_btn.setVisible(is_sequence)
+
+        layout = QtWidgets.QHBoxLayout(self)
+        layout.setContentsMargins(5, 5, 5, 5)
+        layout.addWidget(icon_widget, 0)
+        layout.addWidget(label_widget, 1)
+        layout.addWidget(split_btn, 0)
+
+        split_btn.clicked.connect(self._on_actions_clicked)
+
+        self._icon_widget = icon_widget
+        self._label_widget = label_widget
+        self._split_btn = split_btn
+        self._actions_menu_pix = actions_menu_pix
+        self._last_scaled_pix_height = None
+
+    def _update_btn_size(self):
+        label_size_hint = self._label_widget.sizeHint()
+        height = label_size_hint.height()
+        if height == self._last_scaled_pix_height:
+            return
+        self._last_scaled_pix_height = height
+        self._split_btn.setFixedSize(height, height)
+        pix = self._actions_menu_pix.scaled(
+            height, height,
+            QtCore.Qt.KeepAspectRatio,
+            QtCore.Qt.SmoothTransformation
+        )
+        self._split_btn.setPixmap(pix)
+
+    def showEvent(self, event):
+        super(ItemWidget, self).showEvent(event)
+        self._update_btn_size()
+
+    def resizeEvent(self, event):
+        super(ItemWidget, self).resizeEvent(event)
+        self._update_btn_size()
+
+    def _on_actions_clicked(self):
+        pos = self._split_btn.rect().bottomLeft()
+        point = self._split_btn.mapToGlobal(pos)
+        self.context_menu_requested.emit(point)
+
+
+class InViewButton(IconButton):
+    pass
+
+
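`InViewButton` instances are parented to the view itself rather than its viewport, so they are drawn above the scrolled content; `FilesView` then repositions the button on every resize (see `_update_remove_btn` below). A condensed sketch of that overlay pattern, using a plain QPushButton for illustration:

    from qtpy import QtWidgets

    class OverlayButtonView(QtWidgets.QListView):
        """List view with a button floating over its bottom-right corner."""

        def __init__(self, parent=None):
            super(OverlayButtonView, self).__init__(parent)
            # Child of the view (not the viewport) so it is drawn on top
            self._overlay_btn = QtWidgets.QPushButton("X", self)
            self._overlay_btn.setFixedSize(24, 24)

        def resizeEvent(self, event):
            super(OverlayButtonView, self).resizeEvent(event)
            viewport = self.viewport()
            pos_x = viewport.width() - self._overlay_btn.width() - 5
            pos_y = viewport.height() - self._overlay_btn.height() - 5
            self._overlay_btn.move(max(0, pos_x), max(0, pos_y))
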
+class FilesView(QtWidgets.QListView):
+    """View showing file items and their sequences."""
+
+    remove_requested = QtCore.Signal()
+    context_menu_requested = QtCore.Signal(QtCore.QPoint)
+
+    def __init__(self, *args, **kwargs):
+        super(FilesView, self).__init__(*args, **kwargs)
+
+        self.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
+        self.setSelectionMode(
+            QtWidgets.QAbstractItemView.ExtendedSelection
+        )
+        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
+        self.setAcceptDrops(True)
+        self.setDragEnabled(True)
+        self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
+
+        remove_btn = InViewButton(self)
+        pix_enabled = paint_image_with_color(
+            get_image(filename="delete.png"), QtCore.Qt.white
+        )
+        pix_disabled = paint_image_with_color(
+            get_image(filename="delete.png"), QtCore.Qt.gray
+        )
+        icon = QtGui.QIcon(pix_enabled)
+        icon.addPixmap(pix_disabled, QtGui.QIcon.Disabled, QtGui.QIcon.Off)
+        remove_btn.setIcon(icon)
+        remove_btn.setEnabled(False)
+
+        remove_btn.clicked.connect(self._on_remove_clicked)
+        self.customContextMenuRequested.connect(self._on_context_menu_request)
+
+        self._remove_btn = remove_btn
+        self._multivalue = False
+
+    def setSelectionModel(self, *args, **kwargs):
+        """Catch selection model set to register signal callback.
+
+        Selection model is not available during initialization.
+ """ + + super(FilesView, self).setSelectionModel(*args, **kwargs) + selection_model = self.selectionModel() + selection_model.selectionChanged.connect(self._on_selection_change) + + def set_multivalue(self, multivalue): + """Disable remove button on multivalue.""" + + self._multivalue = multivalue + self._remove_btn.setVisible(not multivalue) + + def update_remove_btn_visibility(self): + model = self.model() + visible = False + if not self._multivalue and model: + visible = model.rowCount() > 0 + self._remove_btn.setVisible(visible) + + def has_selected_item_ids(self): + """Is any index selected.""" + for index in self.selectionModel().selectedIndexes(): + instance_id = index.data(ITEM_ID_ROLE) + if instance_id is not None: + return True + return False + + def get_selected_item_ids(self): + """Ids of selected instances.""" + + selected_item_ids = set() + for index in self.selectionModel().selectedIndexes(): + instance_id = index.data(ITEM_ID_ROLE) + if instance_id is not None: + selected_item_ids.add(instance_id) + return selected_item_ids + + def has_selected_sequence(self): + for index in self.selectionModel().selectedIndexes(): + if index.data(IS_SEQUENCE_ROLE): + return True + return False + + def event(self, event): + if event.type() == QtCore.QEvent.KeyPress: + if ( + event.key() == QtCore.Qt.Key_Delete + and self.has_selected_item_ids() + ): + self.remove_requested.emit() + return True + + return super(FilesView, self).event(event) + + def _on_context_menu_request(self, pos): + index = self.indexAt(pos) + if index.isValid(): + point = self.viewport().mapToGlobal(pos) + self.context_menu_requested.emit(point) + + def _on_selection_change(self): + self._remove_btn.setEnabled(self.has_selected_item_ids()) + + def _on_remove_clicked(self): + self.remove_requested.emit() + + def _update_remove_btn(self): + """Position remove button to bottom right.""" + + viewport = self.viewport() + height = viewport.height() + pos_x = viewport.width() - self._remove_btn.width() - 5 + pos_y = height - self._remove_btn.height() - 5 + self._remove_btn.move(max(0, pos_x), max(0, pos_y)) + + def resizeEvent(self, event): + super(FilesView, self).resizeEvent(event) + self._update_remove_btn() + + def showEvent(self, event): + super(FilesView, self).showEvent(event) + self._update_remove_btn() + self.update_remove_btn_visibility() + + +class FilesWidget(QtWidgets.QFrame): + value_changed = QtCore.Signal() + + def __init__(self, single_item, allow_sequences, extensions_label, parent): + super(FilesWidget, self).__init__(parent) + self.setAcceptDrops(True) + + empty_widget = DropEmpty( + single_item, allow_sequences, extensions_label, self + ) + + files_model = FilesModel(single_item, allow_sequences) + files_proxy_model = FilesProxyModel() + files_proxy_model.setSourceModel(files_model) + files_view = FilesView(self) + files_view.setModel(files_proxy_model) + + layout = QtWidgets.QStackedLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.setStackingMode(QtWidgets.QStackedLayout.StackAll) + layout.addWidget(empty_widget) + layout.addWidget(files_view) + layout.setCurrentWidget(empty_widget) + + files_proxy_model.rowsInserted.connect(self._on_rows_inserted) + files_proxy_model.rowsRemoved.connect(self._on_rows_removed) + files_view.remove_requested.connect(self._on_remove_requested) + files_view.context_menu_requested.connect( + self._on_context_menu_requested + ) + + self._in_set_value = False + self._single_item = single_item + self._multivalue = False + + self._empty_widget = empty_widget + 
self._files_model = files_model + self._files_proxy_model = files_proxy_model + self._files_view = files_view + + self._widgets_by_id = {} + + self._layout = layout + + def _set_multivalue(self, multivalue): + if self._multivalue is multivalue: + return + self._multivalue = multivalue + self._files_view.set_multivalue(multivalue) + self._files_model.set_multivalue(multivalue) + self._files_proxy_model.set_multivalue(multivalue) + self.setEnabled(not multivalue) + + def set_value(self, value, multivalue): + self._in_set_value = True + + widget_ids = set(self._widgets_by_id.keys()) + self._remove_item_by_ids(widget_ids) + + self._set_multivalue(multivalue) + + self._add_filepaths(value) + + self._in_set_value = False + + def current_value(self): + model = self._files_proxy_model + item_ids = set() + for row in range(model.rowCount()): + index = model.index(row, 0) + item_ids.add(index.data(ITEM_ID_ROLE)) + + file_items = [] + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if file_item is not None: + file_items.append(file_item.to_dict()) + + if not self._single_item: + return file_items + if file_items: + return file_items[0] + + empty_item = FileDefItem.create_empty_item() + return empty_item.to_dict() + + def set_filters(self, folders_allowed, exts_filter): + self._files_proxy_model.set_allow_folders(folders_allowed) + self._files_proxy_model.set_allowed_extensions(exts_filter) + self._empty_widget.set_extensions(exts_filter) + self._empty_widget.set_allow_folders(folders_allowed) + + def _on_rows_inserted(self, parent_index, start_row, end_row): + for row in range(start_row, end_row + 1): + index = self._files_proxy_model.index(row, 0, parent_index) + item_id = index.data(ITEM_ID_ROLE) + if item_id in self._widgets_by_id: + continue + label = index.data(ITEM_LABEL_ROLE) + pixmap_icon = index.data(ITEM_ICON_ROLE) + is_sequence = index.data(IS_SEQUENCE_ROLE) + + widget = ItemWidget( + item_id, + label, + pixmap_icon, + is_sequence, + self._multivalue + ) + widget.context_menu_requested.connect( + self._on_context_menu_requested + ) + self._files_view.setIndexWidget(index, widget) + self._files_proxy_model.setData( + index, widget.sizeHint(), QtCore.Qt.SizeHintRole + ) + self._widgets_by_id[item_id] = widget + + if not self._in_set_value: + self.value_changed.emit() + + self._update_visibility() + + def _on_rows_removed(self, parent_index, start_row, end_row): + available_item_ids = set() + for row in range(self._files_proxy_model.rowCount()): + index = self._files_proxy_model.index(row, 0) + item_id = index.data(ITEM_ID_ROLE) + available_item_ids.add(index.data(ITEM_ID_ROLE)) + + widget_ids = set(self._widgets_by_id.keys()) + for item_id in available_item_ids: + if item_id in widget_ids: + widget_ids.remove(item_id) + + for item_id in widget_ids: + widget = self._widgets_by_id.pop(item_id) + widget.setVisible(False) + widget.deleteLater() + + if not self._in_set_value: + self.value_changed.emit() + self._update_visibility() + + def _on_split_request(self): + if self._multivalue: + return + + item_ids = self._files_view.get_selected_item_ids() + if not item_ids: + return + + for item_id in item_ids: + file_item = self._files_model.get_file_item_by_id(item_id) + if not file_item: + return + + new_items = file_item.split_sequence() + self._add_filepaths(new_items) + self._remove_item_by_ids(item_ids) + + def _on_remove_requested(self): + if self._multivalue: + return + + items_to_delete = self._files_view.get_selected_item_ids() + if items_to_delete: + 
self._remove_item_by_ids(items_to_delete) + + def _on_context_menu_requested(self, pos): + if self._multivalue: + return + + menu = QtWidgets.QMenu(self._files_view) + + if self._files_view.has_selected_sequence(): + split_action = QtWidgets.QAction("Split sequence", menu) + split_action.triggered.connect(self._on_split_request) + menu.addAction(split_action) + + remove_action = QtWidgets.QAction("Remove", menu) + remove_action.triggered.connect(self._on_remove_requested) + menu.addAction(remove_action) + + menu.popup(pos) + + def dragEnterEvent(self, event): + if self._multivalue: + return + + mime_data = event.mimeData() + if mime_data.hasUrls(): + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + if os.path.exists(filepath): + filepaths.append(filepath) + + if self._files_proxy_model.are_valid_files(filepaths): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + full_data_value = mime_data.data("files_widget/full_data") + if self._handle_full_data_drag(full_data_value): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + def dragLeaveEvent(self, event): + event.accept() + + def dropEvent(self, event): + if self._multivalue: + return + + mime_data = event.mimeData() + if mime_data.hasUrls(): + event.accept() + filepaths = [] + for url in mime_data.urls(): + filepath = url.toLocalFile() + if os.path.exists(filepath): + filepaths.append(filepath) + + # Filter filepaths before passing it to model + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._handle_full_data_drop( + mime_data.data("files_widget/full_data") + ): + event.setDropAction(QtCore.Qt.CopyAction) + event.accept() + + super(FilesWidget, self).dropEvent(event) + + def _handle_full_data_drag(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + return True + + def _handle_full_data_drop(self, value): + if value is None: + return False + + full_data = convert_bytes_to_json(value) + if full_data is None: + return False + + if full_data["id"] == self._files_model.id: + return False + + for item in full_data["items"]: + filepaths = [ + os.path.join(item["directory"], filename) + for filename in item["filenames"] + ] + filepaths = self._files_proxy_model.filter_valid_files(filepaths) + if filepaths: + self._add_filepaths(filepaths) + + if self._copy_modifiers_enabled(): + return False + return True + + def _copy_modifiers_enabled(self): + if ( + QtWidgets.QApplication.keyboardModifiers() + & QtCore.Qt.ControlModifier + ): + return True + return False + + def _add_filepaths(self, filepaths): + self._files_model.add_filepaths(filepaths) + + def _remove_item_by_ids(self, item_ids): + self._files_model.remove_item_by_ids(item_ids) + + def _update_visibility(self): + files_exists = self._files_proxy_model.rowCount() > 0 + if files_exists: + current_widget = self._files_view + else: + current_widget = self._empty_widget + self._layout.setCurrentWidget(current_widget) + self._files_view.update_remove_btn_visibility() diff --git a/openpype/widgets/attribute_defs/widgets.py b/openpype/tools/attribute_defs/widgets.py similarity index 63% rename from openpype/widgets/attribute_defs/widgets.py rename to openpype/tools/attribute_defs/widgets.py index 23f025967d..18e2e13d06 100644 --- a/openpype/widgets/attribute_defs/widgets.py +++ b/openpype/tools/attribute_defs/widgets.py 
@@ -1,25 +1,45 @@ import uuid +import copy -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore from openpype.lib.attribute_definitions import ( - AbtractAttrDef, + AbstractAttrDef, UnknownDef, + HiddenDef, NumberDef, TextDef, EnumDef, BoolDef, FileDef, + UIDef, UISeparatorDef, UILabelDef ) +from openpype.tools.utils import ( + CustomTextComboBox, + FocusSpinBox, + FocusDoubleSpinBox, +) from openpype.widgets.nice_checkbox import NiceCheckbox +from .files_widget import FilesWidget + def create_widget_for_attr_def(attr_def, parent=None): - if not isinstance(attr_def, AbtractAttrDef): + widget = _create_widget_for_attr_def(attr_def, parent) + if attr_def.hidden: + widget.setVisible(False) + + if attr_def.disabled: + widget.setEnabled(False) + return widget + + +def _create_widget_for_attr_def(attr_def, parent=None): + if not isinstance(attr_def, AbstractAttrDef): raise TypeError("Unexpected type \"{}\" expected \"{}\"".format( - str(type(attr_def)), AbtractAttrDef + str(type(attr_def)), AbstractAttrDef )) if isinstance(attr_def, NumberDef): @@ -37,6 +57,9 @@ def create_widget_for_attr_def(attr_def, parent=None): if isinstance(attr_def, UnknownDef): return UnknownAttrWidget(attr_def, parent) + if isinstance(attr_def, HiddenDef): + return HiddenAttrWidget(attr_def, parent) + if isinstance(attr_def, FileDef): return FileAttrWidget(attr_def, parent) @@ -51,6 +74,116 @@ def create_widget_for_attr_def(attr_def, parent=None): )) +class AttributeDefinitionsWidget(QtWidgets.QWidget): + """Create widgets for attribute definitions in grid layout. + + Widget creates input widgets for passed attribute definitions. + + Widget can't handle multiselection values. + """ + + def __init__(self, attr_defs=None, parent=None): + super(AttributeDefinitionsWidget, self).__init__(parent) + + self._widgets = [] + self._current_keys = set() + + self.set_attr_defs(attr_defs) + + def clear_attr_defs(self): + """Remove all existing widgets and reset layout if needed.""" + self._widgets = [] + self._current_keys = set() + + layout = self.layout() + if layout is not None: + if layout.count() == 0: + return + + while layout.count(): + item = layout.takeAt(0) + widget = item.widget() + if widget: + widget.setVisible(False) + widget.deleteLater() + + layout.deleteLater() + + new_layout = QtWidgets.QGridLayout() + new_layout.setColumnStretch(0, 0) + new_layout.setColumnStretch(1, 1) + self.setLayout(new_layout) + + def set_attr_defs(self, attr_defs): + """Replace current attribute definitions with passed.""" + self.clear_attr_defs() + if attr_defs: + self.add_attr_defs(attr_defs) + + def add_attr_defs(self, attr_defs): + """Add attribute definitions to current.""" + layout = self.layout() + + row = 0 + for attr_def in attr_defs: + if not isinstance(attr_def, UIDef): + if attr_def.key in self._current_keys: + raise KeyError( + "Duplicated key \"{}\"".format(attr_def.key)) + + self._current_keys.add(attr_def.key) + widget = create_widget_for_attr_def(attr_def, self) + self._widgets.append(widget) + + if attr_def.hidden: + continue + + expand_cols = 2 + if attr_def.is_value_def and attr_def.is_label_horizontal: + expand_cols = 1 + + col_num = 2 - expand_cols + + if attr_def.label: + label_widget = QtWidgets.QLabel(attr_def.label, self) + tooltip = attr_def.tooltip + if tooltip: + label_widget.setToolTip(tooltip) + layout.addWidget( + label_widget, row, 0, 1, expand_cols + ) + if not attr_def.is_label_horizontal: + row += 1 + + layout.addWidget( + widget, row, col_num, 1, expand_cols + ) + row += 1 + + def 
set_value(self, value): + new_value = copy.deepcopy(value) + unused_keys = set(new_value.keys()) + for widget in self._widgets: + attr_def = widget.attr_def + if attr_def.key not in new_value: + continue + unused_keys.remove(attr_def.key) + + widget_value = new_value[attr_def.key] + if widget_value is None: + widget_value = copy.deepcopy(attr_def.default) + widget.set_value(widget_value) + + def current_value(self): + output = {} + for widget in self._widgets: + attr_def = widget.attr_def + if not isinstance(attr_def, UIDef): + output[attr_def.key] = widget.current_value() + + return output + + class _BaseAttrDefWidget(QtWidgets.QWidget): # Type 'object' may not work with older PySide versions value_changed = QtCore.Signal(object, uuid.UUID) @@ -117,10 +250,10 @@ class NumberAttrWidget(_BaseAttrDefWidget): def _ui_init(self): decimals = self.attr_def.decimals if decimals > 0: - input_widget = QtWidgets.QDoubleSpinBox(self) + input_widget = FocusDoubleSpinBox(self) input_widget.setDecimals(decimals) else: - input_widget = QtWidgets.QSpinBox(self) + input_widget = FocusSpinBox(self) if self.attr_def.tooltip: input_widget.setToolTip(self.attr_def.tooltip) @@ -263,17 +396,20 @@ class BoolAttrWidget(_BaseAttrDefWidget): class EnumAttrWidget(_BaseAttrDefWidget): + def __init__(self, *args, **kwargs): + self._multivalue = False + super(EnumAttrWidget, self).__init__(*args, **kwargs) + def _ui_init(self): - input_widget = QtWidgets.QComboBox(self) + input_widget = CustomTextComboBox(self) combo_delegate = QtWidgets.QStyledItemDelegate(input_widget) input_widget.setItemDelegate(combo_delegate) if self.attr_def.tooltip: input_widget.setToolTip(self.attr_def.tooltip) - items = self.attr_def.items - for key, label in items.items(): - input_widget.addItem(label, key) + for item in self.attr_def.items: + input_widget.addItem(item["label"], item["value"]) idx = input_widget.findData(self.attr_def.default) if idx >= 0: @@ -288,6 +424,9 @@ class EnumAttrWidget(_BaseAttrDefWidget): def _on_value_change(self): new_value = self.current_value() + if self._multivalue: + self._multivalue = False + self._input_widget.set_custom_text(None) self.value_changed.emit(new_value, self.attr_def.id) def current_value(self): @@ -295,14 +434,23 @@ class EnumAttrWidget(_BaseAttrDefWidget): return self._input_widget.itemData(idx) def set_value(self, value, multivalue=False): + if multivalue: + set_value = set(value) + if len(set_value) == 1: + multivalue = False + value = tuple(set_value)[0] + if not multivalue: idx = self._input_widget.findData(value) cur_idx = self._input_widget.currentIndex() if idx != cur_idx and idx >= 0: self._input_widget.setCurrentIndex(idx) - else: - self._input_widget.lineEdit().setText("Multiselection") + custom_text = None + if multivalue: + custom_text = "< Multiselection >" + self._input_widget.set_custom_text(custom_text) + self._multivalue = multivalue class UnknownAttrWidget(_BaseAttrDefWidget): @@ -334,18 +482,37 @@ class UnknownAttrWidget(_BaseAttrDefWidget): self._input_widget.setText(str_value) +class HiddenAttrWidget(_BaseAttrDefWidget): + def _ui_init(self): + self.setVisible(False) + self._value = None + self._multivalue = False + + def setVisible(self, visible): + if visible: + visible = False + super(HiddenAttrWidget, self).setVisible(visible) + + def current_value(self): + if self._multivalue: + raise ValueError("{} can't output for multivalue.".format( + self.__class__.__name__ + )) + return self._value + + def set_value(self, value, multivalue=False): + self._value = 
copy.deepcopy(value) + self._multivalue = multivalue + + class FileAttrWidget(_BaseAttrDefWidget): def _ui_init(self): - self.multipath = self.attr_def.multipath - if self.multipath: - from .files_widget import MultiFilesWidget - - input_widget = MultiFilesWidget(self) - - else: - from .files_widget import SingleFileWidget - - input_widget = SingleFileWidget(self) + input_widget = FilesWidget( + self.attr_def.single_item, + self.attr_def.allow_sequences, + self.attr_def.extensions_label, + self + ) if self.attr_def.tooltip: input_widget.setToolTip(self.attr_def.tooltip) diff --git a/openpype/tools/context_dialog/window.py b/openpype/tools/context_dialog/window.py index 9e030853bf..86c53b55c5 100644 --- a/openpype/tools/context_dialog/window.py +++ b/openpype/tools/context_dialog/window.py @@ -1,10 +1,10 @@ import os import json -from Qt import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB +from qtpy import QtWidgets, QtCore, QtGui from openpype import style +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils.lib import center_window from openpype.tools.utils.assets_widget import SingleSelectAssetsWidget from openpype.tools.utils.constants import ( diff --git a/openpype/tools/creator/constants.py b/openpype/tools/creator/constants.py index 26a25dc010..5c4bbdcca3 100644 --- a/openpype/tools/creator/constants.py +++ b/openpype/tools/creator/constants.py @@ -1,4 +1,4 @@ -from Qt import QtCore +from qtpy import QtCore FAMILY_ROLE = QtCore.Qt.UserRole + 1 diff --git a/openpype/tools/creator/model.py b/openpype/tools/creator/model.py index ef61c6e0f0..7bb2757a11 100644 --- a/openpype/tools/creator/model.py +++ b/openpype/tools/creator/model.py @@ -1,8 +1,7 @@ import uuid -from Qt import QtGui, QtCore +from qtpy import QtGui, QtCore -from avalon import api -from openpype.pipeline import LegacyCreator +from openpype.pipeline import discover_legacy_creator_plugins from . 
constants import ( FAMILY_ROLE, @@ -22,8 +21,10 @@ class CreatorsModel(QtGui.QStandardItemModel): self._creators_by_id = {} items = [] - creators = api.discover(LegacyCreator) + creators = discover_legacy_creator_plugins() for creator in creators: + if not creator.enabled: + continue item_id = str(uuid.uuid4()) self._creators_by_id[item_id] = creator @@ -37,9 +38,10 @@ class CreatorsModel(QtGui.QStandardItemModel): if not items: item = QtGui.QStandardItem("No registered families") item.setEnabled(False) - item.setData(QtCore.Qt.ItemIsEnabled, False) + item.setData(False, QtCore.Qt.ItemIsEnabled) items.append(item) + items.sort(key=lambda item: item.text()) self.invisibleRootItem().appendRows(items) def get_creator_by_id(self, item_id): diff --git a/openpype/tools/creator/widgets.py b/openpype/tools/creator/widgets.py index 43df08496b..74f75811ff 100644 --- a/openpype/tools/creator/widgets.py +++ b/openpype/tools/creator/widgets.py @@ -1,13 +1,20 @@ import re import inspect -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui import qtawesome from openpype.pipeline.create import SUBSET_NAME_ALLOWED_SYMBOLS from openpype.tools.utils import ErrorMessageBox +if hasattr(QtGui, "QRegularExpressionValidator"): + RegularExpressionValidatorClass = QtGui.QRegularExpressionValidator + RegularExpressionClass = QtCore.QRegularExpression +else: + RegularExpressionValidatorClass = QtGui.QRegExpValidator + RegularExpressionClass = QtCore.QRegExp + class CreateErrorMessageBox(ErrorMessageBox): def __init__( @@ -82,12 +89,12 @@ class CreateErrorMessageBox(ErrorMessageBox): content_layout.addWidget(tb_widget) -class SubsetNameValidator(QtGui.QRegExpValidator): +class SubsetNameValidator(RegularExpressionValidatorClass): invalid = QtCore.Signal(set) pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) def __init__(self): - reg = QtCore.QRegExp(self.pattern) + reg = RegularExpressionClass(self.pattern) super(SubsetNameValidator, self).__init__(reg) def validate(self, text, pos): diff --git a/openpype/tools/creator/window.py b/openpype/tools/creator/window.py index 51cc66e715..57e2c49576 100644 --- a/openpype/tools/creator/window.py +++ b/openpype/tools/creator/window.py @@ -2,18 +2,17 @@ import sys import traceback import re -from Qt import QtWidgets, QtCore - -from avalon import api, io +from qtpy import QtWidgets, QtCore +from openpype.client import get_asset_by_name, get_subsets from openpype import style -from openpype.api import get_current_project_settings +from openpype.settings import get_current_project_settings from openpype.tools.utils.lib import qt_app_context +from openpype.pipeline import legacy_io from openpype.pipeline.create import ( SUBSET_NAME_ALLOWED_SYMBOLS, legacy_create, CreatorError, - LegacyCreator, ) from .model import CreatorsModel @@ -217,12 +216,12 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return + project_name = legacy_io.active_project() asset_doc = None if creator_plugin: # Get the asset from the database which match with the name - asset_doc = io.find_one( - {"name": asset_name, "type": "asset"}, - projection={"_id": 1} + asset_doc = get_asset_by_name( + project_name, asset_name, fields=["_id"] ) # Get plugin @@ -237,9 +236,8 @@ class CreatorWindow(QtWidgets.QDialog): self._set_valid_state(False) return - project_name = io.Session["AVALON_PROJECT"] asset_id = asset_doc["_id"] - task_name = io.Session["AVALON_TASK"] + task_name = legacy_io.Session["AVALON_TASK"] # Calculate subset name with Creator plugin 
subset_name = creator_plugin.get_subset_name( @@ -271,14 +269,13 @@ class CreatorWindow(QtWidgets.QDialog): self._subset_name_input.setText(subset_name) # Get all subsets of the current asset - subset_docs = io.find( - { - "type": "subset", - "parent": asset_id - }, - {"name": 1} + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] ) - existing_subset_names = set(subset_docs.distinct("name")) + existing_subset_names = { + subset_doc["name"] + for subset_doc in subset_docs + } existing_subset_names_low = set( _name.lower() for _name in existing_subset_names @@ -372,7 +369,7 @@ class CreatorWindow(QtWidgets.QDialog): self.setStyleSheet(style.load_stylesheet()) def refresh(self): - self._asset_name_input.setText(io.Session["AVALON_ASSET"]) + self._asset_name_input.setText(legacy_io.Session["AVALON_ASSET"]) self._creators_model.reset() @@ -385,7 +382,7 @@ class CreatorWindow(QtWidgets.QDialog): ) current_index = None family = None - task_name = io.Session.get("AVALON_TASK", None) + task_name = legacy_io.Session.get("AVALON_TASK", None) lowered_task_name = task_name.lower() if task_name: for _family, _task_names in pype_project_setting.items(): @@ -471,7 +468,7 @@ class CreatorWindow(QtWidgets.QDialog): self._msg_timer.start() -def show(debug=False, parent=None): +def show(parent=None): """Display asset creator GUI Arguments: @@ -488,24 +485,6 @@ def show(debug=False, parent=None): except (AttributeError, RuntimeError): pass - if debug: - from avalon import mock - for creator in mock.creators: - api.register_plugin(LegacyCreator, creator) - - import traceback - sys.excepthook = lambda typ, val, tb: traceback.print_last() - - io.install() - - any_project = next( - project for project in io.projects() - if project.get("active", True) is not False - ) - - api.Session["AVALON_PROJECT"] = any_project["name"] - module.project = any_project["name"] - with qt_app_context(): window = CreatorWindow(parent) window.refresh() diff --git a/openpype/tools/experimental_tools/dialog.py b/openpype/tools/experimental_tools/dialog.py index 0099492207..00b6ae07a4 100644 --- a/openpype/tools/experimental_tools/dialog.py +++ b/openpype/tools/experimental_tools/dialog.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype.style import ( load_stylesheet, diff --git a/openpype/tools/experimental_tools/tools_def.py b/openpype/tools/experimental_tools/tools_def.py index fa2971dc1d..5a5eec09ed 100644 --- a/openpype/tools/experimental_tools/tools_def.py +++ b/openpype/tools/experimental_tools/tools_def.py @@ -88,12 +88,10 @@ class ExperimentalTools: "publisher", "New publisher", "Combined creation and publishing into one tool.", - self._show_publisher - ), - ExperimentalTool( - "traypublisher", - "New Standalone Publisher", - "Standalone publisher using new publisher. 
Requires restart" + self._show_publisher, + hosts_filter=["blender", "maya", "nuke", "celaction", "flame", + "fusion", "harmony", "hiero", "resolve", + "tvpaint", "unreal"] ) ] @@ -164,9 +162,9 @@ class ExperimentalTools: def _show_publisher(self): if self._publisher_tool is None: - from openpype.tools import publisher + from openpype.tools.publisher.window import PublisherWindow - self._publisher_tool = publisher.PublisherWindow( + self._publisher_tool = PublisherWindow( parent=self._parent_widget ) diff --git a/openpype/tools/flickcharm.py b/openpype/tools/flickcharm.py index a5ea5a79d8..8d85dacce4 100644 --- a/openpype/tools/flickcharm.py +++ b/openpype/tools/flickcharm.py @@ -16,7 +16,7 @@ travelled only very slightly with the cursor. """ import copy -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui class FlickData(object): diff --git a/openpype/tools/launcher/actions.py b/openpype/tools/launcher/actions.py index 546bda1c34..61660ee9b7 100644 --- a/openpype/tools/launcher/actions.py +++ b/openpype/tools/launcher/actions.py @@ -1,11 +1,12 @@ import os -from Qt import QtWidgets, QtGui +from qtpy import QtWidgets, QtGui from openpype import PLUGINS_DIR from openpype import style -from openpype.api import Logger, resources +from openpype import resources from openpype.lib import ( + Logger, ApplictionExecutableNotFound, ApplicationLaunchFailed ) diff --git a/openpype/tools/launcher/constants.py b/openpype/tools/launcher/constants.py index 61f631759b..cb0049055c 100644 --- a/openpype/tools/launcher/constants.py +++ b/openpype/tools/launcher/constants.py @@ -1,4 +1,4 @@ -from Qt import QtCore +from qtpy import QtCore ACTION_ROLE = QtCore.Qt.UserRole diff --git a/openpype/tools/launcher/delegates.py b/openpype/tools/launcher/delegates.py index 7b53658727..02a40861d2 100644 --- a/openpype/tools/launcher/delegates.py +++ b/openpype/tools/launcher/delegates.py @@ -1,5 +1,5 @@ import time -from Qt import QtCore, QtWidgets, QtGui +from qtpy import QtCore, QtWidgets, QtGui from .constants import ( ANIMATION_START_ROLE, ANIMATION_STATE_ROLE, diff --git a/openpype/tools/launcher/lib.py b/openpype/tools/launcher/lib.py index c1392b7b8f..2507b6eddc 100644 --- a/openpype/tools/launcher/lib.py +++ b/openpype/tools/launcher/lib.py @@ -1,7 +1,7 @@ import os -from Qt import QtGui +from qtpy import QtGui import qtawesome -from openpype.api import resources +from openpype import resources ICON_CACHE = {} NOT_FOUND = type("NotFound", (object, ), {}) diff --git a/openpype/tools/launcher/models.py b/openpype/tools/launcher/models.py index 13567e7916..6c763544a9 100644 --- a/openpype/tools/launcher/models.py +++ b/openpype/tools/launcher/models.py @@ -6,9 +6,14 @@ import collections import time import appdirs -from Qt import QtCore, QtGui +from qtpy import QtCore, QtGui import qtawesome +from openpype.client import ( + get_projects, + get_project, + get_assets, +) from openpype.lib import JSONSettingRegistry from openpype.lib.applications import ( CUSTOM_LAUNCH_APP_GROUPS, @@ -81,13 +86,11 @@ class ActionModel(QtGui.QStandardItemModel): def get_application_actions(self): actions = [] - if not self.dbcon.Session.get("AVALON_PROJECT"): + if not self.dbcon.current_project(): return actions - project_doc = self.dbcon.find_one( - {"type": "project"}, - {"config.apps": True} - ) + project_name = self.dbcon.active_project() + project_doc = get_project(project_name, fields=["config.apps"]) if not project_doc: return actions @@ -278,18 +281,25 @@ class 
ActionModel(QtGui.QStandardItemModel): if not action_item: return - action = action_item.data(ACTION_ROLE) - actual_data = self._prepare_compare_data(action) + actions = action_item.data(ACTION_ROLE) + if not isinstance(actions, list): + actions = [actions] + + action_actions_data = [ + self._prepare_compare_data(action) + for action in actions + ] stored = self.launcher_registry.get_item("force_not_open_workfile") - if is_checked: - stored.append(actual_data) - else: - final_values = [] - for config in stored: - if config != actual_data: - final_values.append(config) - stored = final_values + for actual_data in action_actions_data: + if is_checked: + stored.append(actual_data) + else: + final_values = [] + for config in stored: + if config != actual_data: + final_values.append(config) + stored = final_values self.launcher_registry.set_item("force_not_open_workfile", stored) self.launcher_registry._get_item.cache_clear() @@ -326,21 +336,24 @@ class ActionModel(QtGui.QStandardItemModel): item (QStandardItem) stored (list) of dict """ - action = item.data(ACTION_ROLE) - if not self.is_application_action(action): + + actions = item.data(ACTION_ROLE) + if not isinstance(actions, list): + actions = [actions] + + if not self.is_application_action(actions[0]): return False - actual_data = self._prepare_compare_data(action) + action_actions_data = [ + self._prepare_compare_data(action) + for action in actions + ] for config in stored: - if config == actual_data: + if config in action_actions_data: return True - return False def _prepare_compare_data(self, action): - if isinstance(action, list) and action: - action = action[0] - compare_data = {} if action and action.label: compare_data = { @@ -448,7 +461,7 @@ class LauncherModel(QtCore.QObject): @property def project_name(self): """Current project name.""" - return self._dbcon.Session.get("AVALON_PROJECT") + return self._dbcon.current_project() @property def refreshing_assets(self): @@ -525,7 +538,7 @@ class LauncherModel(QtCore.QObject): current_project = self.project_name project_names = set() project_docs_by_name = {} - for project_doc in self._dbcon.projects(only_active=True): + for project_doc in get_projects(): project_name = project_doc["name"] project_names.add(project_name) project_docs_by_name[project_name] = project_doc @@ -649,9 +662,8 @@ class LauncherModel(QtCore.QObject): self._asset_refresh_thread = None def _refresh_assets(self): - asset_docs = list(self._dbcon.find( - {"type": "asset"}, - self._asset_projection + asset_docs = list(get_assets( + self._last_project_name, fields=self._asset_projection.keys() )) if not self._refreshing_assets: return diff --git a/openpype/tools/launcher/widgets.py b/openpype/tools/launcher/widgets.py index 62599664fe..a5bdd616b1 100644 --- a/openpype/tools/launcher/widgets.py +++ b/openpype/tools/launcher/widgets.py @@ -1,7 +1,7 @@ import copy import time import collections -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui import qtawesome from openpype.tools.flickcharm import FlickCharm @@ -173,7 +173,7 @@ class ActionBar(QtWidgets.QWidget): view.setResizeMode(QtWidgets.QListView.Adjust) view.setSelectionMode(QtWidgets.QListView.NoSelection) view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - view.setEditTriggers(QtWidgets.QListView.NoEditTriggers) + view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) view.setWrapping(True) view.setGridSize(QtCore.QSize(70, 75)) view.setIconSize(QtCore.QSize(30, 30)) @@ -312,11 +312,12 @@ class 
ActionBar(QtWidgets.QWidget): is_group = index.data(GROUP_ROLE) is_variant_group = index.data(VARIANT_GROUP_ROLE) + force_not_open_workfile = index.data(FORCE_NOT_OPEN_WORKFILE_ROLE) if not is_group and not is_variant_group: action = index.data(ACTION_ROLE) # Change data of application action if issubclass(action, ApplicationAction): - if index.data(FORCE_NOT_OPEN_WORKFILE_ROLE): + if force_not_open_workfile: action.data["start_last_workfile"] = False else: action.data.pop("start_last_workfile", None) @@ -385,10 +386,18 @@ class ActionBar(QtWidgets.QWidget): menu.addMenu(sub_menu) result = menu.exec_(QtGui.QCursor.pos()) - if result: - action = actions_mapping[result] - self._start_animation(index) - self.action_clicked.emit(action) + if not result: + return + + action = actions_mapping[result] + if issubclass(action, ApplicationAction): + if force_not_open_workfile: + action.data["start_last_workfile"] = False + else: + action.data.pop("start_last_workfile", None) + + self._start_animation(index) + self.action_clicked.emit(action) class ActionHistory(QtWidgets.QPushButton): @@ -414,7 +423,7 @@ class ActionHistory(QtWidgets.QPushButton): return widget = QtWidgets.QListWidget() - widget.setSelectionMode(widget.NoSelection) + widget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection) widget.setStyleSheet(""" * { font-family: "Courier New"; diff --git a/openpype/tools/launcher/window.py b/openpype/tools/launcher/window.py index d80b3eabf0..fcc8c0ba38 100644 --- a/openpype/tools/launcher/window.py +++ b/openpype/tools/launcher/window.py @@ -1,12 +1,11 @@ import copy import logging -from Qt import QtWidgets, QtCore, QtGui - -from avalon.api import AvalonMongoDB +from qtpy import QtWidgets, QtCore, QtGui from openpype import style -from openpype.api import resources +from openpype import resources +from openpype.pipeline import AvalonMongoDB import qtawesome from .models import ( @@ -41,7 +40,7 @@ class ProjectIconView(QtWidgets.QListView): # Workaround for scrolling being super slow or fast when # toggling between the two visual modes - self.setVerticalScrollMode(self.ScrollPerPixel) + self.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) self.setObjectName("IconView") self._mode = None diff --git a/openpype/tools/libraryloader/app.py b/openpype/tools/libraryloader/app.py index b73b415128..bd10595333 100644 --- a/openpype/tools/libraryloader/app.py +++ b/openpype/tools/libraryloader/app.py @@ -1,9 +1,10 @@ import sys -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui -from avalon.api import AvalonMongoDB from openpype import style +from openpype.client import get_projects, get_project +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import lib as tools_lib from openpype.tools.loader.widgets import ( ThumbnailWidget, @@ -16,8 +17,6 @@ from openpype.tools.utils.assets_widget import MultiSelectAssetsWidget from openpype.modules import ModulesManager -from . 
import lib - module = sys.modules[__name__] module.window = None @@ -240,7 +239,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): def get_filtered_projects(self): projects = list() - for project in self.dbcon.projects(): + for project in get_projects(fields=["name", "data.library_project"]): is_library = project.get("data", {}).get("library_project", False) if ( (is_library and self.show_libraries) or @@ -260,14 +259,6 @@ class LibraryLoaderWindow(QtWidgets.QDialog): self.dbcon.Session["AVALON_PROJECT"] = project_name - _config = lib.find_config() - if hasattr(_config, "install"): - _config.install() - else: - print( - "Config `%s` has no function `install`" % _config.__name__ - ) - self._subsets_widget.on_project_change(project_name) if self._repres_widget: self._repres_widget.on_project_change(project_name) @@ -313,14 +304,26 @@ class LibraryLoaderWindow(QtWidgets.QDialog): families = self._subsets_widget.get_subsets_families() self._families_filter_view.set_enabled_families(families) - def set_context(self, context, refresh=True): - self.echo("Setting context: {}".format(context)) - lib.schedule( - lambda: self._set_context(context, refresh=refresh), - 50, channel="mongo" - ) - # ------------------------------ + def set_context(self, context, refresh=True): + """Set the selection in the interface using a context. + The context must contain `asset` data by name. + + Args: + context (dict): The context to apply. + Returns: + None + """ + + asset_name = context.get("asset", None) + if asset_name is None: + return + + if refresh: + self._refresh_assets() + + self._assets_widget.select_asset_by_name(asset_name) + def _on_family_filter_change(self, families): self._subsets_widget.set_family_filters(families) @@ -333,10 +336,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): """Load assets from database""" if self.current_project is not None: # Ensure a project is loaded - project_doc = self.dbcon.find_one( - {"type": "project"}, - {"type": 1} - ) + project_doc = get_project(self.current_project, fields=["_id"]) assert project_doc, "This is a bug" self._families_filter_view.set_enabled_families(set()) @@ -381,7 +381,7 @@ class LibraryLoaderWindow(QtWidgets.QDialog): # Clear the version information on asset change self._version_info_widget.set_version(None) - self._thumbnail_widget.set_thumbnail(asset_ids) + self._thumbnail_widget.set_thumbnail("asset", asset_ids) self.data["state"]["assetIds"] = asset_ids @@ -436,34 +436,17 @@ class LibraryLoaderWindow(QtWidgets.QDialog): version_doc["_id"] for version_doc in version_docs ] + src_type = "version" if not thumbnail_src_ids: + src_type = "asset" thumbnail_src_ids = self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) + self._thumbnail_widget.set_thumbnail(src_type, thumbnail_src_ids) version_ids = [doc["_id"] for doc in version_docs or []] if self._repres_widget: self._repres_widget.set_version_ids(version_ids) - def _set_context(self, context, refresh=True): - """Set the selection in the interface using a context. - The context must contain `asset` data by name. - - Args: - context (dict): The context to apply. 
- Returns: - None - """ - - asset_name = context.get("asset", None) - if asset_name is None: - return - - if refresh: - self._refresh_assets() - - self._assets_widget.select_asset_by_name(asset_name) - def _on_message_timeout(self): self._message_label.setText("") diff --git a/openpype/tools/libraryloader/lib.py b/openpype/tools/libraryloader/lib.py deleted file mode 100644 index 182b48893a..0000000000 --- a/openpype/tools/libraryloader/lib.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import importlib -import logging - -log = logging.getLogger(__name__) - - -# `find_config` from `pipeline` -def find_config(): - log.info("Finding configuration for project..") - - config = os.environ["AVALON_CONFIG"] - - if not config: - raise EnvironmentError( - "No configuration found in " - "the project nor environment" - ) - - log.info("Found %s, loading.." % config) - return importlib.import_module(config) diff --git a/openpype/tools/loader/__main__.py b/openpype/tools/loader/__main__.py index 146ba7fd10..acf357aa97 100644 --- a/openpype/tools/loader/__main__.py +++ b/openpype/tools/loader/__main__.py @@ -19,12 +19,10 @@ def my_exception_hook(exctype, value, traceback): if __name__ == '__main__': - os.environ["AVALON_MONGO"] = "mongodb://localhost:27017" os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017" os.environ["AVALON_DB"] = "avalon" os.environ["AVALON_TIMEOUT"] = "1000" os.environ["OPENPYPE_DEBUG"] = "1" - os.environ["AVALON_CONFIG"] = "pype" os.environ["AVALON_ASSET"] = "Jungle" # Set the exception hook to our wrapping function diff --git a/openpype/tools/loader/app.py b/openpype/tools/loader/app.py index 923a1fabdb..302fe6c366 100644 --- a/openpype/tools/loader/app.py +++ b/openpype/tools/loader/app.py @@ -1,10 +1,15 @@ import sys +import traceback -from Qt import QtWidgets, QtCore -from avalon import api, io +from qtpy import QtWidgets, QtCore +from openpype.client import get_projects, get_project from openpype import style from openpype.lib import register_event_callback +from openpype.pipeline import ( + install_openpype_plugins, + legacy_io, +) from openpype.tools.utils import ( lib, PlaceholderLineEdit @@ -35,14 +40,14 @@ class LoaderWindow(QtWidgets.QDialog): def __init__(self, parent=None): super(LoaderWindow, self).__init__(parent) title = "Asset Loader 2.1" - project_name = api.Session.get("AVALON_PROJECT") + project_name = legacy_io.active_project() if project_name: title += " - {}".format(project_name) self.setWindowTitle(title) # Groups config - self.groups_config = lib.GroupsConfig(io) - self.family_config_cache = lib.FamilyConfigCache(io) + self.groups_config = lib.GroupsConfig(legacy_io) + self.family_config_cache = lib.FamilyConfigCache(legacy_io) # Enable minimize and maximize for app window_flags = QtCore.Qt.Window @@ -59,13 +64,13 @@ class LoaderWindow(QtWidgets.QDialog): # Assets widget assets_widget = MultiSelectAssetsWidget( - io, parent=left_side_splitter + legacy_io, parent=left_side_splitter ) assets_widget.set_current_asset_btn_visibility(True) # Families widget families_filter_view = FamilyListView( - io, self.family_config_cache, left_side_splitter + legacy_io, self.family_config_cache, left_side_splitter ) left_side_splitter.addWidget(assets_widget) left_side_splitter.addWidget(families_filter_view) @@ -75,7 +80,7 @@ class LoaderWindow(QtWidgets.QDialog): # --- Middle part --- # Subsets widget subsets_widget = SubsetWidget( - io, + legacy_io, self.groups_config, self.family_config_cache, tool_name=self.tool_name, @@ -86,8 +91,12 @@ class 
LoaderWindow(QtWidgets.QDialog): thumb_ver_splitter = QtWidgets.QSplitter(main_splitter) thumb_ver_splitter.setOrientation(QtCore.Qt.Vertical) - thumbnail_widget = ThumbnailWidget(io, parent=thumb_ver_splitter) - version_info_widget = VersionWidget(io, parent=thumb_ver_splitter) + thumbnail_widget = ThumbnailWidget( + legacy_io, parent=thumb_ver_splitter + ) + version_info_widget = VersionWidget( + legacy_io, parent=thumb_ver_splitter + ) thumb_ver_splitter.addWidget(thumbnail_widget) thumb_ver_splitter.addWidget(version_info_widget) @@ -104,7 +113,7 @@ class LoaderWindow(QtWidgets.QDialog): repres_widget = None if sync_server_enabled: repres_widget = RepresentationWidget( - io, self.tool_name, parent=thumb_ver_splitter + legacy_io, self.tool_name, parent=thumb_ver_splitter ) thumb_ver_splitter.addWidget(repres_widget) @@ -258,14 +267,17 @@ class LoaderWindow(QtWidgets.QDialog): # Refresh families config self._families_filter_view.refresh() # Change to context asset on context change - self._assets_widget.select_asset_by_name(io.Session["AVALON_ASSET"]) + self._assets_widget.select_asset_by_name( + legacy_io.Session["AVALON_ASSET"] + ) def _refresh(self): """Load assets from database""" # Ensure a project is loaded - project = io.find_one({"type": "project"}, {"type": 1}) - assert project, "Project was not found! This is a bug" + project_name = legacy_io.active_project() + project_doc = get_project(project_name, fields=["_id"]) + assert project_doc, "Project was not found! This is a bug" self._assets_widget.refresh() self._assets_widget.setFocus() @@ -304,7 +316,7 @@ class LoaderWindow(QtWidgets.QDialog): ) # Clear the version information on asset change - self._thumbnail_widget.set_thumbnail(asset_ids) + self._thumbnail_widget.set_thumbnail("asset", asset_ids) self._version_info_widget.set_version(None) self.data["state"]["assetIds"] = asset_ids @@ -361,10 +373,12 @@ class LoaderWindow(QtWidgets.QDialog): version_doc["_id"] for version_doc in version_docs ] + source_type = "version" if not thumbnail_src_ids: + source_type = "asset" thumbnail_src_ids = self._assets_widget.get_selected_asset_ids() - self._thumbnail_widget.set_thumbnail(thumbnail_src_ids) + self._thumbnail_widget.set_thumbnail(source_type, thumbnail_src_ids) if self._repres_widget is not None: version_ids = [doc["_id"] for doc in version_docs] @@ -561,17 +575,15 @@ def show(debug=False, parent=None, use_context=False): module.window = None if debug: - import traceback sys.excepthook = lambda typ, val, tb: traceback.print_last() - io.install() + legacy_io.install() any_project = next( - project for project in io.projects() - if project.get("active", True) is not False + project for project in get_projects(fields=["name"]) ) - api.Session["AVALON_PROJECT"] = any_project["name"] + legacy_io.Session["AVALON_PROJECT"] = any_project["name"] module.project = any_project["name"] with lib.qt_app_context(): @@ -579,7 +591,7 @@ def show(debug=False, parent=None, use_context=False): window.show() if use_context: - context = {"asset": api.Session["AVALON_ASSET"]} + context = {"asset": legacy_io.Session["AVALON_ASSET"]} window.set_context(context, refresh=True) else: window.refresh() @@ -603,19 +615,11 @@ def cli(args): print("Entering Project: %s" % project) - io.install() + legacy_io.install() # Store settings - api.Session["AVALON_PROJECT"] = project + legacy_io.Session["AVALON_PROJECT"] = project - from avalon import pipeline - - # Find the set config - _config = pipeline.find_config() - if hasattr(_config, "install"): - 
_config.install() - else: - print("Config `%s` has no function `install`" % - _config.__name__) + install_openpype_plugins(project) show() diff --git a/openpype/tools/loader/delegates.py b/openpype/tools/loader/delegates.py new file mode 100644 index 0000000000..0686fe78cd --- /dev/null +++ b/openpype/tools/loader/delegates.py @@ -0,0 +1,28 @@ +from qtpy import QtWidgets, QtGui, QtCore + + +class LoadedInSceneDelegate(QtWidgets.QStyledItemDelegate): + """Delegate for Loaded in Scene state columns. + + Shows "yes" or "no" for True or False values + Colorizes green or dark grey based on True or False values + + """ + + def __init__(self, *args, **kwargs): + super(LoadedInSceneDelegate, self).__init__(*args, **kwargs) + self._colors = { + True: QtGui.QColor(80, 170, 80), + False: QtGui.QColor(90, 90, 90) + } + + def displayText(self, value, locale): + return "yes" if value else "no" + + def initStyleOption(self, option, index): + super(LoadedInSceneDelegate, self).initStyleOption(option, index) + + # Colorize based on value + value = index.data(QtCore.Qt.DisplayRole) + color = self._colors[bool(value)] + option.palette.setBrush(QtGui.QPalette.Text, color) diff --git a/openpype/tools/loader/lib.py b/openpype/tools/loader/lib.py index 28e94237ec..d47bc7e07a 100644 --- a/openpype/tools/loader/lib.py +++ b/openpype/tools/loader/lib.py @@ -1,7 +1,9 @@ import inspect -from Qt import QtGui +from qtpy import QtGui import qtawesome +from openpype.lib.attribute_definitions import AbstractAttrDef +from openpype.tools.attribute_defs import AttributeDefinitionsDialog from openpype.tools.utils.widgets import ( OptionalAction, OptionDialog @@ -34,21 +36,30 @@ def get_options(action, loader, parent, repre_contexts): None when dialog was closed or cancelled, in all other cases {} if no options """ + # Pop option dialog options = {} loader_options = loader.get_options(repre_contexts) - if getattr(action, "optioned", False) and loader_options: + if not getattr(action, "optioned", False) or not loader_options: + return options + + if isinstance(loader_options[0], AbstractAttrDef): + qargparse_options = False + dialog = AttributeDefinitionsDialog(loader_options, parent) + else: + qargparse_options = True dialog = OptionDialog(parent) - dialog.setWindowTitle(action.label + " Options") dialog.create(loader_options) - if not dialog.exec_(): - return None + dialog.setWindowTitle(action.label + " Options") - # Get option - options = dialog.parse() + if not dialog.exec_(): + return None - return options + # Get option + if qargparse_options: + return dialog.parse() + return dialog.get_values() def add_representation_loaders_to_menu(loaders, menu, repre_contexts): diff --git a/openpype/tools/loader/model.py b/openpype/tools/loader/model.py index 6cc6fae1fb..5944808f8b 100644 --- a/openpype/tools/loader/model.py +++ b/openpype/tools/loader/model.py @@ -1,17 +1,31 @@ import copy import re import math +import time from uuid import uuid4 -from Qt import QtCore, QtGui +from qtpy import QtCore, QtGui import qtawesome -from avalon import schema -from openpype.pipeline import HeroVersionType +from openpype.client import ( + get_assets, + get_subsets, + get_last_versions, + get_versions, + get_hero_versions, + get_version_by_name, + get_representations +) +from openpype.pipeline import ( + registered_host, + HeroVersionType, + schema, +) from openpype.style import get_default_entity_icon_color from openpype.tools.utils.models import TreeModel, Item from openpype.tools.utils import lib +from openpype.host import ILoadHost from 
openpype.modules import ModulesManager

from openpype.tools.utils.constants import (
@@ -36,6 +50,14 @@ def is_filtering_recursible():

class BaseRepresentationModel(object):
    """Methods for SyncServer useful in multiple models"""
+    # Cheap & hackish way to avoid refreshing the whole sync server module
+    # on each selection change
+    _last_project = None
+    _modules_manager = None
+    _last_project_cache = 0
+    _last_manager_cache = 0
+    _max_project_cache_time = 30
+    _max_manager_cache_time = 60

    def reset_sync_server(self, project_name=None):
        """Sets/Resets sync server vars after every change (refresh)."""
@@ -45,28 +67,53 @@ class BaseRepresentationModel(object):
        remote_site = remote_provider = None

        if not project_name:
-            project_name = self.dbcon.Session["AVALON_PROJECT"]
+            project_name = self.dbcon.active_project()
        else:
            self.dbcon.Session["AVALON_PROJECT"] = project_name

-        if project_name:
-            manager = ModulesManager()
-            sync_server = manager.modules_by_name["sync_server"]
+        if not project_name:
+            self.repre_icons = repre_icons
+            self.sync_server = sync_server
+            self.active_site = active_site
+            self.active_provider = active_provider
+            self.remote_site = remote_site
+            self.remote_provider = remote_provider
+            return

-            if project_name in sync_server.get_enabled_projects():
-                active_site = sync_server.get_active_site(project_name)
-                active_provider = sync_server.get_provider_for_site(
-                    project_name, active_site)
-                if active_site == 'studio': # for studio use explicit icon
-                    active_provider = 'studio'
+        now_time = time.time()
+        project_cache_diff = now_time - self._last_project_cache
+        if project_cache_diff > self._max_project_cache_time:
+            self._last_project = None

-                remote_site = sync_server.get_remote_site(project_name)
-                remote_provider = sync_server.get_provider_for_site(
-                    project_name, remote_site)
-                if remote_site == 'studio': # for studio use explicit icon
-                    remote_provider = 'studio'
+        if project_name == self._last_project:
+            return

-            repre_icons = lib.get_repre_icons()
+        self._last_project = project_name
+        self._last_project_cache = now_time
+
+        manager_cache_diff = now_time - self._last_manager_cache
+        if manager_cache_diff > self._max_manager_cache_time:
+            self._modules_manager = None
+
+        if self._modules_manager is None:
+            self._modules_manager = ModulesManager()
+            self._last_manager_cache = now_time
+
+        sync_server = self._modules_manager.modules_by_name["sync_server"]
+        if sync_server.is_project_enabled(project_name, single=True):
+            active_site = sync_server.get_active_site(project_name)
+            active_provider = sync_server.get_provider_for_site(
+                project_name, active_site)
+            if active_site == 'studio': # for studio use explicit icon
+                active_provider = 'studio'
+
+            remote_site = sync_server.get_remote_site(project_name)
+            remote_provider = sync_server.get_provider_for_site(
+                project_name, remote_site)
+            if remote_site == 'studio': # for studio use explicit icon
+                remote_provider = 'studio'
+
+            repre_icons = lib.get_repre_icons()

        self.repre_icons = repre_icons
        self.sync_server = sync_server
@@ -91,6 +138,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
        "duration",
        "handles",
        "step",
+        "loaded_in_scene",
        "repre_info"
    ]
@@ -105,6 +153,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
        "duration": "Duration",
        "handles": "Handles",
        "step": "Step",
+        "loaded_in_scene": "In scene",
        "repre_info": "Availability"
    }
@@ -161,9 +210,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
        if subset_doc_projection:
            self.subset_doc_projection = subset_doc_projection

-        self.asset_doc_projection = asset_doc_projection
-        self.subset_doc_projection = subset_doc_projection
-
        self.repre_icons = {}
        self.sync_server = None
        self.active_site = self.active_provider = None
@@ -189,8 +235,14 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):
        self._doc_fetching_stop = False
        self._doc_payload = {}

-        self.doc_fetched.connect(self.on_doc_fetched)
+        self._host = registered_host()
+        self._loaded_representation_ids = set()
+        # Refresh loaded scene containers only every 3 seconds at most
+        self._host_loaded_refresh_timeout = 3
+        self._host_loaded_refresh_time = 0
+
+        self.doc_fetched.connect(self._on_doc_fetched)

        self.refresh()

    def get_item_by_id(self, item_id):
@@ -208,7 +260,7 @@ class SubsetsModel(TreeModel, BaseRepresentationModel):

    def set_grouping(self, state):
        self._grouping = state
-        self.on_doc_fetched()
+        self._on_doc_fetched()

    def get_subsets_families(self):
        return self._doc_payload.get("subset_families") or set()
@@ -218,57 +270,63 @@
        # because it also updates the information in other columns
        if index.column() == self.columns_index["version"]:
            item = index.internalPointer()
-            parent = item["_id"]
+            subset_id = item["_id"]
+            project_name = self.dbcon.active_project()

            if isinstance(value, HeroVersionType):
-                versions = list(self.dbcon.find({
-                    "type": {"$in": ["version", "hero_version"]},
-                    "parent": parent
-                }, sort=[("name", -1)]))
-
-                version = None
-                last_version = None
-                for __version in versions:
-                    if __version["type"] == "hero_version":
-                        version = __version
-                    elif last_version is None:
-                        last_version = __version
-
-                    if version is not None and last_version is not None:
-                        break
-
-                _version = None
-                for __version in versions:
-                    if __version["_id"] == version["version_id"]:
-                        _version = __version
-                        break
-
-                version["data"] = _version["data"]
-                version["name"] = _version["name"]
-                version["is_from_latest"] = (
-                    last_version["_id"] == _version["_id"]
-                )
+                version_doc = self._get_hero_version(subset_id)
            else:
-                version = self.dbcon.find_one({
-                    "name": value,
-                    "type": "version",
-                    "parent": parent
-                })
+                version_doc = get_version_by_name(
+                    project_name, value, subset_id
+                )

            # update availability on active site when version changes
-            if self.sync_server.enabled and version:
-                query = self._repre_per_version_pipeline([version["_id"]],
-                                                         self.active_site,
-                                                         self.remote_site)
-                docs = list(self.dbcon.aggregate(query))
-                if docs:
-                    repre = docs.pop()
-                    version["data"].update(self._get_repre_dict(repre))
+            if self.sync_server.enabled and version_doc:
+                repres_info = list(
+                    self.sync_server.get_repre_info_for_versions(
+                        project_name,
+                        [version_doc["_id"]],
+                        self.active_site,
+                        self.remote_site
+                    )
+                )
+                if repres_info:
+                    version_doc["data"].update(
+                        self._get_repre_dict(repres_info[0]))

-            self.set_version(index, version)
+            self.set_version(index, version_doc)

        return super(SubsetsModel, self).setData(index, value, role)

+    def _get_hero_version(self, subset_id):
+        project_name = self.dbcon.active_project()
+        version_docs = get_versions(
+            project_name, subset_ids=[subset_id], hero=True
+        )
+        standard_versions = []
+        hero_version_doc = None
+        for version_doc in version_docs:
+            if version_doc["type"] == "hero_version":
+                hero_version_doc = version_doc
+                continue
+            standard_versions.append(version_doc)
+
+        src_version_id = hero_version_doc["version_id"]
+        src_version = None
+        is_from_latest = True
+        for version_doc in reversed(sorted(
+            standard_versions, key=lambda item: item["name"]
+        )):
+            if
version_doc["_id"] == src_version_id: + src_version = version_doc + break + is_from_latest = False + + hero_version_doc["data"] = src_version["data"] + hero_version_doc["name"] = src_version["name"] + hero_version_doc["is_from_latest"] = is_from_latest + return hero_version_doc + def set_version(self, index, version): """Update the version data of the given index. @@ -355,26 +413,25 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): item["repre_info"] = repre_info def _fetch(self): - asset_docs = self.dbcon.find( - { - "type": "asset", - "_id": {"$in": self._asset_ids} - }, - self.asset_doc_projection + project_name = self.dbcon.active_project() + asset_docs = get_assets( + project_name, + asset_ids=self._asset_ids, + fields=self.asset_doc_projection.keys() ) + asset_docs_by_id = { asset_doc["_id"]: asset_doc for asset_doc in asset_docs } subset_docs_by_id = {} - subset_docs = self.dbcon.find( - { - "type": "subset", - "parent": {"$in": self._asset_ids} - }, - self.subset_doc_projection + subset_docs = get_subsets( + project_name, + asset_ids=self._asset_ids, + fields=self.subset_doc_projection.keys() ) + subset_families = set() for subset_doc in subset_docs: if self._doc_fetching_stop: @@ -387,37 +444,13 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): subset_docs_by_id[subset_doc["_id"]] = subset_doc subset_ids = list(subset_docs_by_id.keys()) - _pipeline = [ - # Find all versions of those subsets - {"$match": { - "type": "version", - "parent": {"$in": subset_ids} - }}, - # Sorting versions all together - {"$sort": {"name": 1}}, - # Group them by "parent", but only take the last - {"$group": { - "_id": "$parent", - "_version_id": {"$last": "$_id"}, - "name": {"$last": "$name"}, - "type": {"$last": "$type"}, - "data": {"$last": "$data"}, - "locations": {"$last": "$locations"}, - "schema": {"$last": "$schema"} - }} - ] - last_versions_by_subset_id = dict() - for doc in self.dbcon.aggregate(_pipeline): - if self._doc_fetching_stop: - return - doc["parent"] = doc["_id"] - doc["_id"] = doc.pop("_version_id") - last_versions_by_subset_id[doc["parent"]] = doc + last_versions_by_subset_id = get_last_versions( + project_name, + subset_ids, + fields=["_id", "parent", "name", "type", "data", "schema"] + ) - hero_versions = self.dbcon.find({ - "type": "hero_version", - "parent": {"$in": subset_ids} - }) + hero_versions = get_hero_versions(project_name, subset_ids=subset_ids) missing_versions = [] for hero_version in hero_versions: version_id = hero_version["version_id"] @@ -426,10 +459,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): missing_versions_by_id = {} if missing_versions: - missing_version_docs = self.dbcon.find({ - "type": "version", - "_id": {"$in": missing_versions} - }) + missing_version_docs = get_versions( + project_name, version_ids=missing_versions + ) missing_versions_by_id = { missing_version_doc["_id"]: missing_version_doc for missing_version_doc in missing_version_docs @@ -452,32 +484,58 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): last_versions_by_subset_id[subset_id] = hero_version + # Check loaded subsets + loaded_subset_ids = set() + ids = self._loaded_representation_ids + if ids: + if self._doc_fetching_stop: + return + + # Get subset ids from loaded representations in workfile + # todo: optimize with aggregation query to distinct subset id + representations = get_representations(project_name, + representation_ids=ids, + fields=["parent"]) + version_ids = set(repre["parent"] for repre in representations) + versions = 
get_versions(project_name, + version_ids=version_ids, + fields=["parent"]) + loaded_subset_ids = set(version["parent"] for version in versions) + + if self._doc_fetching_stop: + return + + repre_info_by_version_id = {} + if self.sync_server.enabled: + versions_by_id = {} + for _subset_id, doc in last_versions_by_subset_id.items(): + versions_by_id[doc["_id"]] = doc + + repres_info = self.sync_server.get_repre_info_for_versions( + project_name, + list(versions_by_id.keys()), + self.active_site, + self.remote_site + ) + for repre_info in repres_info: + if self._doc_fetching_stop: + return + + version_id = repre_info["_id"] + doc = versions_by_id[version_id] + doc["active_provider"] = self.active_provider + doc["remote_provider"] = self.remote_provider + repre_info_by_version_id[version_id] = repre_info + self._doc_payload = { "asset_docs_by_id": asset_docs_by_id, "subset_docs_by_id": subset_docs_by_id, "subset_families": subset_families, - "last_versions_by_subset_id": last_versions_by_subset_id + "last_versions_by_subset_id": last_versions_by_subset_id, + "repre_info_by_version_id": repre_info_by_version_id, + "subsets_loaded_by_id": loaded_subset_ids } - if self.sync_server.enabled: - version_ids = set() - for _subset_id, doc in last_versions_by_subset_id.items(): - version_ids.add(doc["_id"]) - - query = self._repre_per_version_pipeline(list(version_ids), - self.active_site, - self.remote_site) - - repre_info = {} - for doc in self.dbcon.aggregate(query): - if self._doc_fetching_stop: - return - doc["active_provider"] = self.active_provider - doc["remote_provider"] = self.remote_provider - repre_info[doc["_id"]] = doc - - self._doc_payload["repre_info_by_version_id"] = repre_info - self.doc_fetched.emit() def fetch_subset_and_version(self): @@ -507,9 +565,23 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): self.doc_fetched.emit() return + # Collect scene container representations to compare loaded state + # This runs in the main thread because it involves the host DCC + if self._host: + time_since_refresh = time.time() - self._host_loaded_refresh_time + if time_since_refresh > self._host_loaded_refresh_timeout: + if isinstance(self._host, ILoadHost): + containers = self._host.get_containers() + else: + containers = self._host.ls() + + repre_ids = {con.get("representation") for con in containers} + self._loaded_representation_ids = repre_ids + self._host_loaded_refresh_time = time.time() + self.fetch_subset_and_version() - def on_doc_fetched(self): + def _on_doc_fetched(self): self.clear() self._items_by_id = {} self.beginResetModel() @@ -528,6 +600,10 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): "repre_info_by_version_id" ) + subsets_loaded_by_id = self._doc_payload.get( + "subsets_loaded_by_id" + ) + if ( asset_docs_by_id is None or subset_docs_by_id is None @@ -542,7 +618,8 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): asset_docs_by_id, subset_docs_by_id, last_versions_by_subset_id, - repre_info_by_version_id + repre_info_by_version_id, + subsets_loaded_by_id ) self.endResetModel() self.refreshed.emit(True) @@ -570,8 +647,12 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): return merge_group def _fill_subset_items( - self, asset_docs_by_id, subset_docs_by_id, last_versions_by_subset_id, - repre_info_by_version_id + self, + asset_docs_by_id, + subset_docs_by_id, + last_versions_by_subset_id, + repre_info_by_version_id, + subsets_loaded_by_id ): _groups_tuple = self.groups_config.split_subsets_for_groups( subset_docs_by_id.values(), 
self._grouping @@ -595,6 +676,35 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): "index": self.index(group_item.row(), 0) } + def _add_subset_item(subset_doc, parent_item, parent_index): + last_version = last_versions_by_subset_id.get( + subset_doc["_id"] + ) + # do not show subset without version + if not last_version: + return + + data = copy.deepcopy(subset_doc) + data["subset"] = subset_doc["name"] + + asset_id = subset_doc["parent"] + data["asset"] = asset_docs_by_id[asset_id]["name"] + + data["last_version"] = last_version + data["loaded_in_scene"] = subset_doc["_id"] in subsets_loaded_by_id + + # Sync server data + data.update( + self._get_last_repre_info(repre_info_by_version_id, + last_version["_id"])) + + item = Item() + item.update(data) + self.add_child(item, parent_item) + + index = self.index(item.row(), 0, parent_index) + self.set_version(index, last_version) + subset_counter = 0 for group_name, subset_docs_by_name in subset_docs_by_group.items(): parent_item = group_item_by_name[group_name]["item"] @@ -617,31 +727,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): _parent_index = parent_index for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - - data = copy.deepcopy(subset_doc) - data["subset"] = subset_name - data["asset"] = asset_docs_by_id[asset_id]["name"] - - last_version = last_versions_by_subset_id.get( - subset_doc["_id"] - ) - data["last_version"] = last_version - - # do not show subset without version - if not last_version: - continue - - data.update( - self._get_last_repre_info(repre_info_by_version_id, - last_version["_id"])) - - item = Item() - item.update(data) - self.add_child(item, _parent_item) - - index = self.index(item.row(), 0, _parent_index) - self.set_version(index, last_version) + _add_subset_item(subset_doc, + parent_item=_parent_item, + parent_index=_parent_index) for subset_name in sorted(subset_docs_without_group.keys()): subset_docs = subset_docs_without_group[subset_name] @@ -656,31 +744,9 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): subset_counter += 1 for subset_doc in subset_docs: - asset_id = subset_doc["parent"] - - data = copy.deepcopy(subset_doc) - data["subset"] = subset_name - data["asset"] = asset_docs_by_id[asset_id]["name"] - - last_version = last_versions_by_subset_id.get( - subset_doc["_id"] - ) - data["last_version"] = last_version - - # do not show subset without version - if not last_version: - continue - - data.update( - self._get_last_repre_info(repre_info_by_version_id, - last_version["_id"])) - - item = Item() - item.update(data) - self.add_child(item, parent_item) - - index = self.index(item.row(), 0, parent_index) - self.set_version(index, last_version) + _add_subset_item(subset_doc, + parent_item=parent_item, + parent_index=parent_index) def data(self, index, role): if not index.isValid(): @@ -808,83 +874,6 @@ class SubsetsModel(TreeModel, BaseRepresentationModel): return data - def _repre_per_version_pipeline(self, version_ids, - active_site, remote_site): - query = [ - {"$match": {"parent": {"$in": version_ids}, - "type": "representation", - "files.sites.name": {"$exists": 1}}}, - {"$unwind": "$files"}, - {'$addFields': { - 'order_local': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', active_site]} - } - } - }}, - {'$addFields': { - 'order_remote': { - '$filter': { - 'input': '$files.sites', 'as': 'p', - 'cond': {'$eq': ['$$p.name', remote_site]} - } - } - }}, - {'$addFields': { - 'progress_local': {"$arrayElemAt": [{ - '$cond': [ 
- {'$size': "$order_local.progress"}, - "$order_local.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_local.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$addFields': { - 'progress_remote': {"$arrayElemAt": [{ - '$cond': [ - {'$size': "$order_remote.progress"}, - "$order_remote.progress", - # if exists created_dt count is as available - {'$cond': [ - {'$size': "$order_remote.created_dt"}, - [1], - [0] - ]} - ]}, - 0 - ]} - }}, - {'$group': { # first group by repre - '_id': '$_id', - 'parent': {'$first': '$parent'}, - 'avail_ratio_local': { - '$first': { - '$divide': [{'$sum': "$progress_local"}, {'$sum': 1}] - } - }, - 'avail_ratio_remote': { - '$first': { - '$divide': [{'$sum': "$progress_remote"}, {'$sum': 1}] - } - } - }}, - {'$group': { # second group by parent, eg version_id - '_id': '$parent', - 'repre_count': {'$sum': 1}, # total representations - # fully available representation for site - 'avail_repre_local': {'$sum': "$avail_ratio_local"}, - 'avail_repre_remote': {'$sum': "$avail_ratio_remote"}, - }}, - ] - return query - class GroupMemberFilterProxyModel(QtCore.QSortFilterProxyModel): """Provide the feature of filtering group by the acceptance of members @@ -1000,7 +989,6 @@ class RepresentationSortProxyModel(GroupMemberFilterProxyModel): class RepresentationModel(TreeModel, BaseRepresentationModel): - doc_fetched = QtCore.Signal() refreshed = QtCore.Signal(bool) @@ -1026,33 +1014,43 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): "remote_site": "Remote" } - def __init__(self, dbcon, header, version_ids): + repre_projection = { + "_id": 1, + "name": 1, + "context.subset": 1, + "context.asset": 1, + "context.version": 1, + "context.representation": 1, + 'files.sites': 1 + } + + def __init__(self, dbcon, header): super(RepresentationModel, self).__init__() self.dbcon = dbcon self._data = [] self._header = header - self.version_ids = version_ids + self._version_ids = [] manager = ModulesManager() sync_server = active_site = remote_site = None active_provider = remote_provider = None - project = dbcon.Session["AVALON_PROJECT"] - if project: + project_name = dbcon.current_project() + if project_name: sync_server = manager.modules_by_name["sync_server"] - active_site = sync_server.get_active_site(project) - remote_site = sync_server.get_remote_site(project) + active_site = sync_server.get_active_site(project_name) + remote_site = sync_server.get_remote_site(project_name) # TODO refactor - active_provider = \ - sync_server.get_provider_for_site(project, - active_site) + active_provider = sync_server.get_provider_for_site( + project_name, active_site + ) if active_site == 'studio': active_provider = 'studio' - remote_provider = \ - sync_server.get_provider_for_site(project, - remote_site) + remote_provider = sync_server.get_provider_for_site( + project_name, remote_site + ) if remote_site == 'studio': remote_provider = 'studio' @@ -1063,7 +1061,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): self.remote_site = remote_site self.remote_provider = remote_provider - self.doc_fetched.connect(self.on_doc_fetched) + self.doc_fetched.connect(self._on_doc_fetched) self._docs = {} self._icons = lib.get_repre_icons() @@ -1074,7 +1072,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): self._items_by_id = {} def set_version_ids(self, version_ids): - self.version_ids = version_ids + self._version_ids = version_ids self.refresh() def data(self, index, role): @@ -1091,8 +1089,7 @@ class 
RepresentationModel(TreeModel, BaseRepresentationModel): if index.column() == self.Columns.index("name"): if item.get("isMerged"): return item["icon"] - else: - return self._icons["repre"] + return self._icons["repre"] active_index = self.Columns.index("active_site") remote_index = self.Columns.index("remote_site") @@ -1108,12 +1105,12 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): # site added, sync in progress progress_str = "not avail." if progress >= 0: - # progress == 0 for isMerged is unavailable if progress == 0 and item.get("isMerged"): progress_str = "not avail." else: - progress_str = "{}% {}".format(int(progress * 100), - label) + progress_str = "{}% {}".format( + int(progress * 100), label + ) return progress_str @@ -1143,7 +1140,7 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): return super(RepresentationModel, self).data(index, role) - def on_doc_fetched(self): + def _on_doc_fetched(self): self.clear() self.beginResetModel() subsets = set() @@ -1153,10 +1150,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): group = None self._items_by_id = {} for doc in self._docs: - if len(self.version_ids) > 1: + if len(self._version_ids) > 1: group = repre_groups.get(doc["name"]) if not group: - group_item = Item() item_id = str(uuid4()) group_item.update({ @@ -1177,9 +1173,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): repre_groups_items[doc["name"]] = 0 group = group_item - progress = lib.get_progress_for_repre(doc, - self.active_site, - self.remote_site) + progress = lib.get_progress_for_repre( + doc, self.active_site, self.remote_site + ) active_site_icon = self._icons.get(self.active_provider) remote_site_icon = self._icons.get(self.remote_provider) @@ -1212,9 +1208,9 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): 'remote_site_progress': progress[self.remote_site] } if group: - group = self._sum_group_progress(doc["name"], group, - current_progress, - repre_groups_items) + group = self._sum_group_progress( + doc["name"], group, current_progress, repre_groups_items + ) self.add_child(item, group) @@ -1233,47 +1229,39 @@ class RepresentationModel(TreeModel, BaseRepresentationModel): return self._items_by_id.get(item_id) def refresh(self): - docs = [] - session_project = self.dbcon.Session['AVALON_PROJECT'] - if not session_project: + project_name = self.dbcon.current_project() + if not project_name: return - if self.version_ids: + repre_docs = [] + if self._version_ids: # Simple find here for now, expected to receive lower number of # representations and logic could be in Python - docs = list(self.dbcon.find( - {"type": "representation", "parent": {"$in": self.version_ids}, - "files.sites.name": {"$exists": 1}}, self.projection())) - self._docs = docs + repre_docs = list(get_representations( + project_name, + version_ids=self._version_ids, + fields=self.repre_projection.keys() + )) + + self._docs = repre_docs self.doc_fetched.emit() - @classmethod - def projection(cls): - return { - "_id": 1, - "name": 1, - "context.subset": 1, - "context.asset": 1, - "context.version": 1, - "context.representation": 1, - 'files.sites': 1 - } + def _sum_group_progress( + self, repre_name, group, current_item_progress, repre_groups_items + ): + """Update final group progress - def _sum_group_progress(self, repre_name, group, current_item_progress, - repre_groups_items): - """ - Update final group progress - Called after every item in group is added + Called after every item in group is added - Args: - 
repre_name(string) - group(dict): info about group of selected items - current_item_progress(dict): {'active_site_progress': XX, - 'remote_site_progress': YY} - repre_groups_items(dict) - Returns: - (dict): updated group info + Args: + repre_name(string) + group(dict): info about group of selected items + current_item_progress(dict): {'active_site_progress': XX, + 'remote_site_progress': YY} + repre_groups_items(dict) + Returns: + (dict): updated group info """ repre_groups_items[repre_name] += 1 diff --git a/openpype/tools/loader/widgets.py b/openpype/tools/loader/widgets.py index 42fb62b632..0c5c9391cf 100644 --- a/openpype/tools/loader/widgets.py +++ b/openpype/tools/loader/widgets.py @@ -5,10 +5,20 @@ import pprint import traceback import collections -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui -from openpype.api import Anatomy -from openpype.pipeline import HeroVersionType +from openpype.client import ( + get_subset_families, + get_subset_by_id, + get_subsets, + get_version_by_id, + get_versions, + get_representations, + get_thumbnail_id_from_source, + get_thumbnail, +) +from openpype.client.operations import OperationsSession, REMOVED_VALUE +from openpype.pipeline import HeroVersionType, Anatomy from openpype.pipeline.thumbnail import get_thumbnail_binary from openpype.pipeline.load import ( discover_loader_plugins, @@ -19,12 +29,14 @@ from openpype.pipeline.load import ( load_with_repre_context, load_with_subset_context, load_with_subset_contexts, + LoadError, IncompatibleLoaderError, ) from openpype.tools.utils import ( ErrorMessageBox, lib as tools_lib ) +from openpype.tools.utils.lib import checkstate_int_to_enum from openpype.tools.utils.delegates import ( VersionDelegate, PrettyTimeDelegate @@ -37,6 +49,12 @@ from openpype.tools.utils.views import ( TreeViewSpinner, DeselectableTreeView ) +from openpype.tools.utils.constants import ( + LOCAL_PROVIDER_ROLE, + REMOTE_PROVIDER_ROLE, + LOCAL_AVAILABILITY_ROLE, + REMOTE_AVAILABILITY_ROLE, +) from openpype.tools.assetlinks.widgets import SimpleLinkView from .model import ( @@ -48,13 +66,7 @@ from .model import ( ITEM_ID_ROLE ) from . 
import lib - -from openpype.tools.utils.constants import ( - LOCAL_PROVIDER_ROLE, - REMOTE_PROVIDER_ROLE, - LOCAL_AVAILABILITY_ROLE, - REMOTE_AVAILABILITY_ROLE -) +from .delegates import LoadedInSceneDelegate class OverlayFrame(QtWidgets.QFrame): @@ -159,6 +171,7 @@ class SubsetWidget(QtWidgets.QWidget): ("duration", 60), ("handles", 55), ("step", 10), + ("loaded_in_scene", 25), ("repre_info", 65) ) @@ -224,6 +237,10 @@ class SubsetWidget(QtWidgets.QWidget): column = model.Columns.index("repre_info") view.setItemDelegateForColumn(column, avail_delegate) + loaded_in_scene_delegate = LoadedInSceneDelegate(view) + column = model.Columns.index("loaded_in_scene") + view.setItemDelegateForColumn(column, loaded_in_scene_delegate) + layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(top_bar_layout) @@ -237,8 +254,7 @@ class SubsetWidget(QtWidgets.QWidget): self.model = model self.view = view - actual_project = dbcon.Session["AVALON_PROJECT"] - self.on_project_change(actual_project) + self.on_project_change(dbcon.current_project()) view.customContextMenuRequested.connect(self.on_context_menu) @@ -249,8 +265,8 @@ class SubsetWidget(QtWidgets.QWidget): group_checkbox.stateChanged.connect(self.set_grouping) - subset_filter.textChanged.connect(proxy.setFilterRegExp) - subset_filter.textChanged.connect(view.expandAll) + subset_filter.textChanged.connect(self._subset_changed) + model.refreshed.connect(self.refreshed) self.proxy = proxy @@ -278,6 +294,13 @@ class SubsetWidget(QtWidgets.QWidget): current_index=False): self.model.set_grouping(state) + def _subset_changed(self, text): + if hasattr(self.proxy, "setFilterRegularExpression"): + self.proxy.setFilterRegularExpression(text) + else: + self.proxy.setFilterRegExp(text) + self.view.expandAll() + def set_loading_state(self, loading, empty): view = self.view @@ -302,33 +325,23 @@ class SubsetWidget(QtWidgets.QWidget): item["version_document"] ) - subset_docs = list(self.dbcon.find( - { - "_id": {"$in": list(version_docs_by_subset_id.keys())}, - "type": "subset" - }, - { - "schema": 1, - "data.families": 1 - } + project_name = self.dbcon.active_project() + subset_docs = list(get_subsets( + project_name, + subset_ids=version_docs_by_subset_id.keys(), + fields=["schema", "data.families"] )) subset_docs_by_id = { subset_doc["_id"]: subset_doc for subset_doc in subset_docs } version_ids = list(version_docs_by_id.keys()) - repre_docs = self.dbcon.find( - # Query all representations for selected versions at once - { - "type": "representation", - "parent": {"$in": version_ids} - }, - # Query only name and parent from representation - { - "name": 1, - "parent": 1 - } + repre_docs = get_representations( + project_name, + version_ids=version_ids, + fields=["name", "parent"] ) + repre_docs_by_version_id = { version_id: [] for version_id in version_ids @@ -356,9 +369,10 @@ class SubsetWidget(QtWidgets.QWidget): enabled = False if project_name: self.model.reset_sync_server(project_name) - if self.model.sync_server: - enabled_proj = self.model.sync_server.get_enabled_projects() - enabled = project_name in enabled_proj + sync_server = self.model.sync_server + if sync_server: + enabled = sync_server.is_project_enabled(project_name, + single=True) lib.change_visibility(self.model, self.view, "repre_info", enabled) @@ -435,7 +449,8 @@ class SubsetWidget(QtWidgets.QWidget): # Get all representation->loader combinations available for the # index under the cursor, so we can list the user the options. 
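# A condensed sketch of the loader filtering performed in the hunk below:
# plugins are discovered per project, disabled loaders are skipped and the
# rest are split by base class. "discover_loader_plugins" and
# "SubsetLoaderPlugin" come from openpype.pipeline.load (imported at the
# top of this file); the helper name "_split_loaders" is illustrative only.
from openpype.pipeline.load import (
    discover_loader_plugins,
    SubsetLoaderPlugin,
)

def _split_loaders(project_name):
    subset_loaders = []
    repre_loaders = []
    for loader in discover_loader_plugins(project_name):
        # Loaders can be disabled in settings - do not offer those
        if not loader.enabled:
            continue
        # SubsetLoaderPlugin subclasses act on whole subsets,
        # everything else on single representations
        if issubclass(loader, SubsetLoaderPlugin):
            subset_loaders.append(loader)
        else:
            repre_loaders.append(loader)
    return subset_loaders, repre_loaders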
- available_loaders = discover_loader_plugins() + project_name = self.dbcon.active_project() + available_loaders = discover_loader_plugins(project_name) if self.tool_name: available_loaders = lib.remove_tool_name_from_loaders( available_loaders, self.tool_name @@ -444,6 +459,8 @@ class SubsetWidget(QtWidgets.QWidget): repre_loaders = [] subset_loaders = [] for loader in available_loaders: + if not loader.enabled: + continue # Skip if its a SubsetLoader. if issubclass(loader, SubsetLoaderPlugin): subset_loaders.append(loader) @@ -508,7 +525,7 @@ class SubsetWidget(QtWidgets.QWidget): if not one_item_selected: # Filter loaders from first subset by intersected combinations for repre, loader in first_loaders: - if (repre["name"], loader) not in found_combinations: + if (repre["name"].lower(), loader) not in found_combinations: continue loaders.append((repre, loader)) @@ -566,28 +583,43 @@ class SubsetWidget(QtWidgets.QWidget): # same representation available # Trigger - repre_ids = [] + project_name = self.dbcon.active_project() + subset_name_by_version_id = dict() for item in items: - representation = self.dbcon.find_one( - { - "type": "representation", - "name": representation_name, - "parent": item["version_document"]["_id"] - }, - {"_id": 1} - ) - if not representation: - self.echo("Subset '{}' has no representation '{}'".format( - item["subset"], representation_name - )) - continue - repre_ids.append(representation["_id"]) + version_id = item["version_document"]["_id"] + subset_name_by_version_id[version_id] = item["subset"] + + version_ids = set(subset_name_by_version_id.keys()) + repre_docs = get_representations( + project_name, + representation_names=[representation_name], + version_ids=version_ids, + fields=["_id", "parent"] + ) + + repre_ids = [] + for repre_doc in repre_docs: + repre_ids.append(repre_doc["_id"]) + + # keep only version ids without representation with that name + version_id = repre_doc["parent"] + version_ids.discard(version_id) + + if version_ids: + # report versions that didn't have valid representation + joined_subset_names = ", ".join([ + '"{}"'.format(subset_name_by_version_id[version_id]) + for version_id in version_ids + ]) + self.echo("Subsets {} don't have representation '{}'".format( + joined_subset_names, representation_name + )) # get contexts only for selected menu option repre_contexts = get_repres_contexts(repre_ids, self.dbcon) - options = lib.get_options(action, loader, self, - list(repre_contexts.values())) - + options = lib.get_options( + action, loader, self, list(repre_contexts.values()) + ) error_info = _load_representations_by_loader( loader, repre_contexts, options=options ) @@ -599,26 +631,30 @@ class SubsetWidget(QtWidgets.QWidget): box.show() def group_subsets(self, name, asset_ids, items): - field = "data.subsetGroup" + subset_ids = { + item["_id"] + for item in items + if item.get("_id") + } + if not subset_ids: + return if name: - update = {"$set": {field: name}} self.echo("Group subsets to '%s'.." 
% name) else: - update = {"$unset": {field: ""}} self.echo("Ungroup subsets..") - subsets = list() - for item in items: - subsets.append(item["subset"]) + project_name = self.dbcon.active_project() + op_session = OperationsSession() + for subset_id in subset_ids: + op_session.update_entity( + project_name, + "subset", + subset_id, + {"data.subsetGroup": name or REMOVED_VALUE} + ) - for asset_id in asset_ids: - filtr = { - "type": "subset", - "parent": asset_id, - "name": {"$in": subsets}, - } - self.dbcon.update_many(filtr, update) + op_session.commit() def echo(self, message): print(message) @@ -661,27 +697,21 @@ class VersionTextEdit(QtWidgets.QTextEdit): print("Querying..") + project_name = self.dbcon.active_project() if not version_doc: - version_doc = self.dbcon.find_one({ - "_id": version_id, - "type": {"$in": ["version", "hero_version"]} - }) + version_doc = get_version_by_id(project_name, version_id) assert version_doc, "Not a valid version id" if version_doc["type"] == "hero_version": - _version_doc = self.dbcon.find_one({ - "_id": version_doc["version_id"], - "type": "version" - }) + _version_doc = get_version_by_id( + project_name, version_doc["version_id"] + ) version_doc["data"] = _version_doc["data"] version_doc["name"] = HeroVersionType( _version_doc["name"] ) - subset = self.dbcon.find_one({ - "_id": version_doc["parent"], - "type": "subset" - }) + subset = get_subset_by_id(project_name, version_doc["parent"]) assert subset, "No valid subset parent for version" # Define readable creation timestamp @@ -752,7 +782,7 @@ class VersionTextEdit(QtWidgets.QTextEdit): if not source: return - project_name = self.dbcon.Session["AVALON_PROJECT"] + project_name = self.dbcon.current_project() if self._anatomy is None or self._anatomy.project_name != project_name: self._anatomy = Anatomy(project_name) @@ -833,24 +863,19 @@ class ThumbnailWidget(QtWidgets.QLabel): QtCore.Qt.SmoothTransformation ) - def set_thumbnail(self, doc_id=None): - if not doc_id: + def set_thumbnail(self, src_type, doc_ids): + if not doc_ids: self.set_pixmap() return - if isinstance(doc_id, (list, tuple)): - if len(doc_id) < 1: - self.set_pixmap() - return - doc_id = doc_id[0] + src_id = doc_ids[0] - doc = self.dbcon.find_one( - {"_id": doc_id}, - {"data.thumbnail_id"} + project_name = self.dbcon.active_project() + thumbnail_id = get_thumbnail_id_from_source( + project_name, + src_type, + src_id, ) - thumbnail_id = None - if doc: - thumbnail_id = doc.get("data", {}).get("thumbnail_id") if thumbnail_id == self.current_thumb_id: if self.current_thumbnail is None: self.set_pixmap() @@ -861,9 +886,7 @@ class ThumbnailWidget(QtWidgets.QLabel): self.set_pixmap() return - thumbnail_ent = self.dbcon.find_one( - {"type": "thumbnail", "_id": thumbnail_id} - ) + thumbnail_ent = get_thumbnail(project_name, thumbnail_id) if not thumbnail_ent: return @@ -917,21 +940,9 @@ class FamilyModel(QtGui.QStandardItemModel): def refresh(self): families = set() - if self.dbcon.Session.get("AVALON_PROJECT"): - result = list(self.dbcon.aggregate([ - {"$match": { - "type": "subset" - }}, - {"$project": { - "family": {"$arrayElemAt": ["$data.families", 0]} - }}, - {"$group": { - "_id": "family_group", - "families": {"$addToSet": "$family"} - }} - ])) - if result: - families = set(result[0]["families"]) + project_name = self.dbcon.current_project() + if project_name: + families = get_subset_families(project_name) root_item = self.invisibleRootItem() @@ -1058,7 +1069,10 @@ class FamilyListView(QtWidgets.QListView): checked_families = [] for row 
in range(model.rowCount()):
            index = model.index(row, 0)
-            if index.data(QtCore.Qt.CheckStateRole) == QtCore.Qt.Checked:
+            checked = checkstate_int_to_enum(
+                index.data(QtCore.Qt.CheckStateRole)
+            )
+            if checked == QtCore.Qt.Checked:
                family = index.data(QtCore.Qt.DisplayRole)
                checked_families.append(family)
@@ -1092,13 +1106,15 @@ class FamilyListView(QtWidgets.QListView):
        self.blockSignals(True)
        for index in indexes:
-            index_state = index.data(QtCore.Qt.CheckStateRole)
+            index_state = checkstate_int_to_enum(
+                index.data(QtCore.Qt.CheckStateRole)
+            )
            if index_state == state:
                continue

            new_state = state
            if new_state is None:
                if index_state == QtCore.Qt.Checked:
                    new_state = QtCore.Qt.Unchecked
                else:
                    new_state = QtCore.Qt.Checked
@@ -1176,7 +1192,7 @@ class RepresentationWidget(QtWidgets.QWidget):
        headers = [item[0] for item in self.default_widths]

-        model = RepresentationModel(self.dbcon, headers, [])
+        model = RepresentationModel(self.dbcon, headers)

        proxy_model = RepresentationSortProxyModel(self)
        proxy_model.setSourceModel(model)
@@ -1213,8 +1229,8 @@ class RepresentationWidget(QtWidgets.QWidget):
        self.proxy_model = proxy_model

        self.sync_server_enabled = False
-        actual_project = dbcon.Session["AVALON_PROJECT"]
-        self.on_project_change(actual_project)
+
+        self.on_project_change(dbcon.current_project())

        self.model.refresh()
@@ -1228,9 +1244,10 @@ class RepresentationWidget(QtWidgets.QWidget):
        enabled = False
        if project_name:
            self.model.reset_sync_server(project_name)
-            if self.model.sync_server:
-                enabled_proj = self.model.sync_server.get_enabled_projects()
-                enabled = project_name in enabled_proj
+            sync_server = self.model.sync_server
+            if sync_server:
+                enabled = sync_server.is_project_enabled(project_name,
+                                                         single=True)

        self.sync_server_enabled = enabled
        lib.change_visibility(self.model, self.tree_view,
@@ -1243,23 +1260,22 @@ class RepresentationWidget(QtWidgets.QWidget):
        for item in items:
            repre_ids.append(item["_id"])

-        repre_docs = list(self.dbcon.find(
-            {
-                "type": "representation",
-                "_id": {"$in": repre_ids}
-            },
-            {
-                "name": 1,
-                "parent": 1
-            }
+        project_name = self.dbcon.active_project()
+        repre_docs = list(get_representations(
+            project_name,
+            representation_ids=repre_ids,
+            fields=["name", "parent"]
        ))

        version_ids = [
            repre_doc["parent"]
            for repre_doc in repre_docs
        ]
-        version_docs = self.dbcon.find({
-            "_id": {"$in": version_ids}
-        })
+        version_docs = get_versions(
+            project_name,
+            version_ids=version_ids,
+            hero=True
+        )

        version_docs_by_id = {}
        version_docs_by_subset_id = collections.defaultdict(list)
@@ -1269,15 +1285,10 @@
            version_docs_by_id[version_id] = version_doc
            version_docs_by_subset_id[subset_id].append(version_doc)

-        subset_docs = list(self.dbcon.find(
-            {
-                "_id": {"$in": list(version_docs_by_subset_id.keys())},
-                "type": "subset"
-            },
-            {
-                "schema": 1,
-                "data.families": 1
-            }
+        subset_docs = list(get_subsets(
+            project_name,
+            subset_ids=version_docs_by_subset_id.keys(),
+            fields=["schema", "data.families"]
        ))
        subset_docs_by_id = {
            subset_doc["_id"]: subset_doc
@@ -1351,10 +1362,13 @@ class RepresentationWidget(QtWidgets.QWidget):
        selected_side = self._get_selected_side(point_index, rows)
        # Get all representation->loader combinations available for the
        # index under the cursor, so we can list the user the options.
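# "checkstate_int_to_enum" (imported above from openpype.tools.utils.lib)
# is used in the hunks above because, depending on the Qt binding behind
# qtpy, CheckStateRole data may arrive as a raw int (PyQt5/PySide2) or as
# a Qt.CheckState enum (PyQt6/PySide6). A minimal sketch of such a helper,
# assuming normalizing the value is all the real implementation does:
from qtpy import QtCore

def checkstate_int_to_enum(value):
    if isinstance(value, QtCore.Qt.CheckState):
        return value
    if isinstance(value, int):
        # 0, 1 and 2 map to Unchecked, PartiallyChecked and Checked
        return QtCore.Qt.CheckState(value)
    return value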
-        available_loaders = discover_loader_plugins()
+        project_name = self.dbcon.active_project()
+        available_loaders = discover_loader_plugins(project_name)

        filtered_loaders = []
        for loader in available_loaders:
+            if not loader.enabled:
+                continue

            # Skip subset loaders
            if issubclass(loader, SubsetLoaderPlugin):
                continue
@@ -1446,13 +1460,12 @@ class RepresentationWidget(QtWidgets.QWidget):
        self._process_action(items, menu, point)

    def _process_action(self, items, menu, point):
-        """
-        Show the context action menu and process selected
+        """Show the context action menu and process selected

-            Args:
-                items(dict): menu items
-                menu(OptionalMenu)
-                point(PointIndex)
+        Args:
+            items(dict): menu items
+            menu(OptionalMenu)
+            point(PointIndex)
        """
        global_point = self.tree_view.mapToGlobal(point)
        action = menu.exec_(global_point)
@@ -1467,22 +1480,22 @@
        repre_ids = []
        data_by_repre_id = {}
        selected_side = action_representation.get("selected_side")
+        site_name = "{}_site_name".format(selected_side)
+        is_sync_loader = tools_lib.is_sync_loader(loader)
        for item in items:
-            if tools_lib.is_sync_loader(loader):
-                site_name = "{}_site_name".format(selected_side)
-                data = {
-                    "_id": item.get("_id"),
-                    "site_name": item.get(site_name),
-                    "project_name": self.dbcon.Session["AVALON_PROJECT"]
-                }
+            repre_id = item["_id"]
+            repre_ids.append(repre_id)
+            if not is_sync_loader:
+                continue

-                if not data["site_name"]:
-                    continue
+            data_site_name = item.get(site_name)
+            if not data_site_name:
+                continue

-                data_by_repre_id[data["_id"]] = data
-
-            repre_ids.append(item.get("_id"))
+            data_by_repre_id[repre_id] = {
+                "site_name": data_site_name
+            }

        repre_contexts = get_repres_contexts(repre_ids, self.dbcon)
        options = lib.get_options(action, loader, self,
@@ -1564,16 +1577,22 @@ def _load_representations_by_loader(loader, repre_contexts,
        return

    for repre_context in repre_contexts.values():
+        version_doc = repre_context["version"]
+        if version_doc["type"] == "hero_version":
+            version_name = "Hero"
+        else:
+            version_name = version_doc.get("name")
        try:
            if data_by_repre_id:
-                _id = repre_context["representation"]["_id"]
-                data = data_by_repre_id.get(_id)
+                repre_id = repre_context["representation"]["_id"]
+                data = data_by_repre_id.get(repre_id) or {}
                options.update(data)
            load_with_repre_context(
                loader,
                repre_context,
                options=options
            )
+
        except IncompatibleLoaderError as exc:
            print(exc)
            error_info.append((
@@ -1581,20 +1600,23 @@
                None,
                repre_context["representation"]["name"],
                repre_context["subset"]["name"],
-                repre_context["version"]["name"]
+                version_name
            ))
        except Exception as exc:
-            exc_type, exc_value, exc_traceback = sys.exc_info()
-            formatted_traceback = "".join(traceback.format_exception(
-                exc_type, exc_value, exc_traceback
-            ))
+            formatted_traceback = None
+            if not isinstance(exc, LoadError):
+                exc_type, exc_value, exc_traceback = sys.exc_info()
+                formatted_traceback = "".join(traceback.format_exception(
+                    exc_type, exc_value, exc_traceback
+                ))
+
            error_info.append((
                str(exc),
                formatted_traceback,
                repre_context["representation"]["name"],
                repre_context["subset"]["name"],
-                repre_context["version"]["name"]
+                version_name
            ))

    return error_info
@@ -1613,7 +1635,7 @@ def _load_subsets_by_loader(loader, subset_contexts, options,
    error_info = []

    if options is None:  # do not load when cancelled
-        return
+        return error_info

    if loader.is_multiple_contexts_compatible:
        subset_names = []
@@ -1628,13 +1650,14 @@ def
_load_subsets_by_loader(loader, subset_contexts, options, subset_contexts, options=options ) + except Exception as exc: - exc_type, exc_value, exc_traceback = sys.exc_info() - formatted_traceback = "".join( - traceback.format_exception( + formatted_traceback = None + if not isinstance(exc, LoadError): + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( exc_type, exc_value, exc_traceback - ) - ) + )) error_info.append(( str(exc), formatted_traceback, @@ -1654,13 +1677,15 @@ def _load_subsets_by_loader(loader, subset_contexts, options, subset_context, options=options ) + except Exception as exc: - exc_type, exc_value, exc_traceback = sys.exc_info() - formatted_traceback = "\n".join( - traceback.format_exception( + formatted_traceback = None + if not isinstance(exc, LoadError): + exc_type, exc_value, exc_traceback = sys.exc_info() + formatted_traceback = "".join(traceback.format_exception( exc_type, exc_value, exc_traceback - ) - ) + )) + error_info.append(( str(exc), formatted_traceback, diff --git a/openpype/tools/mayalookassigner/app.py b/openpype/tools/mayalookassigner/app.py index 0e633a21e3..f9508657e5 100644 --- a/openpype/tools/mayalookassigner/app.py +++ b/openpype/tools/mayalookassigner/app.py @@ -2,10 +2,11 @@ import sys import time import logging -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore -from avalon import io +from openpype.client import get_last_version_by_subset_id from openpype import style +from openpype.pipeline import legacy_io from openpype.tools.utils.lib import qt_app_context from openpype.hosts.maya.api.lib import assign_look_by_version @@ -211,6 +212,7 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): selection = self.assign_selected.isChecked() asset_nodes = self.asset_outliner.get_nodes(selection=selection) + project_name = legacy_io.active_project() start = time.time() for i, (asset, item) in enumerate(asset_nodes.items()): @@ -222,19 +224,20 @@ class MayaLookAssignerWindow(QtWidgets.QWidget): assign_look = next((subset for subset in item["looks"] if subset["name"] in looks), None) if not assign_look: - self.echo("{} No matching selected " - "look for {}".format(prefix, asset)) + self.echo( + "{} No matching selected look for {}".format(prefix, asset) + ) continue # Get the latest version of this asset's look subset - version = io.find_one({"type": "version", - "parent": assign_look["_id"]}, - sort=[("name", -1)]) + version = get_last_version_by_subset_id( + project_name, assign_look["_id"], fields=["_id"] + ) subset_name = assign_look["name"] - self.echo("{} Assigning {} to {}\t".format(prefix, - subset_name, - asset)) + self.echo("{} Assigning {} to {}\t".format( + prefix, subset_name, asset + )) nodes = item["nodes"] if cmds.pluginInfo('vrayformaya', query=True, loaded=True): diff --git a/openpype/tools/mayalookassigner/commands.py b/openpype/tools/mayalookassigner/commands.py index 78fd51c7a3..2e7a51efde 100644 --- a/openpype/tools/mayalookassigner/commands.py +++ b/openpype/tools/mayalookassigner/commands.py @@ -2,12 +2,14 @@ from collections import defaultdict import logging import os -from bson.objectid import ObjectId import maya.cmds as cmds -from avalon import io, api - -from openpype.pipeline import remove_container +from openpype.client import get_asset_by_id +from openpype.pipeline import ( + legacy_io, + remove_container, + registered_host, +) from openpype.hosts.maya.api import lib from .vray_proxies import get_alembic_ids_cache @@ -79,7 +81,7 @@ def 
get_all_asset_nodes(): list: list of dictionaries """ - host = api.registered_host() + host = registered_host() nodes = [] for container in host.ls(): @@ -157,9 +159,9 @@ def create_items_from_nodes(nodes): log.warning("No id hashes") return asset_view_items + project_name = legacy_io.active_project() for _id, id_nodes in id_hashes.items(): - asset = io.find_one({"_id": ObjectId(_id)}, - projection={"name": True}) + asset = get_asset_by_id(project_name, _id, fields=["name"]) # Skip if asset id is not found if not asset: @@ -176,10 +178,12 @@ def create_items_from_nodes(nodes): namespace = get_namespace_from_node(node) namespaces.add(namespace) - asset_view_items.append({"label": asset["name"], - "asset": asset, - "looks": looks, - "namespaces": namespaces}) + asset_view_items.append({ + "label": asset["name"], + "asset": asset, + "looks": looks, + "namespaces": namespaces + }) return asset_view_items @@ -192,7 +196,7 @@ def remove_unused_looks(): """ - host = api.registered_host() + host = registered_host() unused = [] for container in host.ls(): diff --git a/openpype/tools/mayalookassigner/models.py b/openpype/tools/mayalookassigner/models.py index 77a3c8a590..ed6a68bee0 100644 --- a/openpype/tools/mayalookassigner/models.py +++ b/openpype/tools/mayalookassigner/models.py @@ -1,6 +1,6 @@ from collections import defaultdict -from Qt import QtCore +from qtpy import QtCore import qtawesome from openpype.tools.utils import models diff --git a/openpype/tools/mayalookassigner/views.py b/openpype/tools/mayalookassigner/views.py index 8e676ebc7f..489c194f60 100644 --- a/openpype/tools/mayalookassigner/views.py +++ b/openpype/tools/mayalookassigner/views.py @@ -1,4 +1,4 @@ -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore class View(QtWidgets.QTreeView): @@ -10,7 +10,7 @@ class View(QtWidgets.QTreeView): # view settings self.setAlternatingRowColors(False) self.setSortingEnabled(True) - self.setSelectionMode(self.ExtendedSelection) + self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) def get_indices(self): diff --git a/openpype/tools/mayalookassigner/vray_proxies.py b/openpype/tools/mayalookassigner/vray_proxies.py index 25621fc652..889396e555 100644 --- a/openpype/tools/mayalookassigner/vray_proxies.py +++ b/openpype/tools/mayalookassigner/vray_proxies.py @@ -6,18 +6,21 @@ import logging import json import six -from bson.objectid import ObjectId import alembic.Abc from maya import cmds -from avalon import io, api - +from openpype.client import ( + get_representation_by_name, + get_last_version_by_subset_name, +) from openpype.pipeline import ( + legacy_io, load_container, loaders_from_representation, discover_loader_plugins, get_representation_path, + registered_host, ) from openpype.hosts.maya.api import lib @@ -155,11 +158,12 @@ def get_look_relationships(version_id): Returns: dict: Dictionary of relations. - """ - json_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "json"}) + + project_name = legacy_io.active_project() + json_representation = get_representation_by_name( + project_name, representation_name="json", version_id=version_id + ) # Load relationships shader_relation = get_representation_path(json_representation) @@ -182,13 +186,15 @@ def load_look(version_id): list of shader nodes. 
""" + + project_name = legacy_io.active_project() # Get representations of shader file and relationships - look_representation = io.find_one({"type": "representation", - "parent": version_id, - "name": "ma"}) + look_representation = get_representation_by_name( + project_name, representation_name="ma", version_id=version_id + ) # See if representation is already loaded, if so reuse it. - host = api.registered_host() + host = registered_host() representation_id = str(look_representation['_id']) for container in host.ls(): if (container['loader'] == "LookLoader" and @@ -216,36 +222,6 @@ def load_look(version_id): return shader_nodes -def get_latest_version(asset_id, subset): - # type: (str, str) -> dict - """Get latest version of subset. - - Args: - asset_id (str): Asset ID - subset (str): Subset name. - - Returns: - Latest version - - Throws: - RuntimeError: When subset or version doesn't exist. - - """ - subset = io.find_one({"name": subset, - "parent": ObjectId(asset_id), - "type": "subset"}) - if not subset: - raise RuntimeError("Subset does not exist: %s" % subset) - - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) - if not version: - raise RuntimeError("Version does not exist.") - - return version - - def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): # type: (str, str) -> None """Assign look to vray proxy. @@ -271,13 +247,20 @@ def vrayproxy_assign_look(vrayproxy, subset="lookDefault"): asset_id = node_id.split(":", 1)[0] node_ids_by_asset_id[asset_id].add(node_id) + project_name = legacy_io.active_project() for asset_id, node_ids in node_ids_by_asset_id.items(): # Get latest look version - try: - version = get_latest_version(asset_id, subset=subset) - except RuntimeError as exc: - print(exc) + version = get_last_version_by_subset_name( + project_name, + subset_name=subset, + asset_id=asset_id, + fields=["_id"] + ) + if not version: + print("Didn't find last version for subset name {}".format( + subset + )) continue relationships = get_look_relationships(version["_id"]) diff --git a/openpype/tools/mayalookassigner/widgets.py b/openpype/tools/mayalookassigner/widgets.py index 10e573342a..f2df17e68c 100644 --- a/openpype/tools/mayalookassigner/widgets.py +++ b/openpype/tools/mayalookassigner/widgets.py @@ -1,7 +1,7 @@ import logging from collections import defaultdict -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore from openpype.tools.utils.models import TreeModel from openpype.tools.utils.lib import ( diff --git a/openpype/tools/project_manager/project_manager/__init__.py b/openpype/tools/project_manager/project_manager/__init__.py index 6e44afd841..ac4e3d5f39 100644 --- a/openpype/tools/project_manager/project_manager/__init__.py +++ b/openpype/tools/project_manager/project_manager/__init__.py @@ -44,7 +44,7 @@ from .window import ProjectManagerWindow def main(): import sys - from Qt import QtWidgets + from qtpy import QtWidgets app = QtWidgets.QApplication([]) diff --git a/openpype/tools/project_manager/project_manager/constants.py b/openpype/tools/project_manager/project_manager/constants.py index 7ca4aa9492..72512d797b 100644 --- a/openpype/tools/project_manager/project_manager/constants.py +++ b/openpype/tools/project_manager/project_manager/constants.py @@ -1,5 +1,5 @@ import re -from Qt import QtCore +from qtpy import QtCore # Item identifier (unique ID - uuid4 is used) diff --git a/openpype/tools/project_manager/project_manager/delegates.py b/openpype/tools/project_manager/project_manager/delegates.py 
index 31487ff132..79e9554b0f 100644
--- a/openpype/tools/project_manager/project_manager/delegates.py
+++ b/openpype/tools/project_manager/project_manager/delegates.py
@@ -1,4 +1,4 @@
-from Qt import QtWidgets, QtCore
+from qtpy import QtWidgets, QtCore

from .widgets import (
    NameTextEdit,
@@ -205,3 +205,9 @@ class ToolsDelegate(QtWidgets.QStyledItemDelegate):

    def setModelData(self, editor, model, index):
        model.setData(index, editor.value(), QtCore.Qt.EditRole)
+
+    def displayText(self, value, locale):
+        if value:
+            return ", ".join(value)
+        else:
+            return ""
diff --git a/openpype/tools/project_manager/project_manager/model.py b/openpype/tools/project_manager/project_manager/model.py
index 1c3ec089f6..29a26f700f 100644
--- a/openpype/tools/project_manager/project_manager/model.py
+++ b/openpype/tools/project_manager/project_manager/model.py
@@ -5,7 +5,16 @@ from uuid import uuid4

from pymongo import UpdateOne, DeleteOne

-from Qt import QtCore, QtGui
+from qtpy import QtCore, QtGui
+
+from openpype.client import (
+    get_projects,
+    get_project,
+    get_assets,
+    get_asset_ids_with_subsets,
+)
+from openpype.client.operations import CURRENT_ASSET_DOC_SCHEMA
+from openpype.lib import Logger

from .constants import (
    IDENTIFIER_ROLE,
@@ -18,8 +27,6 @@
)
from .style import ResourceCache

-from openpype.lib import CURRENT_DOC_SCHEMAS
-

class ProjectModel(QtGui.QStandardItemModel):
    """Load possible projects to modify from MongoDB.
@@ -46,12 +53,8 @@ class ProjectModel(QtGui.QStandardItemModel):
        self._items_by_name[None] = none_project
        new_project_items.append(none_project)

-        project_docs = self.dbcon.projects(
-            projection={"name": 1},
-            only_active=True
-        )
        project_names = set()
-        for project_doc in project_docs:
+        for project_doc in get_projects(fields=["name"]):
            project_name = project_doc.get("name")
            if not project_name:
                continue
@@ -185,6 +188,7 @@ class HierarchyModel(QtCore.QAbstractItemModel):
            for key in self.multiselection_columns
        }

+        self._log = None
        # TODO Reset them on project change
        self._current_project = None
        self._root_item = None
@@ -194,6 +198,12 @@ class HierarchyModel(QtCore.QAbstractItemModel):

        self._reset_root_item()

+    @property
+    def log(self):
+        if self._log is None:
+            self._log = Logger.get_logger("ProjectManagerModel")
+        return self._log
+
    @property
    def items_by_id(self):
        return self._items_by_id
@@ -245,10 +255,11 @@ class HierarchyModel(QtCore.QAbstractItemModel):
            return

        # Find project's document
-        project_doc = self.dbcon.database[project_name].find_one(
-            {"type": "project"},
-            ProjectItem.query_projection
+        project_doc = get_project(
+            project_name,
+            fields=list(ProjectItem.query_projection.keys())
        )
+
        # Skip if project document does not exist
        # - this shouldn't happen using only UI elements
        if not project_doc:
@@ -259,9 +270,8 @@ class HierarchyModel(QtCore.QAbstractItemModel):
        self.add_item(project_item)

        # Query all assets of the project
-        asset_docs = self.dbcon.database[project_name].find(
-            {"type": "asset"},
-            AssetItem.query_projection
+        asset_docs = get_assets(
+            project_name, fields=AssetItem.query_projection.keys()
        )
        asset_docs_by_id = {
            asset_doc["_id"]: asset_doc
@@ -272,31 +282,16 @@ class HierarchyModel(QtCore.QAbstractItemModel):
        # if asset item can be modified (name and hierarchy change)
        # - the same must be applied to all its parents
        asset_ids = list(asset_docs_by_id.keys())
-        result = []
+        asset_ids_with_subsets = []
        if asset_ids:
-            result = self.dbcon.database[project_name].aggregate([
-                {
-                    "$match": {
-                        "type": "subset",
-
"parent": {"$in": asset_ids} - } - }, - { - "$group": { - "_id": "$parent", - "count": {"$sum": 1} - } - } - ]) + asset_ids_with_subsets = get_asset_ids_with_subsets( + project_name, asset_ids=asset_ids + ) asset_modifiable = { - asset_id: True + asset_id: asset_id not in asset_ids_with_subsets for asset_id in asset_docs_by_id.keys() } - for item in result: - asset_id = item["_id"] - count = item["count"] - asset_modifiable[asset_id] = count < 1 # Store assets by their visual parent to be able create their hierarchy asset_docs_by_parent_id = collections.defaultdict(list) @@ -1367,6 +1362,9 @@ class HierarchyModel(QtCore.QAbstractItemModel): to_process = collections.deque() to_process.append(project_item) + created_count = 0 + updated_count = 0 + removed_count = 0 bulk_writes = [] while to_process: parent = to_process.popleft() @@ -1381,6 +1379,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): insert_list.append(item) elif item.data(REMOVED_ROLE): + removed_count += 1 if item.data(HIERARCHY_CHANGE_ABLE_ROLE): bulk_writes.append(DeleteOne( {"_id": item.asset_id} @@ -1394,6 +1393,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): else: update_data = item.update_data() if update_data: + updated_count += 1 bulk_writes.append(UpdateOne( {"_id": item.asset_id}, update_data @@ -1406,11 +1406,21 @@ class HierarchyModel(QtCore.QAbstractItemModel): result = project_col.insert_many(new_docs) for idx, mongo_id in enumerate(result.inserted_ids): + created_count += 1 insert_list[idx].mongo_id = mongo_id + if sum([created_count, updated_count, removed_count]) == 0: + self.log.info("Nothing has changed") + return + if bulk_writes: project_col.bulk_write(bulk_writes) + self.log.info(( + "Save finished." + " Created {} | Updated {} | Removed {} asset documents" + ).format(created_count, updated_count, removed_count)) + self.refresh_project() def copy_mime_data(self, indexes): @@ -1447,12 +1457,7 @@ class HierarchyModel(QtCore.QAbstractItemModel): mimedata.setData("application/copy_task", encoded_data) return mimedata - def paste_mime_data(self, index, mime_data): - if not index.isValid(): - return - - item_id = index.data(IDENTIFIER_ROLE) - item = self._items_by_id[item_id] + def _paste_mime_data(self, item, mime_data): if not isinstance(item, (AssetItem, TaskItem)): return @@ -1486,6 +1491,25 @@ class HierarchyModel(QtCore.QAbstractItemModel): task_item = TaskItem(task_data, True) self.add_item(task_item, parent) + def paste(self, indexes, mime_data): + + # Get the selected Assets uniquely + items = set() + for index in indexes: + if not index.isValid(): + return + item_id = index.data(IDENTIFIER_ROLE) + item = self._items_by_id[item_id] + + # Do not copy into the Task Item so get parent Asset instead + if isinstance(item, TaskItem): + item = item.parent() + + items.add(item) + + for item in items: + self._paste_mime_data(item, mime_data) + class BaseItem: """Base item for HierarchyModel. 
@@ -1819,12 +1843,16 @@ class AssetItem(BaseItem): } query_projection = { "_id": 1, - "data.tasks": 1, - "data.visualParent": 1, - "schema": 1, - "name": 1, + "schema": 1, "type": 1, + "parent": 1, + + "data.visualParent": 1, + "data.parents": 1, + + "data.tasks": 1, + "data.frameStart": 1, "data.frameEnd": 1, "data.fps": 1, @@ -1835,7 +1863,7 @@ class AssetItem(BaseItem): "data.clipIn": 1, "data.clipOut": 1, "data.pixelAspect": 1, - "data.tools_env": 1 + "data.tools_env": 1, } def __init__(self, asset_doc): @@ -1931,7 +1959,7 @@ class AssetItem(BaseItem): } schema_name = ( self._origin_asset_doc.get("schema") - or CURRENT_DOC_SCHEMAS["asset"] + or CURRENT_ASSET_DOC_SCHEMA ) doc = { diff --git a/openpype/tools/project_manager/project_manager/multiselection_combobox.py b/openpype/tools/project_manager/project_manager/multiselection_combobox.py index f776831298..4b5d468982 100644 --- a/openpype/tools/project_manager/project_manager/multiselection_combobox.py +++ b/openpype/tools/project_manager/project_manager/multiselection_combobox.py @@ -1,4 +1,6 @@ -from Qt import QtCore, QtWidgets +from qtpy import QtCore, QtWidgets + +from openpype.tools.utils.lib import checkstate_int_to_enum class ComboItemDelegate(QtWidgets.QStyledItemDelegate): @@ -87,7 +89,9 @@ class MultiSelectionComboBox(QtWidgets.QComboBox): return index_flags = current_index.flags() - state = current_index.data(QtCore.Qt.CheckStateRole) + state = checkstate_int_to_enum( + current_index.data(QtCore.Qt.CheckStateRole) + ) new_state = None if event.type() == QtCore.QEvent.MouseButtonRelease: @@ -184,7 +188,9 @@ class MultiSelectionComboBox(QtWidgets.QComboBox): def value(self): items = list() for idx in range(self.count()): - state = self.itemData(idx, role=QtCore.Qt.CheckStateRole) + state = checkstate_int_to_enum( + self.itemData(idx, role=QtCore.Qt.CheckStateRole) + ) if state == QtCore.Qt.Checked: items.append( self.itemData(idx, role=QtCore.Qt.UserRole) @@ -194,7 +200,9 @@ class MultiSelectionComboBox(QtWidgets.QComboBox): def checked_items_text(self): items = list() for idx in range(self.count()): - state = self.itemData(idx, role=QtCore.Qt.CheckStateRole) + state = checkstate_int_to_enum( + self.itemData(idx, role=QtCore.Qt.CheckStateRole) + ) if state == QtCore.Qt.Checked: items.append(self.itemText(idx)) return items diff --git a/openpype/tools/project_manager/project_manager/style.py b/openpype/tools/project_manager/project_manager/style.py index 4405d05960..6445bc341d 100644 --- a/openpype/tools/project_manager/project_manager/style.py +++ b/openpype/tools/project_manager/project_manager/style.py @@ -1,5 +1,5 @@ import os -from Qt import QtGui +from qtpy import QtGui import qtawesome from openpype.tools.utils import paint_image_with_color diff --git a/openpype/tools/project_manager/project_manager/view.py b/openpype/tools/project_manager/project_manager/view.py index 74f5a06b71..fa08943ea5 100644 --- a/openpype/tools/project_manager/project_manager/view.py +++ b/openpype/tools/project_manager/project_manager/view.py @@ -1,8 +1,9 @@ import collections from queue import Queue -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui +from openpype.client import get_project from .delegates import ( NumberDelegate, NameDelegate, @@ -27,7 +28,7 @@ class NameDef: class NumberDef: def __init__(self, minimum=None, maximum=None, decimals=None): self.minimum = 0 if minimum is None else minimum - self.maximum = 999999 if maximum is None else maximum + self.maximum = 999999999 if maximum is None else 
maximum self.decimals = 0 if decimals is None else decimals @@ -47,12 +48,8 @@ class ProjectDocCache: def set_project(self, project_name): self.project_doc = None - if not project_name: - return - - self.project_doc = self.dbcon.database[project_name].find_one( - {"type": "project"} - ) + if project_name: + self.project_doc = get_project(project_name) class ToolsCache: @@ -137,8 +134,9 @@ class HierarchyView(QtWidgets.QTreeView): main_delegate = QtWidgets.QStyledItemDelegate() self.setItemDelegate(main_delegate) self.setAlternatingRowColors(True) - self.setSelectionMode(HierarchyView.ExtendedSelection) + self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + self.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers) column_delegates = {} column_key_to_index = {} @@ -195,13 +193,13 @@ class HierarchyView(QtWidgets.QTreeView): for idx, width in widths_by_idx.items(): self.setColumnWidth(idx, width) - def set_project(self, project_name): + def set_project(self, project_name, force=False): # Trigger helpers first self._project_doc_cache.set_project(project_name) self._tools_cache.refresh() # Trigger update of model after all data for delegates are filled - self._source_model.set_project(project_name) + self._source_model.set_project(project_name, force) def _on_project_reset(self): self.header_init() @@ -301,16 +299,6 @@ class HierarchyView(QtWidgets.QTreeView): def rowsInserted(self, parent_index, start, end): super(HierarchyView, self).rowsInserted(parent_index, start, end) - for row in range(start, end + 1): - for key, column in self._column_key_to_index.items(): - if key not in self.persistent_columns: - continue - col_index = self._source_model.index(row, column, parent_index) - if bool( - self._source_model.flags(col_index) - & QtCore.Qt.ItemIsEditable - ): - self.openPersistentEditor(col_index) # Expand parent on insert if not self.isExpanded(parent_index): @@ -365,20 +353,24 @@ class HierarchyView(QtWidgets.QTreeView): event.accept() def _copy_items(self, indexes=None): + clipboard = QtWidgets.QApplication.clipboard() try: if indexes is None: indexes = self.selectedIndexes() mime_data = self._source_model.copy_mime_data(indexes) - QtWidgets.QApplication.clipboard().setMimeData(mime_data) + clipboard.setMimeData(mime_data) self._show_message("Tasks copied") except ValueError as exc: + # Change clipboard to contain empty data + empty_mime_data = QtCore.QMimeData() + clipboard.setMimeData(empty_mime_data) self._show_message(str(exc)) def _paste_items(self): - index = self.currentIndex() mime_data = QtWidgets.QApplication.clipboard().mimeData() - self._source_model.paste_mime_data(index, mime_data) + rows = self.selectionModel().selectedRows() + self._source_model.paste(rows, mime_data) def _delete_items(self, indexes=None): if indexes is None: @@ -386,7 +378,7 @@ class HierarchyView(QtWidgets.QTreeView): self._source_model.delete_indexes(indexes) def _on_ctrl_shift_enter_pressed(self): - self._add_task_and_edit() + self.add_task_and_edit() def add_asset(self, parent_index=None): if parent_index is None: @@ -428,9 +420,9 @@ class HierarchyView(QtWidgets.QTreeView): self.edit(new_index) def _add_task_action(self): - self._add_task_and_edit() + self.add_task_and_edit() - def _add_task_and_edit(self): + def add_task_and_edit(self): new_index = self.add_task() if new_index is None: return diff --git a/openpype/tools/project_manager/project_manager/widgets.py b/openpype/tools/project_manager/project_manager/widgets.py 
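The MultiSelectionComboBox hunks above wrap every CheckStateRole read in checkstate_int_to_enum from openpype.tools.utils.lib. The helper's body is not part of this diff; this is a plausible minimal sketch of the normalization those call sites rely on (Qt5 bindings hand the role value back as a plain int, while the comparisons need QtCore.Qt.CheckState):

from qtpy import QtCore


def checkstate_int_to_enum(value):
    """Normalize CheckStateRole data to QtCore.Qt.CheckState, or None."""
    if value is None:
        return None
    if isinstance(value, QtCore.Qt.CheckState):
        return value
    # Qt5 bindings return the role value as a plain int.
    return QtCore.Qt.CheckState(int(value))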
index 39ea833961..06ae06e4d2 100644 --- a/openpype/tools/project_manager/project_manager/widgets.py +++ b/openpype/tools/project_manager/project_manager/widgets.py @@ -1,22 +1,22 @@ import re +from openpype.client import get_projects, create_project from .constants import ( NAME_ALLOWED_SYMBOLS, NAME_REGEX ) -from openpype.lib import ( - create_project, +from openpype.client.operations import ( PROJECT_NAME_ALLOWED_SYMBOLS, - PROJECT_NAME_REGEX + PROJECT_NAME_REGEX, ) from openpype.style import load_stylesheet +from openpype.pipeline import AvalonMongoDB from openpype.tools.utils import ( PlaceholderLineEdit, get_warning_pixmap ) -from avalon.api import AvalonMongoDB -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui class NameTextEdit(QtWidgets.QLineEdit): @@ -265,22 +265,16 @@ class CreateProjectDialog(QtWidgets.QDialog): project_name = self.project_name_input.text() project_code = self.project_code_input.text() library_project = self.library_project_input.isChecked() - create_project(project_name, project_code, library_project, self.dbcon) + create_project(project_name, project_code, library_project) self.done(1) def _get_existing_projects(self): project_names = set() project_codes = set() - for project_name in self.dbcon.database.collection_names(): - # Each collection will have exactly one project document - project_doc = self.dbcon.database[project_name].find_one( - {"type": "project"}, - {"name": 1, "data.code": 1} - ) - if not project_doc: - continue - + for project_doc in get_projects( + inactive=True, fields=["name", "data.code"] + ): project_name = project_doc.get("name") if not project_name: continue diff --git a/openpype/tools/project_manager/project_manager/window.py b/openpype/tools/project_manager/project_manager/window.py index bdf32c7415..942bdaeec3 100644 --- a/openpype/tools/project_manager/project_manager/window.py +++ b/openpype/tools/project_manager/project_manager/window.py @@ -1,4 +1,11 @@ -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui + +from openpype import resources +from openpype.style import load_stylesheet +from openpype.widgets import PasswordDialog +from openpype.lib import is_admin_password_required, Logger +from openpype.pipeline import AvalonMongoDB +from openpype.pipeline.project_folders import create_project_folders from . import ( ProjectModel, @@ -13,17 +20,6 @@ from . 
import ( ) from .widgets import ConfirmProjectDeletion from .style import ResourceCache -from openpype.style import load_stylesheet -from openpype.lib import is_admin_password_required -from openpype.widgets import PasswordDialog - -from openpype import resources -from openpype.api import ( - get_project_basic_paths, - create_project_folders, - Logger -) -from avalon.api import AvalonMongoDB class ProjectManagerWindow(QtWidgets.QWidget): @@ -184,14 +180,14 @@ class ProjectManagerWindow(QtWidgets.QWidget): self.resize(1200, 600) self.setStyleSheet(load_stylesheet()) - def _set_project(self, project_name=None): + def _set_project(self, project_name=None, force=False): self._create_folders_btn.setEnabled(project_name is not None) self._remove_projects_btn.setEnabled(project_name is not None) self._add_asset_btn.setEnabled(project_name is not None) self._add_task_btn.setEnabled(project_name is not None) self._save_btn.setEnabled(project_name is not None) self._project_proxy_model.set_filter_default(project_name is not None) - self.hierarchy_view.set_project(project_name) + self.hierarchy_view.set_project(project_name, force) def _current_project(self): row = self._project_combobox.currentIndex() @@ -229,11 +225,11 @@ class ProjectManagerWindow(QtWidgets.QWidget): self._project_combobox.setCurrentIndex(row) selected_project = self._current_project() - self._set_project(selected_project) + self._set_project(selected_project, True) def _on_project_change(self): selected_project = self._current_project() - self._set_project(selected_project, False) def _on_project_refresh(self): self.refresh_projects() @@ -245,26 +241,23 @@ class ProjectManagerWindow(QtWidgets.QWidget): self.hierarchy_view.add_asset() def _on_add_task(self): - self.hierarchy_view.add_task() + self.hierarchy_view.add_task_and_edit() def _on_create_folders(self): project_name = self._current_project() if not project_name: return - qm = QtWidgets.QMessageBox - ans = qm.question(self, - "OpenPype Project Manager", - "Confirm to create starting project folders?", - qm.Yes | qm.No) - if ans == qm.Yes: + result = QtWidgets.QMessageBox.question( + self, + "OpenPype Project Manager", + "Confirm to create starting project folders?", + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No + ) + if result == QtWidgets.QMessageBox.Yes: try: - # Get paths based on presets - basic_paths = get_project_basic_paths(project_name) - if not basic_paths: - pass # Invoking OpenPype API to create the project folders - create_project_folders(basic_paths, project_name) + create_project_folders(project_name) except Exception as exc: self.log.warning( "Cannot create starting folders: {}".format(exc), diff --git a/openpype/tools/publisher/__init__.py b/openpype/tools/publisher/__init__.py index a7b597eece..e69de29bb2 100644 --- a/openpype/tools/publisher/__init__.py +++ b/openpype/tools/publisher/__init__.py @@ -1,7 +0,0 @@ -from .app import show -from .window import PublisherWindow - -__all__ = ( - "show", - "PublisherWindow" -) diff --git a/openpype/tools/publisher/constants.py b/openpype/tools/publisher/constants.py index dc44aade45..b2bfd7dd5c 100644 --- a/openpype/tools/publisher/constants.py +++ b/openpype/tools/publisher/constants.py @@ -1,8 +1,12 @@ -from Qt import QtCore +from qtpy import QtCore # ID of context item in instance view CONTEXT_ID = "context" CONTEXT_LABEL = "Options" +# Not shown anywhere - used as identifier +CONTEXT_GROUP = "__ContextGroup__" + +CONVERTOR_ITEM_GROUP = "Incompatible subsets" # 
Allowed symbols for subset name (and variant) # - characters, numbers, underscore and dash @@ -16,7 +20,11 @@ INSTANCE_ID_ROLE = QtCore.Qt.UserRole + 1 SORT_VALUE_ROLE = QtCore.Qt.UserRole + 2 IS_GROUP_ROLE = QtCore.Qt.UserRole + 3 CREATOR_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 4 -FAMILY_ROLE = QtCore.Qt.UserRole + 5 +CREATOR_THUMBNAIL_ENABLED_ROLE = QtCore.Qt.UserRole + 5 +FAMILY_ROLE = QtCore.Qt.UserRole + 6 +GROUP_ROLE = QtCore.Qt.UserRole + 7 +CONVERTER_IDENTIFIER_ROLE = QtCore.Qt.UserRole + 8 +CREATOR_SORT_ROLE = QtCore.Qt.UserRole + 9 __all__ = ( @@ -28,5 +36,9 @@ __all__ = ( "SORT_VALUE_ROLE", "IS_GROUP_ROLE", "CREATOR_IDENTIFIER_ROLE", - "FAMILY_ROLE" + "CREATOR_THUMBNAIL_ENABLED_ROLE", + "CREATOR_SORT_ROLE", + "FAMILY_ROLE", + "GROUP_ROLE", + "CONVERTER_IDENTIFIER_ROLE", ) diff --git a/openpype/tools/publisher/control.py b/openpype/tools/publisher/control.py index 6707feac9c..023a20ca5e 100644 --- a/openpype/tools/publisher/control.py +++ b/openpype/tools/publisher/control.py @@ -1,30 +1,56 @@ import os import copy -import inspect import logging import traceback import collections +import uuid +import tempfile +import shutil +from abc import ABCMeta, abstractmethod, abstractproperty -import weakref -try: - from weakref import WeakMethod -except Exception: - from openpype.lib.python_2_comp import WeakMethod - -import avalon.api import six import pyblish.api -from openpype.pipeline import PublishValidationError -from openpype.pipeline.create import CreateContext - -from Qt import QtCore +from openpype.client import ( + get_assets, + get_asset_by_id, + get_subsets, +) +from openpype.lib.events import EventSystem +from openpype.lib.attribute_definitions import ( + serialize_attr_defs, + deserialize_attr_defs, +) +from openpype.pipeline import ( + PublishValidationError, + KnownPublishError, + registered_host, + legacy_io, + get_process_id, +) +from openpype.pipeline.create import ( + CreateContext, + AutoCreator, + HiddenCreator, + Creator, +) +from openpype.pipeline.create.context import ( + CreatorsOperationFailed, + ConvertorsOperationFailed, +) # Define constant for plugin orders offset PLUGIN_ORDER_OFFSET = 0.5 +class CardMessageTypes: + standard = None + error = "error" + + class MainThreadItem: """Callback with args and kwargs.""" + def __init__(self, callback, *args, **kwargs): self.callback = callback self.args = args @@ -34,64 +60,9 @@ class MainThreadItem: self.callback(*self.args, **self.kwargs) -class MainThreadProcess(QtCore.QObject): - """Qt based main thread process executor. - - Has timer which controls each 50ms if there is new item to process. - - This approach gives ability to update UI meanwhile plugin is in progress. 
- """ - - count_timeout = 2 - - def __init__(self): - super(MainThreadProcess, self).__init__() - self._items_to_process = collections.deque() - - timer = QtCore.QTimer() - timer.setInterval(0) - - timer.timeout.connect(self._execute) - - self._timer = timer - self._switch_counter = self.count_timeout - - def process(self, func, *args, **kwargs): - item = MainThreadItem(func, *args, **kwargs) - self.add_item(item) - - def add_item(self, item): - self._items_to_process.append(item) - - def _execute(self): - if not self._items_to_process: - return - - if self._switch_counter > 0: - self._switch_counter -= 1 - return - - self._switch_counter = self.count_timeout - - item = self._items_to_process.popleft() - item.process() - - def start(self): - if not self._timer.isActive(): - self._timer.start() - - def stop(self): - if self._timer.isActive(): - self._timer.stop() - - def clear(self): - if self._timer.isActive(): - self._timer.stop() - self._items_to_process = collections.deque() - - class AssetDocsCache: """Cache asset documents for creation part.""" + projection = { "_id": True, "name": True, @@ -102,46 +73,104 @@ class AssetDocsCache: def __init__(self, controller): self._controller = controller self._asset_docs = None + self._asset_docs_hierarchy = None self._task_names_by_asset_name = {} - - @property - def dbcon(self): - return self._controller.dbcon + self._asset_docs_by_name = {} + self._full_asset_docs_by_name = {} def reset(self): self._asset_docs = None + self._asset_docs_hierarchy = None self._task_names_by_asset_name = {} + self._asset_docs_by_name = {} + self._full_asset_docs_by_name = {} def _query(self): - if self._asset_docs is None: - asset_docs = list(self.dbcon.find( - {"type": "asset"}, - self.projection - )) - task_names_by_asset_name = {} - for asset_doc in asset_docs: - asset_name = asset_doc["name"] - asset_tasks = asset_doc.get("data", {}).get("tasks") or {} - task_names_by_asset_name[asset_name] = list(asset_tasks.keys()) - self._asset_docs = asset_docs - self._task_names_by_asset_name = task_names_by_asset_name + if self._asset_docs is not None: + return + + project_name = self._controller.project_name + asset_docs = list(get_assets( + project_name, fields=self.projection.keys() + )) + asset_docs_by_name = {} + task_names_by_asset_name = {} + for asset_doc in asset_docs: + if "data" not in asset_doc: + asset_doc["data"] = {"tasks": {}, "visualParent": None} + elif "tasks" not in asset_doc["data"]: + asset_doc["data"]["tasks"] = {} + + asset_name = asset_doc["name"] + asset_tasks = asset_doc["data"]["tasks"] + task_names_by_asset_name[asset_name] = list(asset_tasks.keys()) + asset_docs_by_name[asset_name] = asset_doc + + self._asset_docs = asset_docs + self._asset_docs_by_name = asset_docs_by_name + self._task_names_by_asset_name = task_names_by_asset_name def get_asset_docs(self): self._query() return copy.deepcopy(self._asset_docs) + def get_asset_hierarchy(self): + """Prepare asset documents into hierarchy. + + Convert ObjectId to string. Asset id is not used during whole + process of publisher but asset name is used rather. + + Returns: + Dict[Union[str, None]: Any]: Mapping of parent id to it's children. + Top level assets have parent id 'None'. 
+ """ + + if self._asset_docs_hierarchy is None: + _queue = collections.deque(self.get_asset_docs()) + + output = collections.defaultdict(list) + while _queue: + asset_doc = _queue.popleft() + asset_doc["_id"] = str(asset_doc["_id"]) + parent_id = asset_doc["data"]["visualParent"] + if parent_id is not None: + parent_id = str(parent_id) + asset_doc["data"]["visualParent"] = parent_id + output[parent_id].append(asset_doc) + self._asset_docs_hierarchy = output + return copy.deepcopy(self._asset_docs_hierarchy) + def get_task_names_by_asset_name(self): self._query() return copy.deepcopy(self._task_names_by_asset_name) + def get_asset_by_name(self, asset_name): + self._query() + asset_doc = self._asset_docs_by_name.get(asset_name) + if asset_doc is None: + return None + return copy.deepcopy(asset_doc) + + def get_full_asset_by_name(self, asset_name): + self._query() + if asset_name not in self._full_asset_docs_by_name: + asset_doc = self._asset_docs_by_name.get(asset_name) + project_name = self._controller.project_name + full_asset_doc = get_asset_by_id(project_name, asset_doc["_id"]) + self._full_asset_docs_by_name[asset_name] = full_asset_doc + return copy.deepcopy(self._full_asset_docs_by_name[asset_name]) + class PublishReport: """Report for single publishing process. Report keeps current state of publishing and currently processed plugin. """ + def __init__(self, controller): self.controller = controller + self._create_discover_result = None + self._convert_discover_result = None self._publish_discover_result = None self._plugin_data = [] self._plugin_data_with_plugin = [] @@ -151,15 +180,24 @@ class PublishReport: self._all_instances_by_id = {} self._current_context = None - def reset(self, context, publish_discover_result=None): + def reset(self, context, create_context): """Reset report and clear all data.""" - self._publish_discover_result = publish_discover_result + + self._create_discover_result = create_context.creator_discover_result + self._convert_discover_result = ( + create_context.convertor_discover_result + ) + self._publish_discover_result = create_context.publish_discover_result self._plugin_data = [] self._plugin_data_with_plugin = [] self._current_plugin_data = {} self._all_instances_by_id = {} self._current_context = context + for plugin in create_context.publish_plugins_mismatch_targets: + plugin_data = self._add_plugin_data_item(plugin) + plugin_data["skipped"] = True + def add_plugin_iter(self, plugin, context): """Add report about single iteration of plugin.""" for instance in context: @@ -202,6 +240,7 @@ class PublishReport: "name": plugin.__name__, "label": label, "order": plugin.order, + "targets": list(plugin.targets), "instances_data": [], "actions_data": [], "skipped": False, @@ -214,13 +253,15 @@ class PublishReport: def add_result(self, result): """Handle result of one plugin and it's instance.""" + instance = result["instance"] instance_id = None if instance is not None: instance_id = instance.id self._current_plugin_data["instances_data"].append({ "id": instance_id, - "logs": self._extract_instance_log_items(result) + "logs": self._extract_instance_log_items(result), + "process_time": result["duration"] }) def add_action_result(self, action, result): @@ -258,9 +299,19 @@ class PublishReport: if plugin not in self._stored_plugins: plugins_data.append(self._create_plugin_data_item(plugin)) - crashed_file_paths = {} + reports = [] + if self._create_discover_result is not None: + reports.append(self._create_discover_result) + + if self._convert_discover_result 
is not None: + reports.append(self._convert_discover_result) + if self._publish_discover_result is not None: - items = self._publish_discover_result.crashed_file_paths.items() + reports.append(self._publish_discover_result) + + crashed_file_paths = {} + for report in reports: + items = report.crashed_file_paths.items() for filepath, exc_info in items: crashed_file_paths[filepath] = "".join( traceback.format_exception(*exc_info) @@ -270,12 +321,17 @@ "plugins_data": plugins_data, "instances": instances_details, "context": self._extract_context_data(self._current_context), - "crashed_file_paths": crashed_file_paths + "crashed_file_paths": crashed_file_paths, + "id": str(uuid.uuid4()), + "report_version": "1.0.0" } def _extract_context_data(self, context): + context_label = "Context" + if context is not None: + context_label = context.data.get("label") return { - "label": context.data.get("label") + "label": context_label } def _extract_instance_data(self, instance, exists): @@ -342,44 +398,1223 @@ class PublishReport: return output -class PublisherController: +class PublishPluginsProxy: + """Wrapper around publish plugin. + + Prepare mapping for publish plugins and actions. Also can create + serializable data for plugin actions so the UI doesn't have to have + access to them. + + This object is created in process where publishing is actually running. + + Notes: + Actions have an id, but a single action can be used on multiple + plugins, so running an action requires the combination of plugin + and action. + + Args: + plugins (List[pyblish.api.Plugin]): Discovered plugins that will be + processed. + """ + + def __init__(self, plugins): + plugins_by_id = {} + actions_by_id = {} + action_ids_by_plugin_id = {} + for plugin in plugins: + plugin_id = plugin.id + plugins_by_id[plugin_id] = plugin + + action_ids = set() + action_ids_by_plugin_id[plugin_id] = action_ids + + actions = getattr(plugin, "actions", None) or [] + for action in actions: + action_id = action.id + action_ids.add(action_id) + actions_by_id[action_id] = action + + self._plugins_by_id = plugins_by_id + self._actions_by_id = actions_by_id + self._action_ids_by_plugin_id = action_ids_by_plugin_id + + def get_action(self, action_id): + return self._actions_by_id[action_id] + + def get_plugin(self, plugin_id): + return self._plugins_by_id[plugin_id] + + def get_plugin_id(self, plugin): + """Get id of plugin based on plugin object. + + It's used for the validation errors report. + + Args: + plugin (pyblish.api.Plugin): Publish plugin for which id should be + returned. + + Returns: + str: Plugin id. + """ + + return plugin.id + + def get_plugin_action_items(self, plugin_id): + """Get plugin action items for plugin by its id. + + Args: + plugin_id (str): Publish plugin id. + + Returns: + List[PublishPluginActionItem]: Items with information about publish + plugin actions. + """ + + return [ + self._create_action_item(self._actions_by_id[action_id], plugin_id) + for action_id in self._action_ids_by_plugin_id[plugin_id] + ] + + def _create_action_item(self, action, plugin_id): + label = action.label or action.__name__ + icon = getattr(action, "icon", None) + return PublishPluginActionItem( + action.id, + plugin_id, + action.active, + action.on, + label, + icon + ) + + +class PublishPluginActionItem: + """Representation of publish plugin action. + + Data driven object which is used as proxy for controller and UI. + + Args: + action_id (str): Action id. + plugin_id (str): Plugin id. + active (bool): Action is active. 
+ on_filter (str): Actions have an 'on' attribute which defines when + the action can be triggered (e.g. 'all', 'failed', ...). + label (str): Action's label. + icon (Union[str, None]): Action's icon. + """ + + def __init__(self, action_id, plugin_id, active, on_filter, label, icon): + self.action_id = action_id + self.plugin_id = plugin_id + self.active = active + self.on_filter = on_filter + self.label = label + self.icon = icon + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Union[str,bool,None]]: Serialized object. + """ + + return { + "action_id": self.action_id, + "plugin_id": self.plugin_id, + "active": self.active, + "on_filter": self.on_filter, + "label": self.label, + "icon": self.icon + } + + @classmethod + def from_data(cls, data): + """Create object from data. + + Args: + data (Dict[str, Union[str,bool,None]]): Data used to recreate + object. + + Returns: + PublishPluginActionItem: Object created using data. + """ + + return cls(**data) + + +class ValidationErrorItem: + """Data driven validation error item. + + Prepared data container with information about validation error and its + source plugin. + + Can be converted to raw data and recreated, which should be used for + the controller and UI connection. + + Args: + instance_id (str): Id of the pyblish instance to which the validation + error is connected. + instance_label (str): Prepared instance label. + plugin_id (str): Id of pyblish Plugin which triggered the validation + error. Id is generated using 'PublishPluginsProxy'. + """ + + def __init__( + self, + instance_id, + instance_label, + plugin_id, + context_validation, + title, + description, + detail, + ): + self.instance_id = instance_id + self.instance_label = instance_label + self.plugin_id = plugin_id + self.context_validation = context_validation + self.title = title + self.description = description + self.detail = detail + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Union[str, bool, None]]: Serialized object data. + """ + + return { + "instance_id": self.instance_id, + "instance_label": self.instance_label, + "plugin_id": self.plugin_id, + "context_validation": self.context_validation, + "title": self.title, + "description": self.description, + "detail": self.detail, + } + + @classmethod + def from_result(cls, plugin_id, error, instance): + """Create new object based on result from controller. + + Returns: + ValidationErrorItem: New object with filled data. + """ + + instance_label = None + instance_id = None + if instance is not None: + instance_label = ( + instance.data.get("label") or instance.data.get("name") + ) + instance_id = instance.id + + return cls( + instance_id, + instance_label, + plugin_id, + instance is None, + error.title, + error.description, + error.detail, + ) + + @classmethod + def from_data(cls, data): + return cls(**data) + + +class PublishValidationErrorsReport: + """Publish validation errors report that can be parsed to raw data. + + Args: + error_items (List[ValidationErrorItem]): List of validation errors. + plugin_action_items (Dict[str, PublishPluginActionItem]): Action items + by plugin id. + """ + + def __init__(self, error_items, plugin_action_items): + self._error_items = error_items + self._plugin_action_items = plugin_action_items + + def __iter__(self): + for item in self._error_items: + yield item + + def group_items_by_title(self): + """Group errors by plugin and their titles. + + Items are grouped by plugin and title -> the same title from a + different plugin is a different item. 
Items are ordered by plugin order. + + Returns: + List[Dict[str, Any]]: List where each item holds a title, the + instance information related to that title and the possible + plugin actions. + """ + + ordered_plugin_ids = [] + error_items_by_plugin_id = collections.defaultdict(list) + for error_item in self._error_items: + plugin_id = error_item.plugin_id + if plugin_id not in ordered_plugin_ids: + ordered_plugin_ids.append(plugin_id) + error_items_by_plugin_id[plugin_id].append(error_item) + + grouped_error_items = [] + for plugin_id in ordered_plugin_ids: + plugin_action_items = self._plugin_action_items[plugin_id] + error_items = error_items_by_plugin_id[plugin_id] + + titles = [] + error_items_by_title = collections.defaultdict(list) + for error_item in error_items: + title = error_item.title + if title not in titles: + titles.append(error_item.title) + error_items_by_title[title].append(error_item) + + for title in titles: + grouped_error_items.append({ + "plugin_action_items": list(plugin_action_items), + "error_items": error_items_by_title[title], + "title": title + }) + return grouped_error_items + + def to_data(self): + """Serialize object to dictionary. + + Returns: + Dict[str, Any]: Serialized data. + """ + + error_items = [ + item.to_data() + for item in self._error_items + ] + + plugin_action_items = { + plugin_id: [ + action_item.to_data() + for action_item in action_items + ] + for plugin_id, action_items in self._plugin_action_items.items() + } + + return { + "error_items": error_items, + "plugin_action_items": plugin_action_items + } + + @classmethod + def from_data(cls, data): + """Recreate object from data. + + Args: + data (dict[str, Any]): Data to recreate object. Can be created + using 'to_data' method. + + Returns: + PublishValidationErrorsReport: New object based on data. + """ + + error_items = [ + ValidationErrorItem.from_data(error_item) + for error_item in data["error_items"] + ] + plugin_action_items = { + plugin_id: [ + PublishPluginActionItem.from_data(action_item) + for action_item in action_items + ] + for plugin_id, action_items in data["plugin_action_items"].items() + } + return cls(error_items, plugin_action_items) + + +class PublishValidationErrors: + """Object to keep track of validation errors by plugin.""" + + def __init__(self): + self._plugins_proxy = None + self._error_items = [] + self._plugin_action_items = {} + + def __bool__(self): + return self.has_errors + + @property + def has_errors(self): + """At least one error was added.""" + + return bool(self._error_items) + + def reset(self, plugins_proxy): + """Reset object to default state. + + Args: + plugins_proxy (PublishPluginsProxy): Proxy which stores plugins + and actions by ids and creates a mapping of action ids by + plugin ids. + """ + + self._plugins_proxy = plugins_proxy + self._error_items = [] + self._plugin_action_items = {} + + def create_report(self): + """Create report based on currently existing errors. + + Returns: + PublishValidationErrorsReport: Validation error report with all + error information and publish plugin action items. + """ + + return PublishValidationErrorsReport( + self._error_items, self._plugin_action_items + ) + + def add_error(self, plugin, error, instance): + """Add error from pyblish result. + + Args: + plugin (pyblish.api.Plugin): Plugin which triggered error. + error (ValidationException): Validation error. + instance (Union[pyblish.api.Instance, None]): Instance on which + the error was raised, or None if it was raised on the context. 
+ """ + + # Make sure the cached report is cleared + plugin_id = self._plugins_proxy.get_plugin_id(plugin) + self._error_items.append( + ValidationErrorItem.from_result(plugin_id, error, instance) + ) + if plugin_id in self._plugin_action_items: + return + + plugin_actions = self._plugins_proxy.get_plugin_action_items( + plugin_id + ) + self._plugin_action_items[plugin_id] = plugin_actions + + +class CreatorType: + def __init__(self, name): + self.name = name + + def __str__(self): + return self.name + + def __eq__(self, other): + return self.name == str(other) + + def __ne__(self, other): + # This is implemented only because of Python 2 + return not self == other + + +class CreatorTypes: + base = CreatorType("base") + auto = CreatorType("auto") + hidden = CreatorType("hidden") + artist = CreatorType("artist") + + @classmethod + def from_str(cls, value): + for creator_type in ( + cls.base, + cls.auto, + cls.hidden, + cls.artist + ): + if value == creator_type: + return creator_type + raise ValueError("Unknown type \"{}\"".format(str(value))) + + +class CreatorItem: + """Wrapper around Creator plugin. + + Object can be serialized and recreated. + """ + + def __init__( + self, + identifier, + creator_type, + family, + label, + group_label, + icon, + description, + detailed_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + show_order, + pre_create_attributes_defs, + ): + self.identifier = identifier + self.creator_type = creator_type + self.family = family + self.label = label + self.group_label = group_label + self.icon = icon + self.description = description + self.detailed_description = detailed_description + self.default_variant = default_variant + self.default_variants = default_variants + self.create_allow_context_change = create_allow_context_change + self.create_allow_thumbnail = create_allow_thumbnail + self.show_order = show_order + self.pre_create_attributes_defs = pre_create_attributes_defs + + def get_group_label(self): + return self.group_label + + @classmethod + def from_creator(cls, creator): + if isinstance(creator, AutoCreator): + creator_type = CreatorTypes.auto + elif isinstance(creator, HiddenCreator): + creator_type = CreatorTypes.hidden + elif isinstance(creator, Creator): + creator_type = CreatorTypes.artist + else: + creator_type = CreatorTypes.base + + description = None + detail_description = None + default_variant = None + default_variants = None + pre_create_attr_defs = None + create_allow_context_change = None + create_allow_thumbnail = None + show_order = creator.order + if creator_type is CreatorTypes.artist: + description = creator.get_description() + detail_description = creator.get_detail_description() + default_variant = creator.get_default_variant() + default_variants = creator.get_default_variants() + pre_create_attr_defs = creator.get_pre_create_attr_defs() + create_allow_context_change = creator.create_allow_context_change + create_allow_thumbnail = creator.create_allow_thumbnail + show_order = creator.show_order + + identifier = creator.identifier + return cls( + identifier, + creator_type, + creator.family, + creator.label or identifier, + creator.get_group_label(), + creator.get_icon(), + description, + detail_description, + default_variant, + default_variants, + create_allow_context_change, + create_allow_thumbnail, + show_order, + pre_create_attr_defs, + ) + + def to_data(self): + pre_create_attributes_defs = None + if self.pre_create_attributes_defs is not None: + 
pre_create_attributes_defs = serialize_attr_defs( + self.pre_create_attributes_defs + ) + + return { + "identifier": self.identifier, + "creator_type": str(self.creator_type), + "family": self.family, + "label": self.label, + "group_label": self.group_label, + "icon": self.icon, + "description": self.description, + "detailed_description": self.detailed_description, + "default_variant": self.default_variant, + "default_variants": self.default_variants, + "create_allow_context_change": self.create_allow_context_change, + "create_allow_thumbnail": self.create_allow_thumbnail, + "show_order": self.show_order, + "pre_create_attributes_defs": pre_create_attributes_defs, + } + + @classmethod + def from_data(cls, data): + pre_create_attributes_defs = data["pre_create_attributes_defs"] + if pre_create_attributes_defs is not None: + data["pre_create_attributes_defs"] = deserialize_attr_defs( + pre_create_attributes_defs + ) + + data["creator_type"] = CreatorTypes.from_str(data["creator_type"]) + return cls(**data) + + +@six.add_metaclass(ABCMeta) +class AbstractPublisherController(object): + """Publisher tool controller. + + Define what must be implemented to be able to use Publisher + functionality. + + The goal is to have a "data driven" controller that can be used to + control a UI running in a different process. That leads to some + disadvantages: the UI can't access objects directly but only through + wrappers that can be serialized. + """ + + @abstractproperty + def log(self): + """Controller's logger object. + + Returns: + logging.Logger: Logger object that can be used for logging. + """ + + pass + + @abstractproperty + def event_system(self): + """Inner event system for publisher controller.""" + + pass + + @abstractproperty + def project_name(self): + """Current context project name. + + Returns: + str: Name of project. + """ + + pass + + @abstractproperty + def current_asset_name(self): + """Current context asset name. + + Returns: + Union[str, None]: Name of asset. + """ + + pass + + @abstractproperty + def current_task_name(self): + """Current context task name. + + Returns: + Union[str, None]: Name of task. + """ + + pass + + @abstractproperty + def host_is_valid(self): + """Host is valid for creation part. + + Host must have implemented certain functionality to be able to + create in the Publisher tool. + + Returns: + bool: Host can handle creation of instances. + """ + + pass + + @abstractproperty + def instances(self): + """Collected/created instances. + + Returns: + List[CreatedInstance]: List of created instances. + """ + + pass + + @abstractmethod + def get_context_title(self): + """Get context title for artist shown at the top of main window. + + Returns: + Union[str, None]: Context title for window or None. In case of None + a warning is displayed (not nice for artists). + """ + + pass + + @abstractmethod + def get_asset_docs(self): + pass + + @abstractmethod + def get_asset_hierarchy(self): + pass + + @abstractmethod + def get_task_names_by_asset_names(self, asset_names): + pass + + @abstractmethod + def get_existing_subset_names(self, asset_name): + pass + + @abstractmethod + def reset(self): + """Reset whole controller. + + This should reset create context, publish context and all variables + that are related to it. 
+ """ + + pass + + @abstractmethod + def get_creator_attribute_definitions(self, instances): + pass + + @abstractmethod + def get_publish_attribute_definitions(self, instances, include_context): + pass + + @abstractmethod + def get_creator_icon(self, identifier): + """Receive creator's icon by identifier. + + Args: + identifier (str): Creator's identifier. + + Returns: + Union[str, None]: Creator's icon string. + """ + + pass + + @abstractmethod + def get_subset_name( + self, + creator_identifier, + variant, + task_name, + asset_name, + instance_id=None + ): + """Get subset name based on passed data. + + Args: + creator_identifier (str): Identifier of creator which should be + responsible for subset name creation. + variant (str): Variant value from user's input. + task_name (str): Name of task for which is instance created. + asset_name (str): Name of asset for which is instance created. + instance_id (Union[str, None]): Existing instance id when subset + name is updated. + """ + + pass + + @abstractmethod + def create( + self, creator_identifier, subset_name, instance_data, options + ): + """Trigger creation by creator identifier. + + Should also trigger refresh of instanes. + + Args: + creator_identifier (str): Identifier of Creator plugin. + subset_name (str): Calculated subset name. + instance_data (Dict[str, Any]): Base instance data with variant, + asset name and task name. + options (Dict[str, Any]): Data from pre-create attributes. + """ + + pass + + @abstractmethod + def save_changes(self): + """Save changes in create context.""" + + pass + + @abstractmethod + def remove_instances(self, instance_ids): + """Remove list of instances from create context.""" + # TODO expect instance ids + + pass + + @abstractproperty + def publish_has_finished(self): + """Has publishing finished. + + Returns: + bool: If publishing finished and all plugins were iterated. + """ + + pass + + @abstractproperty + def publish_is_running(self): + """Publishing is running right now. + + Returns: + bool: If publishing is in progress. + """ + + pass + + @abstractproperty + def publish_has_validated(self): + """Publish validation passed. + + Returns: + bool: If publishing passed last possible validation order. + """ + + pass + + @abstractproperty + def publish_has_crashed(self): + """Publishing crashed for any reason. + + Returns: + bool: Publishing crashed. + """ + + pass + + @abstractproperty + def publish_has_validation_errors(self): + """During validation happened at least one validation error. + + Returns: + bool: Validation error was raised during validation. + """ + + pass + + @abstractproperty + def publish_max_progress(self): + """Get maximum possible progress number. + + Returns: + int: Number that can be used as 100% of publish progress bar. + """ + + pass + + @abstractproperty + def publish_progress(self): + """Current progress number. + + Returns: + int: Current progress value from 0 to 'publish_max_progress'. + """ + + pass + + @abstractproperty + def publish_error_msg(self): + """Current error message which cause fail of publishing. + + Returns: + Union[str, None]: Message which will be showed to artist or + None. 
+ """ + + pass + + @abstractmethod + def get_publish_report(self): + pass + + @abstractmethod + def get_validation_errors(self): + pass + + @abstractmethod + def publish(self): + """Trigger publishing without any order limitations.""" + + pass + + @abstractmethod + def validate(self): + """Trigger publishing which will stop after validation order.""" + + pass + + @abstractmethod + def stop_publish(self): + """Stop publishing can be also used to pause publishing. + + Pause of publishing is possible only if all plugins successfully + finished. + """ + + pass + + @abstractmethod + def run_action(self, plugin_id, action_id): + """Trigger pyblish action on a plugin. + + Args: + plugin_id (str): Id of publish plugin. + action_id (str): Id of publish action. + """ + + pass + + @abstractproperty + def convertor_items(self): + pass + + @abstractmethod + def trigger_convertor_items(self, convertor_identifiers): + pass + + @abstractmethod + def get_thumbnail_paths_for_instances(self, instance_ids): + pass + + @abstractmethod + def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping): + pass + + @abstractmethod + def set_comment(self, comment): + """Set comment on pyblish context. + + Set "comment" key on current pyblish.api.Context data. + + Args: + comment (str): Artist's comment. + """ + + pass + + @abstractmethod + def emit_card_message( + self, message, message_type=CardMessageTypes.standard + ): + """Emit a card message which can have a lifetime. + + This is for UI purposes. Method can be extended to more arguments + in future e.g. different message timeout or type (color). + + Args: + message (str): Message that will be showed. + """ + + pass + + @abstractmethod + def get_thumbnail_temp_dir_path(self): + """Return path to directory where thumbnails can be temporary stored. + + Returns: + str: Path to a directory. + """ + + pass + + @abstractmethod + def clear_thumbnail_temp_dir_path(self): + """Remove content of thumbnail temp directory.""" + + pass + + +class BasePublisherController(AbstractPublisherController): + """Implement common logic for controllers. + + Implement event system, logger and common attributes. Attributes are + triggering value changes so anyone can listen to their topics. + + Prepare implementation for creator items. Controller must implement just + their filling by '_collect_creator_items'. + + All prepared implementation is based on calling super '__init__'. + """ + + def __init__(self): + self._log = None + self._event_system = None + + # Host is valid for creation + self._host_is_valid = False + + # Any other exception that happened during publishing + self._publish_error_msg = None + # Publishing is in progress + self._publish_is_running = False + # Publishing is over validation order + self._publish_has_validated = False + + self._publish_has_validation_errors = False + self._publish_has_crashed = False + # All publish plugins are processed + self._publish_has_finished = False + self._publish_max_progress = 0 + self._publish_progress = 0 + + # Controller must '_collect_creator_items' to fill the value + self._creator_items = None + + @property + def log(self): + """Controller's logger object. + + Returns: + logging.Logger: Logger object that can be used for logging. + """ + + if self._log is None: + self._log = logging.getLogget(self.__class__.__name__) + return self._log + + @property + def event_system(self): + """Inner event system for publisher controller. + + Is used for communication with UI. Event system is autocreated. 
+ + Known topics: + "show.detailed.help" - Detailed help requested (UI related). + "show.card.message" - Show card message request (UI related). + "instances.refresh.finished" - Instances are refreshed. + "plugins.refresh.finished" - Plugins refreshed. + "publish.reset.finished" - Publish context reset finished. + "controller.reset.finished" - Controller reset finished. + "publish.process.started" - Publishing started. Can be started from + paused state. + "publish.process.stopped" - Publishing stopped/paused process. + "publish.process.plugin.changed" - Plugin state has changed. + "publish.process.instance.changed" - Instance state has changed. + "publish.has_validated.changed" - Attr 'publish_has_validated' + changed. + "publish.is_running.changed" - Attr 'publish_is_running' changed. + "publish.has_crashed.changed" - Attr 'publish_has_crashed' changed. + "publish.publish_error.changed" - Attr 'publish_error' + "publish.has_validation_errors.changed" - Attr + 'has_validation_errors' changed. + "publish.max_progress.changed" - Attr 'publish_max_progress' + changed. + "publish.progress.changed" - Attr 'publish_progress' changed. + "publish.host_is_valid.changed" - Attr 'host_is_valid' changed. + "publish.finished.changed" - Attr 'publish_has_finished' changed. + + Returns: + EventSystem: Event system which can trigger callbacks for topics. + """ + + if self._event_system is None: + self._event_system = EventSystem() + return self._event_system + + def _emit_event(self, topic, data=None): + if data is None: + data = {} + self.event_system.emit(topic, data, "controller") + + def _get_host_is_valid(self): + return self._host_is_valid + + def _set_host_is_valid(self, value): + if self._host_is_valid != value: + self._host_is_valid = value + self._emit_event("publish.host_is_valid.changed", {"value": value}) + + def _get_publish_has_finished(self): + return self._publish_has_finished + + def _set_publish_has_finished(self, value): + if self._publish_has_finished != value: + self._publish_has_finished = value + self._emit_event("publish.finished.changed", {"value": value}) + + def _get_publish_is_running(self): + return self._publish_is_running + + def _set_publish_is_running(self, value): + if self._publish_is_running != value: + self._publish_is_running = value + self._emit_event("publish.is_running.changed", {"value": value}) + + def _get_publish_has_validated(self): + return self._publish_has_validated + + def _set_publish_has_validated(self, value): + if self._publish_has_validated != value: + self._publish_has_validated = value + self._emit_event("publish.has_validated.changed", {"value": value}) + + def _get_publish_has_crashed(self): + return self._publish_has_crashed + + def _set_publish_has_crashed(self, value): + if self._publish_has_crashed != value: + self._publish_has_crashed = value + self._emit_event("publish.has_crashed.changed", {"value": value}) + + def _get_publish_has_validation_errors(self): + return self._publish_has_validation_errors + + def _set_publish_has_validation_errors(self, value): + if self._publish_has_validation_errors != value: + self._publish_has_validation_errors = value + self._emit_event( + "publish.has_validation_errors.changed", + {"value": value} + ) + + def _get_publish_max_progress(self): + return self._publish_max_progress + + def _set_publish_max_progress(self, value): + if self._publish_max_progress != value: + self._publish_max_progress = value + self._emit_event("publish.max_progress.changed", {"value": value}) + + def 
_get_publish_progress(self): + return self._publish_progress + + def _set_publish_progress(self, value): + if self._publish_progress != value: + self._publish_progress = value + self._emit_event("publish.progress.changed", {"value": value}) + + def _get_publish_error_msg(self): + return self._publish_error_msg + + def _set_publish_error_msg(self, value): + if self._publish_error_msg != value: + self._publish_error_msg = value + self._emit_event("publish.publish_error.changed", {"value": value}) + + host_is_valid = property( + _get_host_is_valid, _set_host_is_valid + ) + publish_has_finished = property( + _get_publish_has_finished, _set_publish_has_finished + ) + publish_is_running = property( + _get_publish_is_running, _set_publish_is_running + ) + publish_has_validated = property( + _get_publish_has_validated, _set_publish_has_validated + ) + publish_has_crashed = property( + _get_publish_has_crashed, _set_publish_has_crashed + ) + publish_has_validation_errors = property( + _get_publish_has_validation_errors, _set_publish_has_validation_errors + ) + publish_max_progress = property( + _get_publish_max_progress, _set_publish_max_progress + ) + publish_progress = property( + _get_publish_progress, _set_publish_progress + ) + publish_error_msg = property( + _get_publish_error_msg, _set_publish_error_msg + ) + + def _reset_attributes(self): + """Reset most of the attributes that can be reset.""" + + self.publish_is_running = False + self.publish_has_validated = False + self.publish_has_crashed = False + self.publish_has_validation_errors = False + self.publish_has_finished = False + + self.publish_error_msg = None + self.publish_progress = 0 + + @property + def creator_items(self): + """Creators that can be shown in create dialog.""" + if self._creator_items is None: + self._creator_items = self._collect_creator_items() + return self._creator_items + + @abstractmethod + def _collect_creator_items(self): + """Receive CreatorItems to work with. + + Returns: + Dict[str, CreatorItem]: Creator items by their identifier. + """ + + pass + + def get_creator_icon(self, identifier): + """Function to receive icon for creator identifier. + + Args: + identifier (str): Creator's identifier for which the icon + should be returned. + """ + + creator_item = self.creator_items.get(identifier) + if creator_item is not None: + return creator_item.icon + return None + + def get_thumbnail_temp_dir_path(self): + """Return path to directory where thumbnails can be temporarily + stored. + + Returns: + str: Path to a directory. + """ + + return os.path.join( + tempfile.gettempdir(), + "publisher_thumbnails", + get_process_id() + ) + + def clear_thumbnail_temp_dir_path(self): + """Remove content of thumbnail temp directory.""" + + dirpath = self.get_thumbnail_temp_dir_path() + if os.path.exists(dirpath): + shutil.rmtree(dirpath) + + +class PublisherController(BasePublisherController): """Middleware between UI, CreateContext and publish Context. Handle both creation and publishing parts. Args: - dbcon (AvalonMongoDB): Connection to mongo with context. headless (bool): Headless publishing. ATM not implemented or used. 
""" - def __init__(self, dbcon=None, headless=False): - self.log = logging.getLogger("PublisherController") - self.host = avalon.api.registered_host() - self.headless = headless - self.create_context = CreateContext( - self.host, dbcon, headless=headless, reset=False + _log = None + + def __init__(self, headless=False): + super(PublisherController, self).__init__() + + self._host = registered_host() + self._headless = headless + + self._create_context = CreateContext( + self._host, headless=headless, reset=False ) + self._publish_plugins_proxy = None + # pyblish.api.Context self._publish_context = None # Pyblish report self._publish_report = PublishReport(self) # Store exceptions of validation error - self._publish_validation_errors = [] - # Currently processing plugin errors - self._publish_current_plugin_validation_errors = None - # Any other exception that happened during publishing - self._publish_error = None - # Publishing is in progress - self._publish_is_running = False - # Publishing is over validation order - self._publish_validated = False + self._publish_validation_errors = PublishValidationErrors() + # Publishing should stop at validation stage self._publish_up_validation = False - # All publish plugins are processed - self._publish_finished = False - self._publish_max_progress = 0 - self._publish_progress = 0 # This information is not much important for controller but for widget # which can change (and set) the comment. self._publish_comment_is_set = False @@ -391,23 +1626,9 @@ class PublisherController: pyblish.api.ValidatorOrder + PLUGIN_ORDER_OFFSET ) - # Qt based main thread processor - self._main_thread_processor = MainThreadProcess() # Plugin iterator self._main_thread_iter = None - # Variables where callbacks are stored - self._instances_refresh_callback_refs = set() - self._plugins_refresh_callback_refs = set() - - self._publish_reset_callback_refs = set() - self._publish_started_callback_refs = set() - self._publish_validated_callback_refs = set() - self._publish_stopped_callback_refs = set() - - self._publish_instance_changed_callback_refs = set() - self._publish_plugin_changed_callback_refs = set() - # State flags to prevent executing method which is already in progress self._resetting_plugins = False self._resetting_instances = False @@ -417,105 +1638,74 @@ class PublisherController: @property def project_name(self): - """Current project context.""" - return self.dbcon.Session["AVALON_PROJECT"] + """Current project context defined by host. + + Returns: + str: Project name. + """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.active_project() + + return self._host.get_current_context()["project_name"] @property - def dbcon(self): - """Pointer to AvalonMongoDB in creator context.""" - return self.create_context.dbcon + def current_asset_name(self): + """Current context asset name defined by host. + + Returns: + Union[str, None]: Asset name or None if asset is not set. + """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.Session["AVALON_ASSET"] + + return self._host.get_current_context()["asset_name"] + + @property + def current_task_name(self): + """Current context task name defined by host. + + Returns: + Union[str, None]: Task name or None if task is not set. 
+ """ + + if not hasattr(self._host, "get_current_context"): + return legacy_io.Session["AVALON_TASK"] + + return self._host.get_current_context()["task_name"] @property def instances(self): """Current instances in create context.""" - return self.create_context.instances + return self._create_context.instances_by_id @property - def creators(self): + def convertor_items(self): + return self._create_context.convertor_items_by_id + + @property + def _creators(self): """All creators loaded in create context.""" - return self.create_context.creators + + return self._create_context.creators @property - def manual_creators(self): - """Creators that can be shown in create dialog.""" - return self.create_context.manual_creators - - @property - def host_is_valid(self): - """Host is valid for creation.""" - return self.create_context.host_is_valid - - @property - def publish_plugins(self): + def _publish_plugins(self): """Publish plugins.""" - return self.create_context.publish_plugins - - @property - def plugins_with_defs(self): - """Publish plugins with possible attribute definitions.""" - return self.create_context.plugins_with_defs - - def _create_reference(self, callback): - if inspect.ismethod(callback): - ref = WeakMethod(callback) - elif callable(callback): - ref = weakref.ref(callback) - else: - raise TypeError("Expected function or method got {}".format( - str(type(callback)) - )) - return ref - - def add_instances_refresh_callback(self, callback): - """Callbacks triggered on instances refresh.""" - ref = self._create_reference(callback) - self._instances_refresh_callback_refs.add(ref) - - def add_plugins_refresh_callback(self, callback): - """Callbacks triggered on plugins refresh.""" - ref = self._create_reference(callback) - self._plugins_refresh_callback_refs.add(ref) + return self._create_context.publish_plugins # --- Publish specific callbacks --- - def add_publish_reset_callback(self, callback): - """Callbacks triggered on publishing reset.""" - ref = self._create_reference(callback) - self._publish_reset_callback_refs.add(ref) - - def add_publish_started_callback(self, callback): - """Callbacks triggered on publishing start.""" - ref = self._create_reference(callback) - self._publish_started_callback_refs.add(ref) - - def add_publish_validated_callback(self, callback): - """Callbacks triggered on passing last possible validation order.""" - ref = self._create_reference(callback) - self._publish_validated_callback_refs.add(ref) - - def add_instance_change_callback(self, callback): - """Callbacks triggered before next publish instance process.""" - ref = self._create_reference(callback) - self._publish_instance_changed_callback_refs.add(ref) - - def add_plugin_change_callback(self, callback): - """Callbacks triggered before next plugin processing.""" - ref = self._create_reference(callback) - self._publish_plugin_changed_callback_refs.add(ref) - - def add_publish_stopped_callback(self, callback): - """Callbacks triggered on publishing stop (any reason).""" - ref = self._create_reference(callback) - self._publish_stopped_callback_refs.add(ref) - def get_asset_docs(self): """Get asset documents from cache for whole project.""" return self._asset_docs_cache.get_asset_docs() def get_context_title(self): """Get context title for artist shown at the top of main window.""" + context_title = None - if hasattr(self.host, "get_context_title"): - context_title = self.host.get_context_title() + if hasattr(self._host, "get_context_title"): + context_title = self._host.get_context_title() if 
context_title is None: context_title = os.environ.get("AVALON_APP_NAME") @@ -526,14 +1716,8 @@ class PublisherController: def get_asset_hierarchy(self): """Prepare asset documents into hierarchy.""" - _queue = collections.deque(self.get_asset_docs()) - output = collections.defaultdict(list) - while _queue: - asset_doc = _queue.popleft() - parent_id = asset_doc["data"]["visualParent"] - output[parent_id].append(asset_doc) - return output + return self._asset_docs_cache.get_asset_hierarchy() def get_task_names_by_asset_names(self, asset_names): """Prepare task names by asset name.""" @@ -547,33 +1731,45 @@ class PublisherController: ) return result - def _trigger_callbacks(self, callbacks, *args, **kwargs): - """Helper method to trigger callbacks stored by their rerence.""" - # Trigger reset callbacks - to_remove = set() - for ref in callbacks: - callback = ref() - if callback: - callback(*args, **kwargs) - else: - to_remove.add(ref) + def get_existing_subset_names(self, asset_name): + project_name = self.project_name + asset_doc = self._asset_docs_cache.get_asset_by_name(asset_name) + if not asset_doc: + return None - for ref in to_remove: - callbacks.remove(ref) + asset_id = asset_doc["_id"] + subset_docs = get_subsets( + project_name, asset_ids=[asset_id], fields=["name"] + ) + return { + subset_doc["name"] + for subset_doc in subset_docs + } def reset(self): """Reset everything related to creation and publishing.""" - # Stop publishing self.stop_publish() + self.host_is_valid = self._create_context.host_is_valid + + self._create_context.reset_preparation() + # Reset avalon context - self.create_context.reset_avalon_context() + self._create_context.reset_current_context() + + self._asset_docs_cache.reset() self._reset_plugins() # Publish part must be reset after plugins self._reset_publish() self._reset_instances() + self._create_context.reset_finalization() + + self._emit_event("controller.reset.finished") + + self.emit_card_message("Refreshed..") + def _reset_plugins(self): """Reset to initial state.""" if self._resetting_plugins: @@ -581,11 +1777,19 @@ class PublisherController: self._resetting_plugins = True - self.create_context.reset_plugins() + self._create_context.reset_plugins() + # Reset creator items + self._creator_items = None self._resetting_plugins = False - self._trigger_callbacks(self._plugins_refresh_callback_refs) + self._emit_event("plugins.refresh.finished") + + def _collect_creator_items(self): + return { + identifier: CreatorItem.from_creator(creator) + for identifier, creator in self._create_context.creators.items() + } def _reset_instances(self): """Reset create instances.""" @@ -594,22 +1798,90 @@ class PublisherController: self._resetting_instances = True - self.create_context.reset_context_data() - with self.create_context.bulk_instances_collection(): - self.create_context.reset_instances() - self.create_context.execute_autocreators() + self._create_context.reset_context_data() + with self._create_context.bulk_instances_collection(): + try: + self._create_context.reset_instances() + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.collection.failed", + { + "title": "Instance collection failed", + "failed_info": exc.failed_info + } + ) + + try: + self._create_context.find_convertor_items() + except ConvertorsOperationFailed as exc: + self._emit_event( + "convertors.find.failed", + { + "title": "Collection of unsupported subset failed", + "failed_info": exc.failed_info + } + ) + + try: + self._create_context.execute_autocreators() + + 
except CreatorsOperationFailed as exc: + self._emit_event( + "instances.create.failed", + { + "title": "AutoCreation failed", + "failed_info": exc.failed_info + } + ) self._resetting_instances = False - self._trigger_callbacks(self._instances_refresh_callback_refs) + self._on_create_instance_change() + + def get_thumbnail_paths_for_instances(self, instance_ids): + thumbnail_paths_by_instance_id = ( + self._create_context.thumbnail_paths_by_instance_id + ) + return { + instance_id: thumbnail_paths_by_instance_id.get(instance_id) + for instance_id in instance_ids + } + + def set_thumbnail_paths_for_instances(self, thumbnail_path_mapping): + thumbnail_paths_by_instance_id = ( + self._create_context.thumbnail_paths_by_instance_id + ) + for instance_id, thumbnail_path in thumbnail_path_mapping.items(): + thumbnail_paths_by_instance_id[instance_id] = thumbnail_path + + self._emit_event( + "instance.thumbnail.changed", + { + "mapping": thumbnail_path_mapping + } + ) + + def emit_card_message( + self, message, message_type=CardMessageTypes.standard + ): + self._emit_event( + "show.card.message", + { + "message": message, + "message_type": message_type + } + ) def get_creator_attribute_definitions(self, instances): """Collect creator attribute definitions for multiple instances. Args: - instances(list): List of created instances for + instances(List[CreatedInstance]): List of created instances for which should be attribute definitions returned. """ + + # NOTE it would be great if attrdefs had a hash method implemented + # so they could be used as keys in a dictionary output = [] _attr_defs = {} for instance in instances: @@ -641,9 +1913,10 @@ class PublisherController: which should be attribute definitions returned. include_context(bool): Add context specific attribute definitions. """ + _tmp_items = [] if include_context: - _tmp_items.append(self.create_context) + _tmp_items.append(self._create_context) for instance in instances: _tmp_items.append(instance) @@ -673,7 +1946,7 @@ class PublisherController: attr_values.append((item, value)) output = [] - for plugin in self.plugins_with_defs: + for plugin in self._create_context.plugins_with_defs: plugin_name = plugin.__name__ if plugin_name not in all_defs_by_plugin_name: continue @@ -684,86 +1957,148 @@ class PublisherController: )) return output - def get_icon_for_family(self, family): - """TODO rename to get creator icon.""" - creator = self.creators.get(family) - if creator is not None: - return creator.get_icon() - return None + def get_subset_name( + self, + creator_identifier, + variant, + task_name, + asset_name, + instance_id=None + ): + """Get subset name based on passed data. + + Args: + creator_identifier (str): Identifier of creator which should be + responsible for subset name creation. + variant (str): Variant value from user's input. + task_name (str): Name of task for which the instance is created. + asset_name (str): Name of asset for which the instance is created. + instance_id (Union[str, None]): Existing instance id when subset + name is updated.
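Editor's note: the reset path above converts `CreatorsOperationFailed`/`ConvertorsOperationFailed` into UI events carrying `failed_info` instead of letting the exceptions propagate. An illustrative sketch of that "failure to event" pattern (names are stand-ins, not OpenPype API):

```python
# Operation errors carrying 'failed_info' become UI events instead of
# propagating out of the controller.
class OperationFailed(Exception):
    def __init__(self, failed_info):
        super().__init__("operation failed")
        self.failed_info = failed_info


def run_guarded(operation, emit_event, topic, title):
    try:
        operation()
    except OperationFailed as exc:
        emit_event(topic, {"title": title, "failed_info": exc.failed_info})


events = []


def boom():
    raise OperationFailed([{"creator": "render", "message": "oops"}])


run_guarded(boom, lambda topic, data: events.append((topic, data)),
            "instances.collection.failed", "Instance collection failed")
assert events[0][0] == "instances.collection.failed"
```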
+ """ + + creator = self._creators[creator_identifier] + project_name = self.project_name + asset_doc = self._asset_docs_cache.get_full_asset_by_name(asset_name) + instance = None + if instance_id: + instance = self.instances[instance_id] + + return creator.get_subset_name( + variant, task_name, asset_doc, project_name, instance=instance + ) + + def trigger_convertor_items(self, convertor_identifiers): + self.save_changes() + + success = True + try: + self._create_context.run_convertors(convertor_identifiers) + + except ConvertorsOperationFailed as exc: + success = False + self._emit_event( + "convertors.convert.failed", + { + "title": "Conversion failed", + "failed_info": exc.failed_info + } + ) + + if success: + self.emit_card_message("Conversion finished") + else: + self.emit_card_message("Conversion failed", CardMessageTypes.error) + + self.reset() def create( self, creator_identifier, subset_name, instance_data, options ): """Trigger creation and refresh of instances in UI.""" - creator = self.creators[creator_identifier] - creator.create(subset_name, instance_data, options) - self._trigger_callbacks(self._instances_refresh_callback_refs) + success = True + try: + self._create_context.create_with_unified_error( + creator_identifier, subset_name, instance_data, options + ) + + except CreatorsOperationFailed as exc: + success = False + self._emit_event( + "instances.create.failed", + { + "title": "Creation failed", + "failed_info": exc.failed_info + } + ) + + self._on_create_instance_change() + return success def save_changes(self): """Save changes happened during creation.""" - if self.create_context.host_is_valid: - self.create_context.save_changes() + if not self._create_context.host_is_valid: + return - def remove_instances(self, instances): - """""" + try: + self._create_context.save_changes() + + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.save.failed", + { + "title": "Instances save failed", + "failed_info": exc.failed_info + } + ) + + def remove_instances(self, instance_ids): + """Remove instances based on instance ids. + + Args: + instance_ids (List[str]): List of instance ids to remove. + """ # QUESTION Expect that instances are really removed? In that case save # reset is not required and save changes too. 
self.save_changes() - self.create_context.remove_instances(instances) + self._remove_instances_from_context(instance_ids) - self._trigger_callbacks(self._instances_refresh_callback_refs) + self._on_create_instance_change() - # --- Publish specific implementations --- - @property - def publish_has_finished(self): - return self._publish_finished + def _remove_instances_from_context(self, instance_ids): + instances_by_id = self._create_context.instances_by_id + instances = [ + instances_by_id[instance_id] + for instance_id in instance_ids + ] + try: + self._create_context.remove_instances(instances) + except CreatorsOperationFailed as exc: + self._emit_event( + "instances.remove.failed", + { + "title": "Instance removal failed", + "failed_info": exc.failed_info + } + ) - @property - def publish_is_running(self): - return self._publish_is_running - - @property - def publish_has_validated(self): - return self._publish_validated - - @property - def publish_has_crashed(self): - return bool(self._publish_error) - - @property - def publish_has_validation_errors(self): - return bool(self._publish_validation_errors) - - @property - def publish_max_progress(self): - return self._publish_max_progress - - @property - def publish_progress(self): - return self._publish_progress - - @property - def publish_comment_is_set(self): - return self._publish_comment_is_set - - def get_publish_crash_error(self): - return self._publish_error + def _on_create_instance_change(self): + self._emit_event("instances.refresh.finished") def get_publish_report(self): - return self._publish_report.get_report(self.publish_plugins) + return self._publish_report.get_report(self._publish_plugins) def get_validation_errors(self): - return self._publish_validation_errors + return self._publish_validation_errors.create_report() def _reset_publish(self): - self._publish_is_running = False - self._publish_validated = False + self._reset_attributes() + self._publish_up_validation = False - self._publish_finished = False self._publish_comment_is_set = False - self._main_thread_processor.clear() + self._main_thread_iter = self._publish_iterator() self._publish_context = pyblish.api.Context() # Make sure "comment" is set on publish context @@ -772,24 +2107,30 @@ class PublisherController: # - must not be used for changing CreatedInstances during publishing! # QUESTION # - pop the key after first collector using it would be safest option? - self._publish_context.data["create_context"] = self.create_context + self._publish_context.data["create_context"] = self._create_context - self._publish_report.reset( - self._publish_context, - self.create_context.publish_discover_result + self._publish_plugins_proxy = PublishPluginsProxy( + self._publish_plugins ) - self._publish_validation_errors = [] - self._publish_current_plugin_validation_errors = None - self._publish_error = None - self._publish_max_progress = len(self.publish_plugins) - self._publish_progress = 0 + self._publish_report.reset(self._publish_context, self._create_context) + self._publish_validation_errors.reset(self._publish_plugins_proxy) - self._trigger_callbacks(self._publish_reset_callback_refs) + self.publish_max_progress = len(self._publish_plugins) + + self._emit_event("publish.reset.finished") def set_comment(self, comment): - self._publish_context.data["comment"] = comment - self._publish_comment_is_set = True + """Set comment from UI to pyblish context.
+ + This should always be called before publishing is started, but it + should take effect only once per publish run; the variable + '_publish_comment_is_set' is used to keep track of that. + """ + + if not self._publish_comment_is_set: + self._publish_context.data["comment"] = comment + self._publish_comment_is_set = True def publish(self): """Run publishing.""" @@ -798,37 +2139,42 @@ class PublisherController: def validate(self): """Run publishing and stop after Validation.""" - if self._publish_validated: + if self.publish_has_validated: return self._publish_up_validation = True self._start_publish() def _start_publish(self): """Start or continue in publishing.""" - if self._publish_is_running: + if self.publish_is_running: return # Make sure changes are saved self.save_changes() - self._publish_is_running = True - self._trigger_callbacks(self._publish_started_callback_refs) - self._main_thread_processor.start() + self.publish_is_running = True + + self._emit_event("publish.process.started") + self._publish_next_process() def _stop_publish(self): """Stop or pause publishing.""" - self._publish_is_running = False - self._main_thread_processor.stop() - self._trigger_callbacks(self._publish_stopped_callback_refs) + self.publish_is_running = False + + self._emit_event("publish.process.stopped") def stop_publish(self): """Stop publishing process (any reason).""" - if self._publish_is_running: + + if self.publish_is_running: self._stop_publish() - def run_action(self, plugin, action): + def run_action(self, plugin_id, action_id): # TODO handle result in UI + plugin = self._publish_plugins_proxy.get_plugin(plugin_id) + action = self._publish_plugins_proxy.get_action(action_id) + result = pyblish.plugin.process( plugin, self._publish_context, None, action.id ) @@ -842,21 +2188,24 @@ class PublisherController: # There are validation errors and validation is passed # - can't make any progress if ( - self._publish_validated - and self._publish_validation_errors + self.publish_has_validated + and self.publish_has_validation_errors ): item = MainThreadItem(self.stop_publish) # Any unexpected error happened # - everything should stop - elif self._publish_error: + elif self.publish_has_crashed: item = MainThreadItem(self.stop_publish) # Everything is ok so try to get new processing item else: item = next(self._main_thread_iter) - self._main_thread_processor.add_item(item) + self._process_main_thread_item(item) + + def _process_main_thread_item(self, item): + item() def _publish_iterator(self): """Main logic center of publishing. @@ -871,32 +2220,24 @@ class PublisherController: QUESTION: Does validate button still make sense? """ - for idx, plugin in enumerate(self.publish_plugins): + for idx, plugin in enumerate(self._publish_plugins): self._publish_progress = idx - # Reset current plugin validations error - self._publish_current_plugin_validation_errors = None - # Check if plugin is over validation order - if not self._publish_validated: - self._publish_validated = ( + if not self.publish_has_validated: + self.publish_has_validated = ( plugin.order >= self._validation_order ) - # Trigger callbacks when validation stage is passed - if self._publish_validated: - self._trigger_callbacks( - self._publish_validated_callback_refs - ) # Stop if plugin is over validation order and process # should process up to validation.
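Editor's note: `_publish_iterator` is a generator that yields one `MainThreadItem` per step, and `_publish_next_process` pulls a single item at a time, which is what lets a UI event loop breathe between plugins. A rough self-contained sketch of that generator-driven loop (simplified; the real iterator also handles validation ordering and per-instance processing):

```python
# One callable item per step; the controller processes a single item at a
# time so the UI can update between plugin runs.
class MainThreadItem:
    def __init__(self, callback, *args, **kwargs):
        self._callback = callback
        self._args = args
        self._kwargs = kwargs

    def process(self):
        self._callback(*self._args, **self._kwargs)


def publish_iterator(plugins, process_plugin, stop):
    for plugin in plugins:
        yield MainThreadItem(process_plugin, plugin)
    # Cleanup step once all plugins were handed out
    yield MainThreadItem(stop)


processed = []
items = publish_iterator(
    ["CollectScene", "ValidateNames"],
    processed.append,
    lambda: processed.append("stop"),
)
for item in items:
    item.process()
assert processed == ["CollectScene", "ValidateNames", "stop"]
```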
- if self._publish_up_validation and self._publish_validated: + if self._publish_up_validation and self.publish_has_validated: yield MainThreadItem(self.stop_publish) # Stop if validation is over and validation errors happened if ( - self._publish_validated - and self._publish_validation_errors + self.publish_has_validated + and self.publish_has_validation_errors ): yield MainThreadItem(self.stop_publish) @@ -904,9 +2245,14 @@ class PublisherController: self._publish_report.add_plugin_iter(plugin, self._publish_context) # Trigger callback that new plugin is going to be processed - self._trigger_callbacks( - self._publish_plugin_changed_callback_refs, plugin + plugin_label = plugin.__name__ + if hasattr(plugin, "label") and plugin.label: + plugin_label = plugin.label + self._emit_event( + "publish.process.plugin.changed", + {"plugin_label": plugin_label} ) + # Plugin is instance plugin if plugin.__instanceEnabled__: instances = pyblish.logic.instances_by_plugin( @@ -920,11 +2266,15 @@ class PublisherController: if instance.data.get("publish") is False: continue - self._trigger_callbacks( - self._publish_instance_changed_callback_refs, - self._publish_context, - instance + instance_label = ( + instance.data.get("label") + or instance.data["name"] ) + self._emit_event( + "publish.process.instance.changed", + {"instance_label": instance_label} + ) + yield MainThreadItem( self._process_and_continue, plugin, instance ) @@ -936,10 +2286,14 @@ class PublisherController: [plugin], families ) if plugins: - self._trigger_callbacks( - self._publish_instance_changed_callback_refs, - self._publish_context, - None + instance_label = ( + self._publish_context.data.get("label") + or self._publish_context.data.get("name") + or "Context" + ) + self._emit_event( + "publish.process.instance.changed", + {"instance_label": instance_label} ) yield MainThreadItem( self._process_and_continue, plugin, None @@ -948,24 +2302,17 @@ class PublisherController: self._publish_report.set_plugin_skipped() # Cleanup of publishing process - self._publish_finished = True - self._publish_progress = self._publish_max_progress + self.publish_has_finished = True + self.publish_progress = self.publish_max_progress yield MainThreadItem(self.stop_publish) def _add_validation_error(self, result): - if self._publish_current_plugin_validation_errors is None: - self._publish_current_plugin_validation_errors = { - "plugin": result["plugin"], - "errors": [] - } - self._publish_validation_errors.append( - self._publish_current_plugin_validation_errors - ) - - self._publish_current_plugin_validation_errors["errors"].append({ - "exception": result["error"], - "instance": result["instance"] - }) + self.publish_has_validation_errors = True + self._publish_validation_errors.add_error( + result["plugin"], + result["error"], + result["instance"] + ) def _process_and_continue(self, plugin, instance): result = pyblish.plugin.process( @@ -978,18 +2325,23 @@ class PublisherController: if exception: if ( isinstance(exception, PublishValidationError) - and not self._publish_validated + and not self.publish_has_validated ): self._add_validation_error(result) else: - self._publish_error = exception + if isinstance(exception, KnownPublishError): + msg = str(exception) + else: + msg = ( + "Something went wrong. Send report" + " to your supervisor or OpenPype." 
+ ) + self.publish_error_msg = msg + self.publish_has_crashed = True self._publish_next_process() - def reset_project_data_cache(self): - self._asset_docs_cache.reset() - def collect_families_from_instances(instances, only_active=False): """Collect all families for passed publish instances. diff --git a/openpype/tools/publisher/control_qt.py b/openpype/tools/publisher/control_qt.py new file mode 100644 index 0000000000..132b42f9ec --- /dev/null +++ b/openpype/tools/publisher/control_qt.py @@ -0,0 +1,448 @@ +import collections +from abc import abstractmethod, abstractproperty + +from qtpy import QtCore + +from openpype.lib.events import Event +from openpype.pipeline.create import CreatedInstance + +from .control import ( + MainThreadItem, + PublisherController, + BasePublisherController, +) + + +class MainThreadProcess(QtCore.QObject): + """Qt based main thread process executor. + + Has a timer which checks every 50ms whether there is a new item to process. + + This approach makes it possible to update the UI while a plugin is in progress. + """ + + count_timeout = 2 + + def __init__(self): + super(MainThreadProcess, self).__init__() + self._items_to_process = collections.deque() + + timer = QtCore.QTimer() + timer.setInterval(0) + + timer.timeout.connect(self._execute) + + self._timer = timer + self._switch_counter = self.count_timeout + + def process(self, func, *args, **kwargs): + item = MainThreadItem(func, *args, **kwargs) + self.add_item(item) + + def add_item(self, item): + self._items_to_process.append(item) + + def _execute(self): + if not self._items_to_process: + return + + if self._switch_counter > 0: + self._switch_counter -= 1 + return + + self._switch_counter = self.count_timeout + + item = self._items_to_process.popleft() + item.process() + + def start(self): + if not self._timer.isActive(): + self._timer.start() + + def stop(self): + if self._timer.isActive(): + self._timer.stop() + + def clear(self): + if self._timer.isActive(): + self._timer.stop() + self._items_to_process = collections.deque() + + +class QtPublisherController(PublisherController): + def __init__(self, *args, **kwargs): + self._main_thread_processor = MainThreadProcess() + + super(QtPublisherController, self).__init__(*args, **kwargs) + + self.event_system.add_callback( + "publish.process.started", self._qt_on_publish_start + ) + self.event_system.add_callback( + "publish.process.stopped", self._qt_on_publish_stop + ) + + def _reset_publish(self): + super(QtPublisherController, self)._reset_publish() + self._main_thread_processor.clear() + + def _process_main_thread_item(self, item): + self._main_thread_processor.add_item(item) + + def _qt_on_publish_start(self): + self._main_thread_processor.start() + + def _qt_on_publish_stop(self): + self._main_thread_processor.stop() + + +class QtRemotePublishController(BasePublisherController): + """Abstract Remote controller for Qt UI. + + This controller should be used in the process where the UI is running, + and should listen and ask for data on a client side. + + All objects that are used during UI processing should be convertible on + the client side to JSON-serializable data and then recreated here. Keep in + mind that all changes made here should be sent back to the client + controller before critical actions. + + ATM this was not tested and will require some changes. All code written + here is based on a theoretical idea of how it could work.
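Editor's note: a pure-Python sketch of what `MainThreadProcess` does, with the Qt timer replaced by manual `tick` calls: queue callables, run at most one per tick, and skip `count_timeout` ticks between items so the event loop gets time to repaint.

```python
import collections


class TickedProcessor:
    count_timeout = 2

    def __init__(self):
        self._items = collections.deque()
        self._switch_counter = self.count_timeout

    def add_item(self, func):
        self._items.append(func)

    def tick(self):
        if not self._items:
            return
        if self._switch_counter > 0:
            self._switch_counter -= 1
            return
        self._switch_counter = self.count_timeout
        self._items.popleft()()


ran = []
proc = TickedProcessor()
proc.add_item(lambda: ran.append("a"))
for _ in range(3):  # Two cooldown ticks, then the item runs
    proc.tick()
assert ran == ["a"]
```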
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._created_instances = {} + self._thumbnail_paths_by_instance_id = None + + def _reset_attributes(self): + super()._reset_attributes() + self._thumbnail_paths_by_instance_id = None + + @abstractmethod + def _get_serialized_instances(self): + """Receive serialized instances from client process. + + Returns: + List[Dict[str, Any]]: Serialized instances. + """ + + pass + + def _on_create_instance_change(self): + serialized_instances = self._get_serialized_instances() + + created_instances = {} + for serialized_data in serialized_instances: + item = CreatedInstance.deserialize_on_remote(serialized_data) + created_instances[item.id] = item + + self._created_instances = created_instances + self._emit_event("instances.refresh.finished") + + def remote_events_handler(self, event_data): + event = Event.from_data(event_data) + + # Topics that cause "replication" of controller changes + if event.topic == "publish.max_progress.changed": + self.publish_max_progress = event["value"] + return + + if event.topic == "publish.progress.changed": + self.publish_progress = event["value"] + return + + if event.topic == "publish.has_validated.changed": + self.publish_has_validated = event["value"] + return + + if event.topic == "publish.is_running.changed": + self.publish_is_running = event["value"] + return + + if event.topic == "publish.publish_error.changed": + self.publish_error_msg = event["value"] + return + + if event.topic == "publish.has_crashed.changed": + self.publish_has_crashed = event["value"] + return + + if event.topic == "publish.has_validation_errors.changed": + self.publish_has_validation_errors = event["value"] + return + + if event.topic == "publish.finished.changed": + self.publish_has_finished = event["value"] + return + + if event.topic == "publish.host_is_valid.changed": + self.host_is_valid = event["value"] + return + + # Don't skip because UI want know about it too + if event.topic == "instance.thumbnail.changed": + for instance_id, path in event["mapping"].items(): + self.thumbnail_paths_by_instance_id[instance_id] = path + + # Topics that can be just passed by because are not affecting + # controller itself + # - "show.card.message" + # - "show.detailed.help" + # - "publish.reset.finished" + # - "instances.refresh.finished" + # - "plugins.refresh.finished" + # - "controller.reset.finished" + # - "publish.process.started" + # - "publish.process.stopped" + # - "publish.process.plugin.changed" + # - "publish.process.instance.changed" + self.event_system.emit_event(event) + + @abstractproperty + def project_name(self): + """Current context project name from client. + + Returns: + str: Name of project. + """ + + pass + + @abstractproperty + def current_asset_name(self): + """Current context asset name from client. + + Returns: + Union[str, None]: Name of asset. + """ + + pass + + @abstractproperty + def current_task_name(self): + """Current context task name from client. + + Returns: + Union[str, None]: Name of task. + """ + + pass + + @property + def instances(self): + """Collected/created instances. + + Returns: + List[CreatedInstance]: List of created instances. + """ + + return self._created_instances + + def get_context_title(self): + """Get context title for artist shown at the top of main window. + + Returns: + Union[str, None]: Context title for window or None. In case of None + a warning is displayed (not nice for artists). 
+ """ + + pass + + def get_asset_docs(self): + pass + + def get_asset_hierarchy(self): + pass + + def get_task_names_by_asset_names(self, asset_names): + pass + + def get_existing_subset_names(self, asset_name): + pass + + @property + def thumbnail_paths_by_instance_id(self): + if self._thumbnail_paths_by_instance_id is None: + self._thumbnail_paths_by_instance_id = ( + self._collect_thumbnail_paths_by_instance_id() + ) + return self._thumbnail_paths_by_instance_id + + def get_thumbnail_path_for_instance(self, instance_id): + return self.thumbnail_paths_by_instance_id.get(instance_id) + + def set_thumbnail_path_for_instance(self, instance_id, thumbnail_path): + self._set_thumbnail_path_on_context(self, instance_id, thumbnail_path) + + @abstractmethod + def _collect_thumbnail_paths_by_instance_id(self): + """Collect thumbnail paths by instance id in remote controller. + + These should be collected from 'CreatedContext' there. + + Returns: + Dict[str, str]: Mapping of thumbnail path by instance id. + """ + + pass + + @abstractmethod + def _set_thumbnail_path_on_context(self, instance_id, thumbnail_path): + """Send change of thumbnail path in remote controller. + + That should trigger event 'instance.thumbnail.changed' which is + captured and handled in default implementation in this class. + """ + + pass + + @abstractmethod + def get_subset_name( + self, + creator_identifier, + variant, + task_name, + asset_name, + instance_id=None + ): + """Get subset name based on passed data. + + Args: + creator_identifier (str): Identifier of creator which should be + responsible for subset name creation. + variant (str): Variant value from user's input. + task_name (str): Name of task for which is instance created. + asset_name (str): Name of asset for which is instance created. + instance_id (Union[str, None]): Existing instance id when subset + name is updated. + """ + + pass + + @abstractmethod + def create( + self, creator_identifier, subset_name, instance_data, options + ): + """Trigger creation by creator identifier. + + Should also trigger refresh of instanes. + + Args: + creator_identifier (str): Identifier of Creator plugin. + subset_name (str): Calculated subset name. + instance_data (Dict[str, Any]): Base instance data with variant, + asset name and task name. + options (Dict[str, Any]): Data from pre-create attributes. + """ + + pass + + def _get_instance_changes_for_client(self): + """Preimplemented method to receive instance changes for client.""" + + created_instance_changes = {} + for instance_id, instance in self._created_instances.items(): + created_instance_changes[instance_id] = ( + instance.remote_changes() + ) + return created_instance_changes + + @abstractmethod + def _send_instance_changes_to_client(self): + instance_changes = self._get_instance_changes_for_client() + # Implement to send 'instance_changes' value to client + + @abstractmethod + def save_changes(self): + """Save changes happened during creation.""" + + self._send_instance_changes_to_client() + + @abstractmethod + def remove_instances(self, instance_ids): + """Remove list of instances from create context.""" + # TODO add Args: + + pass + + @abstractmethod + def get_publish_report(self): + pass + + @abstractmethod + def get_validation_errors(self): + pass + + @abstractmethod + def reset(self): + """Reset whole controller. + + This should reset create context, publish context and all variables + that are related to it. 
+ """ + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def publish(self): + """Trigger publishing without any order limitations.""" + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def validate(self): + """Trigger publishing which will stop after validation order.""" + + self._send_instance_changes_to_client() + pass + + @abstractmethod + def stop_publish(self): + """Stop publishing can be also used to pause publishing. + + Pause of publishing is possible only if all plugins successfully + finished. + """ + + pass + + @abstractmethod + def run_action(self, plugin_id, action_id): + """Trigger pyblish action on a plugin. + + Args: + plugin_id (str): Id of publish plugin. + action_id (str): Id of publish action. + """ + + pass + + @abstractmethod + def set_comment(self, comment): + """Set comment on pyblish context. + + Set "comment" key on current pyblish.api.Context data. + + Args: + comment (str): Artist's comment. + """ + + pass + + @abstractmethod + def emit_card_message(self, message): + """Emit a card message which can have a lifetime. + + This is for UI purposes. Method can be extended to more arguments + in future e.g. different message timeout or type (color). + + Args: + message (str): Message that will be showed. + """ + + pass diff --git a/openpype/tools/publisher/publish_report_viewer/__init__.py b/openpype/tools/publisher/publish_report_viewer/__init__.py index ce1cc3729c..2c51e5d736 100644 --- a/openpype/tools/publisher/publish_report_viewer/__init__.py +++ b/openpype/tools/publisher/publish_report_viewer/__init__.py @@ -1,3 +1,5 @@ +from qtpy import QtWidgets + from .report_items import ( PublishReport ) @@ -16,4 +18,13 @@ __all__ = ( "PublishReportViewerWidget", "PublishReportViewerWindow", + + "main", ) + + +def main(): + app = QtWidgets.QApplication([]) + window = PublishReportViewerWindow() + window.show() + return app.exec_() diff --git a/openpype/tools/publisher/publish_report_viewer/constants.py b/openpype/tools/publisher/publish_report_viewer/constants.py index 8fbb9342ca..529ecfc5c0 100644 --- a/openpype/tools/publisher/publish_report_viewer/constants.py +++ b/openpype/tools/publisher/publish_report_viewer/constants.py @@ -1,4 +1,4 @@ -from Qt import QtCore +from qtpy import QtCore ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 diff --git a/openpype/tools/publisher/publish_report_viewer/delegates.py b/openpype/tools/publisher/publish_report_viewer/delegates.py index 9cd4f52174..6cd0886e6b 100644 --- a/openpype/tools/publisher/publish_report_viewer/delegates.py +++ b/openpype/tools/publisher/publish_report_viewer/delegates.py @@ -1,5 +1,5 @@ import collections -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from .constants import ( ITEM_IS_GROUP_ROLE, ITEM_ERRORED_ROLE, @@ -201,10 +201,10 @@ class GroupItemDelegate(QtWidgets.QStyledItemDelegate): style = QtWidgets.QApplicaion.style() style.proxy().drawPrimitive( - style.PE_PanelItemViewItem, option, painter, widget + QtWidgets.QStyle.PE_PanelItemViewItem, option, painter, widget ) _rect = style.proxy().subElementRect( - style.SE_ItemViewItemText, option, widget + QtWidgets.QStyle.SE_ItemViewItemText, option, widget ) bg_rect = QtCore.QRectF(option.rect) bg_rect.setY(_rect.y()) @@ -265,7 +265,7 @@ class GroupItemDelegate(QtWidgets.QStyledItemDelegate): else: style = QtWidgets.QApplicaion.style() _rect = style.proxy().subElementRect( - style.SE_ItemViewItemText, option, widget + QtWidgets.QStyle.SE_ItemViewItemText, option, widget ) bg_rect = 
QtCore.QRectF(option.rect) diff --git a/openpype/tools/publisher/publish_report_viewer/model.py b/openpype/tools/publisher/publish_report_viewer/model.py index a88129a358..37da4ab3f2 100644 --- a/openpype/tools/publisher/publish_report_viewer/model.py +++ b/openpype/tools/publisher/publish_report_viewer/model.py @@ -1,8 +1,9 @@ import uuid -from Qt import QtCore, QtGui +from qtpy import QtCore, QtGui import pyblish.api +from openpype.tools.utils.lib import html_escape from .constants import ( ITEM_ID_ROLE, ITEM_IS_GROUP_ROLE, @@ -45,7 +46,8 @@ class InstancesModel(QtGui.QStandardItemModel): all_removed = True for instance_item in instance_items: item = QtGui.QStandardItem(instance_item.label) - item.setData(instance_item.label, ITEM_LABEL_ROLE) + instance_label = html_escape(instance_item.label) + item.setData(instance_label, ITEM_LABEL_ROLE) item.setData(instance_item.errored, ITEM_ERRORED_ROLE) item.setData(instance_item.id, ITEM_ID_ROLE) item.setData(instance_item.removed, INSTANCE_REMOVED_ROLE) diff --git a/openpype/tools/publisher/publish_report_viewer/report_items.py b/openpype/tools/publisher/publish_report_viewer/report_items.py index b47d14da25..206f999bac 100644 --- a/openpype/tools/publisher/publish_report_viewer/report_items.py +++ b/openpype/tools/publisher/publish_report_viewer/report_items.py @@ -79,14 +79,12 @@ class PublishReport: context_data = data["context"] context_data["name"] = "context" - context_data["label"] = context_data["label"] or "Context" + context_data["label"] = context_data.get("label") or "Context" logs = [] plugins_items_by_id = {} - plugins_id_order = [] for plugin_data in data["plugins_data"]: item = PluginItem(plugin_data) - plugins_id_order.append(item.id) plugins_items_by_id[item.id] = item for instance_data_item in plugin_data["instances_data"]: instance_id = instance_data_item["id"] @@ -95,6 +93,14 @@ class PublishReport: copy.deepcopy(log_item_data), item.id, instance_id ) logs.append(log_item) + sorted_plugins = sorted( + plugins_items_by_id.values(), + key=lambda item: item.order + ) + plugins_id_order = [ + plugin_item.id + for plugin_item in sorted_plugins + ] logs_by_instance_id = collections.defaultdict(list) for log_item in logs: diff --git a/openpype/tools/publisher/publish_report_viewer/widgets.py b/openpype/tools/publisher/publish_report_viewer/widgets.py index fd226ea0e4..dc449b6b69 100644 --- a/openpype/tools/publisher/publish_report_viewer/widgets.py +++ b/openpype/tools/publisher/publish_report_viewer/widgets.py @@ -1,4 +1,5 @@ -from Qt import QtWidgets, QtCore, QtGui +from math import ceil +from qtpy import QtWidgets, QtCore, QtGui from openpype.widgets.nice_checkbox import NiceCheckbox @@ -26,6 +27,9 @@ class PluginLoadReportModel(QtGui.QStandardItemModel): parent = self.invisibleRootItem() parent.removeRows(0, parent.rowCount()) + if report is None: + return + new_items = [] new_items_by_filepath = {} for filepath in report.crashed_plugin_paths.keys(): @@ -72,11 +76,11 @@ class PluginLoadReportWidget(QtWidgets.QWidget): super(PluginLoadReportWidget, self).__init__(parent) view = QtWidgets.QTreeView(self) - view.setEditTriggers(view.NoEditTriggers) + view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) view.setTextElideMode(QtCore.Qt.ElideLeft) view.setHeaderHidden(True) view.setAlternatingRowColors(True) - view.setVerticalScrollMode(view.ScrollPerPixel) + view.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel) model = PluginLoadReportModel() view.setModel(model) @@ -137,13 +141,85 @@ class 
PluginLoadReportWidget(QtWidgets.QWidget): self._model.set_report(report) +class ZoomPlainText(QtWidgets.QPlainTextEdit): + min_point_size = 1.0 + max_point_size = 200.0 + + def __init__(self, *args, **kwargs): + super(ZoomPlainText, self).__init__(*args, **kwargs) + + anim_timer = QtCore.QTimer() + anim_timer.setInterval(20) + + anim_timer.timeout.connect(self._scaling_callback) + + self._anim_timer = anim_timer + self._scheduled_scalings = 0 + self._point_size = None + + def wheelEvent(self, event): + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers != QtCore.Qt.ControlModifier: + super(ZoomPlainText, self).wheelEvent(event) + return + + degrees = float(event.delta()) / 8 + steps = int(ceil(degrees / 5)) + self._scheduled_scalings += steps + if (self._scheduled_scalings * steps < 0): + self._scheduled_scalings = steps + + self._anim_timer.start() + + def _scaling_callback(self): + if self._scheduled_scalings == 0: + self._anim_timer.stop() + return + + factor = 1.0 + (self._scheduled_scalings / 300) + font = self.font() + + if self._point_size is None: + point_size = font.pointSizeF() + else: + point_size = self._point_size + + point_size *= factor + min_hit = False + max_hit = False + if point_size < self.min_point_size: + point_size = self.min_point_size + min_hit = True + elif point_size > self.max_point_size: + point_size = self.max_point_size + max_hit = True + + self._point_size = point_size + + font.setPointSizeF(point_size) + # Using 'self.setFont(font)' would not be propagated when stylesheets + # are applied on this widget + self.setStyleSheet("font-size: {}pt".format(font.pointSize())) + + if ( + (max_hit and self._scheduled_scalings > 0) + or (min_hit and self._scheduled_scalings < 0) + ): + self._scheduled_scalings = 0 + + elif self._scheduled_scalings > 0: + self._scheduled_scalings -= 1 + else: + self._scheduled_scalings += 1 + + class DetailsWidget(QtWidgets.QWidget): def __init__(self, parent): super(DetailsWidget, self).__init__(parent) - output_widget = QtWidgets.QPlainTextEdit(self) - output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) + output_widget = ZoomPlainText(self) output_widget.setObjectName("PublishLogConsole") + output_widget.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) @@ -268,7 +344,7 @@ class DetailsPopup(QtWidgets.QDialog): self.closed.emit() -class PublishReportViewerWidget(QtWidgets.QWidget): +class PublishReportViewerWidget(QtWidgets.QFrame): def __init__(self, parent=None): super(PublishReportViewerWidget, self).__init__(parent) @@ -296,8 +372,10 @@ class PublishReportViewerWidget(QtWidgets.QWidget): instances_view.setModel(instances_proxy) instances_view.setIndentation(0) instances_view.setHeaderHidden(True) - instances_view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) - instances_view.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection) + instances_view.setEditTriggers( + QtWidgets.QAbstractItemView.NoEditTriggers) + instances_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) instances_view.setExpandsOnDoubleClick(False) instances_delegate = GroupItemDelegate(instances_view) @@ -317,8 +395,10 @@ class PublishReportViewerWidget(QtWidgets.QWidget): plugins_view.setModel(plugins_proxy) plugins_view.setIndentation(0) plugins_view.setHeaderHidden(True) - plugins_view.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection) - plugins_view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) + 
plugins_view.setSelectionMode( + QtWidgets.QAbstractItemView.ExtendedSelection) + plugins_view.setEditTriggers( + QtWidgets.QAbstractItemView.NoEditTriggers) plugins_view.setExpandsOnDoubleClick(False) plugins_delegate = GroupItemDelegate(plugins_view) diff --git a/openpype/tools/publisher/publish_report_viewer/window.py b/openpype/tools/publisher/publish_report_viewer/window.py index 678884677c..127a65dd9b 100644 --- a/openpype/tools/publisher/publish_report_viewer/window.py +++ b/openpype/tools/publisher/publish_report_viewer/window.py @@ -1,11 +1,12 @@ import os import json import six +import uuid + import appdirs -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype import style -from openpype.lib import JSONSettingRegistry from openpype.resources import get_openpype_icon_filepath from openpype.tools import resources from openpype.tools.utils import ( @@ -23,38 +24,198 @@ else: from report_items import PublishReport -FILEPATH_ROLE = QtCore.Qt.UserRole + 1 -MODIFIED_ROLE = QtCore.Qt.UserRole + 2 +ITEM_ID_ROLE = QtCore.Qt.UserRole + 1 -class PublisherReportRegistry(JSONSettingRegistry): - """Class handling storing publish report tool. - - Attributes: - vendor (str): Name used for path construction. - product (str): Additional name used for path construction. +def get_reports_dir(): + """Root directory where publish reports are stored for next session. + Returns: + str: Path to directory where reports are stored. """ + report_dir = os.path.join( + appdirs.user_data_dir("openpype", "pypeclub"), + "publish_report_viewer" + ) + if not os.path.exists(report_dir): + os.makedirs(report_dir) + return report_dir + + +class PublishReportItem: + """Report item representing one file in report directory.""" + + def __init__(self, content): + item_id = content.get("id") + changed = False + if not item_id: + item_id = str(uuid.uuid4()) + changed = True + content["id"] = item_id + + if not content.get("report_version"): + changed = True + content["report_version"] = "0.0.1" + + report_path = os.path.join(get_reports_dir(), item_id) + file_modified = None + if os.path.exists(report_path): + file_modified = os.path.getmtime(report_path) + self.content = content + self.report_path = report_path + self.file_modified = file_modified + self._loaded_label = content.get("label") + self._changed = changed + self.publish_report = PublishReport(content) + + @property + def version(self): + return self.content["report_version"] + + @property + def id(self): + return self.content["id"] + + def get_label(self): + return self.content.get("label") or "Unfilled label" + + def set_label(self, label): + if not label: + self.content.pop("label", None) + return + self.content["label"] = label + + label = property(get_label, set_label) + + def save(self): + save = False + if ( + self._changed + or self._loaded_label != self.label + or not os.path.exists(self.report_path) + or self.file_modified != os.path.getmtime(self.report_path) + ): + save = True + + if not save: + return + + with open(self.report_path, "w") as stream: + json.dump(self.content, stream) + + self._loaded_label = self.content.get("label") + self._changed = False + self.file_modified = os.path.getmtime(self.report_path) + + @classmethod + def from_filepath(cls, filepath): + if not os.path.exists(filepath): + return None + + try: + with open(filepath, "r") as stream: + content = json.load(stream) + + return cls(content) + except Exception: + return None + + def remove_file(self): + if os.path.exists(self.report_path): +
os.remove(self.report_path) + + def update_file_content(self): + if not os.path.exists(self.report_path): + return + + file_modified = os.path.getmtime(self.report_path) + if file_modified == self.file_modified: + return + + with open(self.report_path, "r") as stream: + content = json.load(stream) + + item_id = content.get("id") + version = content.get("report_version") + if not item_id: + item_id = str(uuid.uuid4()) + content["id"] = item_id + + if not version: + version = "0.0.1" + content["report_version"] = version + + self.content = content + self.file_modified = file_modified + + +class PublisherReportHandler: + """Class handling storing publish report tool.""" + + def __init__(self): - self.vendor = "pypeclub" - self.product = "openpype" - name = "publish_report_viewer" - path = appdirs.user_data_dir(self.product, self.vendor) - super(PublisherReportRegistry, self).__init__(name, path) + self._reports = None + self._reports_by_id = {} + + def reset(self): + self._reports = None + self._reports_by_id = {} + + def list_reports(self): + if self._reports is not None: + return self._reports + + reports = [] + reports_by_id = {} + report_dir = get_reports_dir() + for filename in os.listdir(report_dir): + ext = os.path.splitext(filename)[-1] + if ext == ".json": + continue + filepath = os.path.join(report_dir, filename) + item = PublishReportItem.from_filepath(filepath) + reports.append(item) + reports_by_id[item.id] = item + + self._reports = reports + self._reports_by_id = reports_by_id + return reports + + def remove_report_items(self, item_id): + item = self._reports_by_id.get(item_id) + if item: + try: + item.remove_file() + self._reports_by_id.pop(item_id, None) + except Exception: + pass -class LoadedFilesMopdel(QtGui.QStandardItemModel): +class LoadedFilesModel(QtGui.QStandardItemModel): def __init__(self, *args, **kwargs): - super(LoadedFilesMopdel, self).__init__(*args, **kwargs) - self.setColumnCount(2) - self._items_by_filepath = {} - self._reports_by_filepath = {} + super(LoadedFilesModel, self).__init__(*args, **kwargs) - self._registry = PublisherReportRegistry() + self._items_by_id = {} + self._report_items_by_id = {} + + self._handler = PublisherReportHandler() self._loading_registry = False - self._load_registry() + + def refresh(self): + self._handler.reset() + self._items_by_id = {} + self._report_items_by_id = {} + + new_items = [] + for report_item in self._handler.list_reports(): + item = self._create_item(report_item) + self._report_items_by_id[report_item.id] = report_item + self._items_by_id[report_item.id] = item + new_items.append(item) + + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) def headerData(self, section, orientation, role): if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole): @@ -63,22 +224,7 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): if section == 1: return "Modified" return "" - super(LoadedFilesMopdel, self).headerData(section, orientation, role) - - def _load_registry(self): - self._loading_registry = True - try: - filepaths = self._registry.get_item("filepaths") - self.add_filepaths(filepaths) - except ValueError: - pass - self._loading_registry = False - - def _store_registry(self): - if self._loading_registry: - return - filepaths = list(self._items_by_filepath.keys()) - self._registry.set_item("filepaths", filepaths) + super(LoadedFilesModel, self).headerData(section, orientation, role) def data(self, index, role=None): if role is None: @@ -88,17 +234,28 @@ class
LoadedFilesMopdel(QtGui.QStandardItemModel): if col != 0: index = self.index(index.row(), 0, index.parent()) - if role == QtCore.Qt.ToolTipRole: - if col == 0: - role = FILEPATH_ROLE - elif col == 1: - return "File modified" + return super(LoadedFilesModel, self).data(index, role) + + def setData(self, index, value, role): + if role == QtCore.Qt.EditRole: + item_id = index.data(ITEM_ID_ROLE) + report_item = self._report_items_by_id.get(item_id) + if report_item is not None: + report_item.label = value + report_item.save() + value = report_item.label + + return super(LoadedFilesModel, self).setData(index, value, role) + + def _create_item(self, report_item): + if report_item.id in self._items_by_id: return None - elif role == QtCore.Qt.DisplayRole: - if col == 1: - role = MODIFIED_ROLE - return super(LoadedFilesMopdel, self).data(index, role) + item = QtGui.QStandardItem(report_item.label) + item.setColumnCount(self.columnCount()) + item.setData(report_item.id, ITEM_ID_ROLE) + + return item def add_filepaths(self, filepaths): if not filepaths: @@ -110,9 +267,6 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): filtered_paths = [] for filepath in filepaths: normalized_path = os.path.normpath(filepath) - if normalized_path in self._items_by_filepath: - continue - if ( os.path.exists(normalized_path) and normalized_path not in filtered_paths @@ -127,54 +281,46 @@ class LoadedFilesMopdel(QtGui.QStandardItemModel): try: with open(normalized_path, "r") as stream: data = json.load(stream) - report = PublishReport(data) + report_item = PublishReportItem(data) except Exception: # TODO handle errors continue - modified = os.path.getmtime(normalized_path) - item = QtGui.QStandardItem(os.path.basename(normalized_path)) - item.setColumnCount(self.columnCount()) - item.setData(normalized_path, FILEPATH_ROLE) - item.setData(modified, MODIFIED_ROLE) + label = data.get("label") + if not label: + report_item.label = ( + os.path.splitext(os.path.basename(filepath))[0] + ) + + item = self._create_item(report_item) + if item is None: + continue + new_items.append(item) - self._items_by_filepath[normalized_path] = item - self._reports_by_filepath[normalized_path] = report + report_item.save() + self._items_by_id[report_item.id] = item + self._report_items_by_id[report_item.id] = report_item - if not new_items: + if new_items: + root_item = self.invisibleRootItem() + root_item.appendRows(new_items) + + def remove_item_by_id(self, item_id): + report_item = self._report_items_by_id.get(item_id) + if not report_item: return + self._handler.remove_report_items(item_id) + item = self._items_by_id.get(item_id) + parent = self.invisibleRootItem() - parent.appendRows(new_items) + parent.removeRow(item.row()) - self._store_registry() - - def remove_filepaths(self, filepaths): - if not filepaths: - return - - if isinstance(filepaths, six.string_types): - filepaths = [filepaths] - - filtered_paths = [] - for filepath in filepaths: - normalized_path = os.path.normpath(filepath) - if normalized_path in self._items_by_filepath: - filtered_paths.append(normalized_path) - - if not filtered_paths: - return - - parent = self.invisibleRootItem() - for filepath in filtered_paths: - self._reports_by_filepath.pop(normalized_path) - item = self._items_by_filepath.pop(filepath) - parent.removeRow(item.row()) - - self._store_registry() - - def get_report_by_filepath(self, filepath): - return self._reports_by_filepath.get(filepath) + def get_report_by_id(self, item_id): + report_item = self._report_items_by_id.get(item_id) + if 
report_item: + return report_item.publish_report + return None class LoadedFilesView(QtWidgets.QTreeView): @@ -182,11 +328,13 @@ class LoadedFilesView(QtWidgets.QTreeView): def __init__(self, *args, **kwargs): super(LoadedFilesView, self).__init__(*args, **kwargs) - self.setEditTriggers(self.NoEditTriggers) + self.setEditTriggers( + self.EditKeyPressed | self.SelectedClicked | self.DoubleClicked + ) self.setIndentation(0) self.setAlternatingRowColors(True) - model = LoadedFilesMopdel() + model = LoadedFilesModel() self.setModel(model) time_delegate = PrettyTimeDelegate() @@ -219,6 +367,7 @@ class LoadedFilesView(QtWidgets.QTreeView): def _on_rows_inserted(self): header = self.header() header.resizeSections(header.ResizeToContents) + self._update_remove_btn() def resizeEvent(self, event): super(LoadedFilesView, self).resizeEvent(event) @@ -226,9 +375,10 @@ class LoadedFilesView(QtWidgets.QTreeView): def showEvent(self, event): super(LoadedFilesView, self).showEvent(event) - self._update_remove_btn() + self._model.refresh() header = self.header() header.resizeSections(header.ResizeToContents) + self._update_remove_btn() def _on_selection_change(self): self.selection_changed.emit() @@ -237,14 +387,14 @@ class LoadedFilesView(QtWidgets.QTreeView): self._model.add_filepaths(filepaths) self._fill_selection() - def remove_filepaths(self, filepaths): - self._model.remove_filepaths(filepaths) + def remove_item_by_id(self, item_id): + self._model.remove_item_by_id(item_id) self._fill_selection() def _on_remove_clicked(self): index = self.currentIndex() - filepath = index.data(FILEPATH_ROLE) - self.remove_filepaths(filepath) + item_id = index.data(ITEM_ID_ROLE) + self.remove_item_by_id(item_id) def _fill_selection(self): index = self.currentIndex() @@ -257,8 +407,8 @@ class LoadedFilesView(QtWidgets.QTreeView): def get_current_report(self): index = self.currentIndex() - filepath = index.data(FILEPATH_ROLE) - return self._model.get_report_by_filepath(filepath) + item_id = index.data(ITEM_ID_ROLE) + return self._model.get_report_by_id(item_id) class LoadedFilesWidget(QtWidgets.QWidget): diff --git a/openpype/tools/publisher/widgets/__init__.py b/openpype/tools/publisher/widgets/__init__.py index 55afc349ff..042985b007 100644 --- a/openpype/tools/publisher/widgets/__init__.py +++ b/openpype/tools/publisher/widgets/__init__.py @@ -3,35 +3,21 @@ from .icons import ( get_pixmap, get_icon ) -from .border_label_widget import ( - BorderedLabelWidget -) from .widgets import ( - SubsetAttributesWidget, - StopBtn, ResetBtn, ValidateBtn, PublishBtn, - - CreateInstanceBtn, - RemoveInstanceBtn, - ChangeViewBtn + CreateNextPageOverlay, ) -from .publish_widget import ( - PublishFrame -) -from .create_dialog import ( - CreateDialog -) - -from .card_view_widgets import ( - InstanceCardView -) - -from .list_view_widgets import ( - InstanceListView +from .help_widget import ( + HelpButton, + HelpDialog, ) +from .publish_frame import PublishFrame +from .tabs_widget import PublisherTabsWidget +from .overview_widget import OverviewWidget +from .validations_widget import ValidationsWidget __all__ = ( @@ -39,22 +25,18 @@ __all__ = ( "get_pixmap", "get_icon", - "SubsetAttributesWidget", - "BorderedLabelWidget", - "StopBtn", "ResetBtn", "ValidateBtn", "PublishBtn", + "CreateNextPageOverlay", - "CreateInstanceBtn", - "RemoveInstanceBtn", - "ChangeViewBtn", + "HelpButton", + "HelpDialog", "PublishFrame", - "CreateDialog", - - "InstanceCardView", - "InstanceListView", + "PublisherTabsWidget", + "OverviewWidget", + 
"ValidationsWidget", ) diff --git a/openpype/tools/publisher/widgets/assets_widget.py b/openpype/tools/publisher/widgets/assets_widget.py index 984da59c77..3c559af259 100644 --- a/openpype/tools/publisher/widgets/assets_widget.py +++ b/openpype/tools/publisher/widgets/assets_widget.py @@ -1,6 +1,7 @@ import collections -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui + from openpype.tools.utils import ( PlaceholderLineEdit, RecursiveSortFilterProxyModel, @@ -13,20 +14,41 @@ from openpype.tools.utils.assets_widget import ( ) -class CreateDialogAssetsWidget(SingleSelectAssetsWidget): +class CreateWidgetAssetsWidget(SingleSelectAssetsWidget): current_context_required = QtCore.Signal() + header_height_changed = QtCore.Signal(int) def __init__(self, controller, parent): self._controller = controller - super(CreateDialogAssetsWidget, self).__init__(None, parent) + super(CreateWidgetAssetsWidget, self).__init__(None, parent) self.set_refresh_btn_visibility(False) self.set_current_asset_btn_visibility(False) - self._current_asset_name = None self._last_selection = None self._enabled = None + self._last_filter_height = None + + def _check_header_height(self): + """Catch header height changes. + + Label on top of creaters should have same height so Creators view has + same offset. + """ + height = self.header_widget.height() + if height != self._last_filter_height: + self._last_filter_height = height + self.header_height_changed.emit(height) + + def resizeEvent(self, event): + super(CreateWidgetAssetsWidget, self).resizeEvent(event) + self._check_header_height() + + def showEvent(self, event): + super(CreateWidgetAssetsWidget, self).showEvent(event) + self._check_header_height() + def _on_current_asset_click(self): self.current_context_required.emit() @@ -41,19 +63,19 @@ class CreateDialogAssetsWidget(SingleSelectAssetsWidget): self.select_asset(self._last_selection) def _select_indexes(self, *args, **kwargs): - super(CreateDialogAssetsWidget, self)._select_indexes(*args, **kwargs) + super(CreateWidgetAssetsWidget, self)._select_indexes(*args, **kwargs) if self._enabled: return self._last_selection = self.get_selected_asset_id() self._clear_selection() - def set_current_asset_name(self, asset_name): - self._current_asset_name = asset_name + def update_current_asset(self): # Hide set current asset if there is no one - self.set_current_asset_btn_visibility(asset_name is not None) + asset_name = self._get_current_session_asset() + self.set_current_asset_btn_visibility(bool(asset_name)) def _get_current_session_asset(self): - return self._current_asset_name + return self._controller.current_asset_name def _create_source_model(self): return AssetsHierarchyModel(self._controller) @@ -64,13 +86,14 @@ class CreateDialogAssetsWidget(SingleSelectAssetsWidget): class AssetsHierarchyModel(QtGui.QStandardItemModel): - """Assets hiearrchy model. + """Assets hierarchy model. - For selecting asset for which should beinstance created. + For selecting asset for which an instance should be created. Uses controller to load asset hierarchy. All asset documents are stored by their parents. 
""" + def __init__(self, controller): super(AssetsHierarchyModel, self).__init__() self._controller = controller @@ -141,8 +164,19 @@ class AssetsHierarchyModel(QtGui.QStandardItemModel): return item_name in self._items_by_name +class AssetDialogView(QtWidgets.QTreeView): + double_clicked = QtCore.Signal(QtCore.QModelIndex) + + def mouseDoubleClickEvent(self, event): + index = self.indexAt(event.pos()) + if index.isValid(): + self.double_clicked.emit(index) + event.accept() + + class AssetsDialog(QtWidgets.QDialog): """Dialog to select asset for a context of instance.""" + def __init__(self, controller, parent): super(AssetsDialog, self).__init__(parent) self.setWindowTitle("Select asset") @@ -155,11 +189,11 @@ class AssetsDialog(QtWidgets.QDialog): filter_input = PlaceholderLineEdit(self) filter_input.setPlaceholderText("Filter assets..") - asset_view = QtWidgets.QTreeView(self) + asset_view = AssetDialogView(self) asset_view.setModel(proxy_model) asset_view.setHeaderHidden(True) asset_view.setFrameShape(QtWidgets.QFrame.NoFrame) - asset_view.setEditTriggers(QtWidgets.QTreeView.NoEditTriggers) + asset_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) asset_view.setAlternatingRowColors(True) asset_view.setSelectionBehavior(QtWidgets.QTreeView.SelectRows) asset_view.setAllColumnsShowFocus(True) @@ -177,6 +211,7 @@ class AssetsDialog(QtWidgets.QDialog): layout.addWidget(asset_view, 1) layout.addLayout(btns_layout, 0) + asset_view.double_clicked.connect(self._on_ok_clicked) filter_input.textChanged.connect(self._on_filter_change) ok_btn.clicked.connect(self._on_ok_clicked) cancel_btn.clicked.connect(self._on_cancel_clicked) @@ -196,9 +231,26 @@ class AssetsDialog(QtWidgets.QDialog): # - adds ability to call reset on multiple places without repeating self._soft_reset_enabled = True + self._first_show = True + self._default_height = 500 + + def _on_first_show(self): + center = self.rect().center() + size = self.size() + size.setHeight(self._default_height) + + self.resize(size) + new_pos = self.mapToGlobal(center) + new_pos.setX(new_pos.x() - int(self.width() / 2)) + new_pos.setY(new_pos.y() - int(self.height() / 2)) + self.move(new_pos) + def showEvent(self, event): """Refresh asset model on show.""" super(AssetsDialog, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() # Refresh on show self.reset(False) @@ -234,7 +286,7 @@ class AssetsDialog(QtWidgets.QDialog): index = self._asset_view.currentIndex() asset_name = None if index.isValid(): - asset_name = index.data(QtCore.Qt.DisplayRole) + asset_name = index.data(ASSET_NAME_ROLE) self._selected_asset = asset_name self.done(1) diff --git a/openpype/tools/publisher/widgets/border_label_widget.py b/openpype/tools/publisher/widgets/border_label_widget.py index 696a9050b8..5617e159cd 100644 --- a/openpype/tools/publisher/widgets/border_label_widget.py +++ b/openpype/tools/publisher/widgets/border_label_widget.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype.style import get_objected_colors @@ -29,8 +29,8 @@ class _VLineWidget(QtWidgets.QWidget): pos_x = self.width() painter = QtGui.QPainter(self) painter.setRenderHints( - painter.Antialiasing - | painter.SmoothPixmapTransform + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform ) if self._color: pen = QtGui.QPen(self._color) @@ -73,8 +73,8 @@ class _HBottomLineWidget(QtWidgets.QWidget): ) painter = QtGui.QPainter(self) 
painter.setRenderHints( - painter.Antialiasing - | painter.SmoothPixmapTransform + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform ) if self._color: pen = QtGui.QPen(self._color) @@ -131,8 +131,8 @@ class _HTopCornerLineWidget(QtWidgets.QWidget): painter = QtGui.QPainter(self) painter.setRenderHints( - painter.Antialiasing - | painter.SmoothPixmapTransform + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform ) if self._color: pen = QtGui.QPen(self._color) @@ -158,8 +158,7 @@ class BorderedLabelWidget(QtWidgets.QFrame): """ def __init__(self, label, parent): super(BorderedLabelWidget, self).__init__(parent) - colors_data = get_objected_colors() - color_value = colors_data.get("border") + color_value = get_objected_colors("border") color = None if color_value: color = color_value.get_qcolor() diff --git a/openpype/tools/publisher/widgets/card_view_widgets.py b/openpype/tools/publisher/widgets/card_view_widgets.py index 086cd5c59c..47f8ebb914 100644 --- a/openpype/tools/publisher/widgets/card_view_widgets.py +++ b/openpype/tools/publisher/widgets/card_view_widgets.py @@ -23,11 +23,12 @@ Only one item can be selected at a time. import re import collections -from Qt import QtWidgets, QtCore +from qtpy import QtWidgets, QtCore from openpype.widgets.nice_checkbox import NiceCheckbox from openpype.tools.utils import BaseClickableFrame +from openpype.tools.utils.lib import html_escape from .widgets import ( AbstractInstanceView, ContextWarningLabel, @@ -36,18 +37,24 @@ from .widgets import ( ) from ..constants import ( CONTEXT_ID, - CONTEXT_LABEL + CONTEXT_LABEL, + CONTEXT_GROUP, + CONVERTOR_ITEM_GROUP, ) -class GroupWidget(QtWidgets.QWidget): - """Widget wrapping instances under group.""" - selected = QtCore.Signal(str, str) - active_changed = QtCore.Signal() +class SelectionTypes: + clear = "clear" + extend = "extend" + extend_to = "extend_to" + + +class BaseGroupWidget(QtWidgets.QWidget): + selected = QtCore.Signal(str, str, str) removed_selected = QtCore.Signal() - def __init__(self, group_name, group_icons, parent): - super(GroupWidget, self).__init__(parent) + def __init__(self, group_name, parent): + super(BaseGroupWidget, self).__init__(parent) label_widget = QtWidgets.QLabel(group_name, self) @@ -68,29 +75,144 @@ class GroupWidget(QtWidgets.QWidget): layout.addLayout(label_layout, 0) self._group = group_name - self._group_icons = group_icons self._widgets_by_id = {} + self._ordered_item_ids = [] self._label_widget = label_widget self._content_layout = layout - def get_widget_by_instance_id(self, instance_id): + @property + def group_name(self): + """Group which widget represent. + + Returns: + str: Name of group. + """ + + return self._group + + def get_widget_by_item_id(self, item_id): """Get instance widget by it's id.""" - return self._widgets_by_id.get(instance_id) + + return self._widgets_by_id.get(item_id) + + def get_selected_item_ids(self): + """Selected instance ids. + + Returns: + Set[str]: Instance ids that are selected. + """ + + return { + instance_id + for instance_id, widget in self._widgets_by_id.items() + if widget.is_selected + } + + def get_selected_widgets(self): + """Access to widgets marked as selected. + + Returns: + List[InstanceCardWidget]: Instance widgets that are selected. + """ + + return [ + widget + for instance_id, widget in self._widgets_by_id.items() + if widget.is_selected + ] + + def get_ordered_widgets(self): + """Get instance ids in order as are shown in ui. + + Returns: + List[str]: Instance ids. 
+ """ + + return [ + self._widgets_by_id[instance_id] + for instance_id in self._ordered_item_ids + ] + + def _remove_all_except(self, item_ids): + item_ids = set(item_ids) + # Remove instance widgets that are not in passed instances + for item_id in tuple(self._widgets_by_id.keys()): + if item_id in item_ids: + continue + + widget = self._widgets_by_id.pop(item_id) + if widget.is_selected: + self.removed_selected.emit() + + widget.setVisible(False) + self._content_layout.removeWidget(widget) + widget.deleteLater() + + def _update_ordered_item_ids(self): + ordered_item_ids = [] + for idx in range(self._content_layout.count()): + if idx > 0: + item = self._content_layout.itemAt(idx) + widget = item.widget() + if widget is not None: + ordered_item_ids.append(widget.id) + + self._ordered_item_ids = ordered_item_ids + + def _on_widget_selection(self, instance_id, group_id, selection_type): + self.selected.emit(instance_id, group_id, selection_type) + + +class ConvertorItemsGroupWidget(BaseGroupWidget): + def update_items(self, items_by_id): + items_by_label = collections.defaultdict(list) + for item in items_by_id.values(): + items_by_label[item.label].append(item) + + # Remove instance widgets that are not in passed instances + self._remove_all_except(items_by_id.keys()) + + # Sort instances by subset name + sorted_labels = list(sorted(items_by_label.keys())) + + # Add new instances to widget + widget_idx = 1 + for label in sorted_labels: + for item in items_by_label[label]: + if item.id in self._widgets_by_id: + widget = self._widgets_by_id[item.id] + widget.update_item(item) + else: + widget = ConvertorItemCardWidget(item, self) + widget.selected.connect(self._on_widget_selection) + self._widgets_by_id[item.id] = widget + self._content_layout.insertWidget(widget_idx, widget) + widget_idx += 1 + + self._update_ordered_item_ids() + + +class InstanceGroupWidget(BaseGroupWidget): + """Widget wrapping instances under group.""" + + active_changed = QtCore.Signal() + + def __init__(self, group_icons, *args, **kwargs): + super(InstanceGroupWidget, self).__init__(*args, **kwargs) + + self._group_icons = group_icons + + def update_icons(self, group_icons): + self._group_icons = group_icons def update_instance_values(self): """Trigger update on instance widgets.""" + for widget in self._widgets_by_id.values(): widget.update_instance_values() - def confirm_remove_instance_id(self, instance_id): - """Delete widget by instance id.""" - widget = self._widgets_by_id.pop(instance_id) - widget.setVisible(False) - self._content_layout.removeWidget(widget) - widget.deleteLater() - def update_instances(self, instances): """Update instances for the group. @@ -98,6 +220,7 @@ class GroupWidget(QtWidgets.QWidget): instances(list): List of instances in CreateContext. 
""" + # Store instances by id and by subset name instances_by_id = {} instances_by_subset_name = collections.defaultdict(list) @@ -107,20 +230,11 @@ class GroupWidget(QtWidgets.QWidget): instances_by_subset_name[subset_name].append(instance) # Remove instance widgets that are not in passed instances - for instance_id in tuple(self._widgets_by_id.keys()): - if instance_id in instances_by_id: - continue - - widget = self._widgets_by_id.pop(instance_id) - if widget.is_selected: - self.removed_selected.emit() - - widget.setVisible(False) - self._content_layout.removeWidget(widget) - widget.deleteLater() + self._remove_all_except(instances_by_id.keys()) # Sort instances by subset name sorted_subset_names = list(sorted(instances_by_subset_name.keys())) + # Add new instances to widget widget_idx = 1 for subset_names in sorted_subset_names: @@ -133,16 +247,19 @@ class GroupWidget(QtWidgets.QWidget): widget = InstanceCardWidget( instance, group_icon, self ) - widget.selected.connect(self.selected) + widget.selected.connect(self._on_widget_selection) widget.active_changed.connect(self.active_changed) self._widgets_by_id[instance.id] = widget self._content_layout.insertWidget(widget_idx, widget) widget_idx += 1 + self._update_ordered_item_ids() + class CardWidget(BaseClickableFrame): """Clickable card used as bigger button.""" - selected = QtCore.Signal(str, str) + + selected = QtCore.Signal(str, str, str) # Group identifier of card # - this must be set because if send when mouse is released with card id _group_identifier = None @@ -154,6 +271,12 @@ class CardWidget(BaseClickableFrame): self._selected = False self._id = None + @property + def id(self): + """Id of card.""" + + return self._id + @property def is_selected(self): """Is card selected.""" @@ -170,7 +293,16 @@ class CardWidget(BaseClickableFrame): def _mouse_release_callback(self): """Trigger selected signal.""" - self.selected.emit(self._id, self._group_identifier) + + modifiers = QtWidgets.QApplication.keyboardModifiers() + selection_type = SelectionTypes.clear + if bool(modifiers & QtCore.Qt.ShiftModifier): + selection_type = SelectionTypes.extend_to + + elif bool(modifiers & QtCore.Qt.ControlModifier): + selection_type = SelectionTypes.extend + + self.selected.emit(self._id, self._group_identifier, selection_type) class ContextCardWidget(CardWidget): @@ -178,11 +310,12 @@ class ContextCardWidget(CardWidget): Is not visually under group widget and is always at the top of card view. """ + def __init__(self, parent): super(ContextCardWidget, self).__init__(parent) self._id = CONTEXT_ID - self._group_identifier = "" + self._group_identifier = CONTEXT_GROUP icon_widget = PublishPixmapLabel(None, self) icon_widget.setObjectName("FamilyIconLabel") @@ -202,15 +335,50 @@ class ContextCardWidget(CardWidget): self._label_widget = label_widget +class ConvertorItemCardWidget(CardWidget): + """Card for global context. + + Is not visually under group widget and is always at the top of card view. 
+    """
+
+    def __init__(self, item, parent):
+        super(ConvertorItemCardWidget, self).__init__(parent)
+
+        self._id = item.id
+        self.identifier = item.identifier
+        self._group_identifier = CONVERTOR_ITEM_GROUP
+
+        icon_widget = IconValuePixmapLabel("fa.magic", self)
+        icon_widget.setObjectName("FamilyIconLabel")
+
+        label_widget = QtWidgets.QLabel(item.label, self)
+
+        icon_layout = QtWidgets.QHBoxLayout()
+        icon_layout.setContentsMargins(10, 5, 5, 5)
+        icon_layout.addWidget(icon_widget)
+
+        layout = QtWidgets.QHBoxLayout(self)
+        layout.setContentsMargins(0, 5, 10, 5)
+        layout.addLayout(icon_layout, 0)
+        layout.addWidget(label_widget, 1)
+
+        self._icon_widget = icon_widget
+        self._label_widget = label_widget
+
+    def update_instance_values(self):
+        pass
+
+
 class InstanceCardWidget(CardWidget):
     """Card widget representing instance."""
+
     active_changed = QtCore.Signal()
 
     def __init__(self, instance, group_icon, parent):
         super(InstanceCardWidget, self).__init__(parent)
 
         self._id = instance.id
-        self._group_identifier = instance.creator_label
+        self._group_identifier = instance.group_label
         self._group_icon = group_icon
 
         self.instance = instance
@@ -303,13 +471,14 @@
         self._last_variant = variant
         self._last_subset_name = subset_name
         # Make `variant` bold
-        found_parts = set(re.findall(variant, subset_name, re.IGNORECASE))
+        label = html_escape(self.instance.label)
+        found_parts = set(re.findall(variant, label, re.IGNORECASE))
         if found_parts:
             for part in found_parts:
                 replacement = "<b>{}</b>".format(part)
-                subset_name = subset_name.replace(part, replacement)
+                label = label.replace(part, replacement)
 
-        self._label_widget.setText(subset_name)
+        self._label_widget.setText(label)
         # HTML text will cause that label start catch mouse clicks
         # - disabling with changing interaction flag
         self._label_widget.setTextInteractionFlags(
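
The hunk above switches the bold-variant highlight to operate on an escaped copy of the instance label. Escaping first matters because the result is handed to the QLabel as rich text, so an unescaped `<` or `&` in a subset name would otherwise be parsed as markup. A rough standalone sketch of the same transform, assuming `html_escape` behaves like the standard library's `html.escape` (and mirroring the PR's quirk of using the raw variant string as a regex pattern):

import html
import re


def highlight_variant(label, variant):
    """Wrap case-insensitive occurrences of 'variant' in <b> tags."""
    escaped = html.escape(label)
    for part in set(re.findall(variant, escaped, re.IGNORECASE)):
        escaped = escaped.replace(part, "<b>{}</b>".format(part))
    return escaped

@@ -345,10 +514,11 @@
 
     Wrapper of all widgets in card view.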
""" + def __init__(self, controller, parent): super(InstanceCardView, self).__init__(parent) - self.controller = controller + self._controller = controller scroll_area = QtWidgets.QScrollArea(self) scroll_area.setWidgetResizable(True) @@ -375,11 +545,13 @@ class InstanceCardView(AbstractInstanceView): self._content_layout = content_layout self._content_widget = content_widget - self._widgets_by_group = {} self._context_widget = None + self._convertor_items_group = None + self._widgets_by_group = {} + self._ordered_groups = [] - self._selected_group = None - self._selected_instance_id = None + self._explicitly_selected_instance_ids = [] + self._explicitly_selected_groups = [] self.setSizePolicy( QtWidgets.QSizePolicy.Minimum, @@ -399,43 +571,49 @@ class InstanceCardView(AbstractInstanceView): result.setWidth(width) return result - def _get_selected_widget(self): - if self._selected_instance_id == CONTEXT_ID: - return self._context_widget + def _get_selected_widgets(self): + output = [] + if ( + self._context_widget is not None + and self._context_widget.is_selected + ): + output.append(self._context_widget) - group_widget = self._widgets_by_group.get( - self._selected_group - ) - if group_widget is not None: - widget = group_widget.get_widget_by_instance_id( - self._selected_instance_id - ) - if widget is not None: - return widget + if self._convertor_items_group is not None: + output.extend(self._convertor_items_group.get_selected_widgets()) - return None + for group_widget in self._widgets_by_group.values(): + for widget in group_widget.get_selected_widgets(): + output.append(widget) + return output + + def _get_selected_instance_ids(self): + output = [] + if ( + self._context_widget is not None + and self._context_widget.is_selected + ): + output.append(CONTEXT_ID) + + if self._convertor_items_group is not None: + output.extend(self._convertor_items_group.get_selected_item_ids()) + + for group_widget in self._widgets_by_group.values(): + output.extend(group_widget.get_selected_item_ids()) + return output def refresh(self): """Refresh instances in view based on CreatedContext.""" - # Create context item if is not already existing - # - this must be as first thing to do as context item should be at the - # top - if self._context_widget is None: - widget = ContextCardWidget(self._content_widget) - widget.selected.connect(self._on_widget_selection) - self._context_widget = widget + self._make_sure_context_widget_exists() - self.selection_changed.emit() - self._content_layout.insertWidget(0, widget) - - self.select_item(CONTEXT_ID, None) + self._update_convertor_items_group() # Prepare instances by group and identifiers by group instances_by_group = collections.defaultdict(list) identifiers_by_group = collections.defaultdict(set) - for instance in self.controller.instances: - group_name = instance.creator_label + for instance in self._controller.instances.values(): + group_name = instance.group_label instances_by_group[group_name].append(instance) identifiers_by_group[group_name].add( instance.creator_identifier @@ -446,35 +624,38 @@ class InstanceCardView(AbstractInstanceView): if group_name in instances_by_group: continue - if group_name == self._selected_group: - self._on_remove_selected() widget = self._widgets_by_group.pop(group_name) widget.setVisible(False) self._content_layout.removeWidget(widget) widget.deleteLater() + if group_name in self._explicitly_selected_groups: + self._explicitly_selected_groups.remove(group_name) + # Sort groups sorted_group_names = 
list(sorted(instances_by_group.keys()))
+
         # Keep track of widget indexes
         # - we start with 1 because Context item as at the top
         widget_idx = 1
+        if self._convertor_items_group is not None:
+            widget_idx += 1
+
         for group_name in sorted_group_names:
+            group_icons = {
+                identifier: self._controller.get_creator_icon(identifier)
+                for identifier in identifiers_by_group[group_name]
+            }
             if group_name in self._widgets_by_group:
                 group_widget = self._widgets_by_group[group_name]
-            else:
-                group_icons = {
-                    idenfier: self.controller.get_icon_for_family(idenfier)
-                    for idenfier in identifiers_by_group[group_name]
-                }
+                group_widget.update_icons(group_icons)
 
-                group_widget = GroupWidget(
-                    group_name, group_icons, self._content_widget
+            else:
+                group_widget = InstanceGroupWidget(
+                    group_icons, group_name, self._content_widget
                 )
                 group_widget.active_changed.connect(self._on_active_changed)
                 group_widget.selected.connect(self._on_widget_selection)
-                group_widget.removed_selected.connect(
-                    self._on_remove_selected
-                )
                 self._content_layout.insertWidget(widget_idx, group_widget)
                 self._widgets_by_group[group_name] = group_widget
@@ -483,6 +664,63 @@
             instances_by_group[group_name]
         )
 
+        self._update_ordered_group_names()
+
+    def has_items(self):
+        if self._convertor_items_group is not None:
+            return True
+        if self._widgets_by_group:
+            return True
+        return False
+
+    def _update_ordered_group_names(self):
+        ordered_group_names = [CONTEXT_GROUP]
+        for idx in range(self._content_layout.count()):
+            if idx > 0:
+                item = self._content_layout.itemAt(idx)
+                group_widget = item.widget()
+                if group_widget is not None:
+                    ordered_group_names.append(group_widget.group_name)
+
+        self._ordered_groups = ordered_group_names
+
+    def _make_sure_context_widget_exists(self):
+        # Create context item if it does not exist yet
+        # - this must be done first as the context item should be at the top
+        if self._context_widget is not None:
+            return
+
+        widget = ContextCardWidget(self._content_widget)
+        widget.selected.connect(self._on_widget_selection)
+
+        self._context_widget = widget
+
+        self.selection_changed.emit()
+        self._content_layout.insertWidget(0, widget)
+
+    def _update_convertor_items_group(self):
+        convertor_items = self._controller.convertor_items
+        if not convertor_items and self._convertor_items_group is None:
+            return
+
+        if not convertor_items:
+            self._convertor_items_group.setVisible(False)
+            self._content_layout.removeWidget(self._convertor_items_group)
+            self._convertor_items_group.deleteLater()
+            self._convertor_items_group = None
+            return
+
+        if self._convertor_items_group is None:
+            group_widget = ConvertorItemsGroupWidget(
+                CONVERTOR_ITEM_GROUP, self._content_widget
+            )
+            group_widget.selected.connect(self._on_widget_selection)
+            self._content_layout.insertWidget(1, group_widget)
+            self._convertor_items_group = group_widget
+
+        self._convertor_items_group.update_items(convertor_items)
+
     def refresh_instance_states(self):
         """Trigger update of instances on group widgets."""
         for widget in self._widgets_by_group.values():
@@ -491,10 +729,7 @@
     def _on_active_changed(self):
         self.active_changed.emit()
 
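`_update_ordered_group_names` above rebuilds the ordering straight from the layout instead of maintaining a parallel list, which keeps the UI order and the bookkeeping from drifting apart. A minimal sketch of that traversal technique, with hypothetical names (the PR's version additionally skips index 0, where the context card always sits):

def widgets_in_layout_order(layout):
    """Collect child widgets of a QBoxLayout in their visual order."""
    widgets = []
    for idx in range(layout.count()):
        item = layout.itemAt(idx)
        # itemAt() may return spacer or nested-layout items; widget()
        # returns None for those, so they are skipped
        widget = item.widget()
        if widget is not None:
            widgets.append(widget)
    return widgets

-    def _on_widget_selection(self, instance_id, group_name):
-        self.select_item(instance_id, group_name)
-
-    def select_item(self, instance_id, group_name):
+    def _on_widget_selection(self, instance_id, group_name, selection_type):
         """Select specific item by instance id.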
Pass `CONTEXT_ID` as instance id and empty string as group to select @@ -502,38 +737,353 @@ class InstanceCardView(AbstractInstanceView): """ if instance_id == CONTEXT_ID: new_widget = self._context_widget + else: - group_widget = self._widgets_by_group[group_name] - new_widget = group_widget.get_widget_by_instance_id(instance_id) + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + new_widget = group_widget.get_widget_by_item_id(instance_id) - selected_widget = self._get_selected_widget() - if new_widget is selected_widget: - return - - if selected_widget is not None: - selected_widget.set_selected(False) - - self._selected_instance_id = instance_id - self._selected_group = group_name - if new_widget is not None: - new_widget.set_selected(True) + if selection_type == SelectionTypes.clear: + self._select_item_clear(instance_id, group_name, new_widget) + elif selection_type == SelectionTypes.extend: + self._select_item_extend(instance_id, group_name, new_widget) + elif selection_type == SelectionTypes.extend_to: + self._select_item_extend_to(instance_id, group_name, new_widget) self.selection_changed.emit() - def _on_remove_selected(self): - selected_widget = self._get_selected_widget() - if selected_widget is None: - self._on_widget_selection(CONTEXT_ID, None) + def _select_item_clear(self, instance_id, group_name, new_widget): + """Select specific item by instance id and clear previous selection. + + Pass `CONTEXT_ID` as instance id and empty string as group to select + global context item. + """ + + selected_widgets = self._get_selected_widgets() + for widget in selected_widgets: + if widget.id != instance_id: + widget.set_selected(False) + + self._explicitly_selected_groups = [group_name] + self._explicitly_selected_instance_ids = [instance_id] + + if new_widget is not None: + new_widget.set_selected(True) + + def _select_item_extend(self, instance_id, group_name, new_widget): + """Add/Remove single item to/from current selection. + + If item is already selected the selection is removed. + """ + + self._explicitly_selected_instance_ids = ( + self._get_selected_instance_ids() + ) + if new_widget.is_selected: + self._explicitly_selected_instance_ids.remove(instance_id) + new_widget.set_selected(False) + remove_group = False + if instance_id == CONTEXT_ID: + remove_group = True + else: + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + if not group_widget.get_selected_widgets(): + remove_group = True + + if remove_group: + self._explicitly_selected_groups.remove(group_name) + return + + self._explicitly_selected_instance_ids.append(instance_id) + if group_name in self._explicitly_selected_groups: + self._explicitly_selected_groups.remove(group_name) + self._explicitly_selected_groups.append(group_name) + new_widget.set_selected(True) + + def _select_item_extend_to(self, instance_id, group_name, new_widget): + """Extend selected items to specific instance id. + + This method is handling Shift+click selection of widgets. Selection + is not stored to explicit selection items. That's because user can + shift select again and it should use last explicit selected item as + source item for selection. + + Items selected via this function can get to explicit selection only if + selection is extended by one specific item ('_select_item_extend'). 
+        From that moment the selection is locked to the new last explicitly
+        selected item.
+
+        It's required to traverse through group widgets in their UI order and
+        through their instances in UI order. All explicitly selected items
+        must not change their selection state during this function. Passed
+        instance id can be above or under the last selected item, so a start
+        item and an end item must be found to know in which direction the
+        selection is happening.
+        """
+
+        # Start group name (in '_ordered_groups')
+        start_group = None
+        # End group name (in '_ordered_groups')
+        end_group = None
+        # Instance id of first selected item
+        start_instance_id = None
+        # Instance id of last selected item
+        end_instance_id = None
+
+        # Get previously selected group by explicit selected groups
+        previous_group = None
+        if self._explicitly_selected_groups:
+            previous_group = self._explicitly_selected_groups[-1]
+
+        # Find last explicitly selected instance id
+        previous_last_selected_id = None
+        if self._explicitly_selected_instance_ids:
+            previous_last_selected_id = (
+                self._explicitly_selected_instance_ids[-1]
+            )
+
+        # If last instance id was not found or available then last selected
+        # group is also invalid.
+        # NOTE: This probably never happens?
+        if previous_last_selected_id is None:
+            previous_group = None
+
+        # Check if previously selected group is available and find out if
+        # new instance group is above or under previous selection
+        # - based on this information the start/end group/instance are filled
+        if previous_group in self._ordered_groups:
+            new_idx = self._ordered_groups.index(group_name)
+            prev_idx = self._ordered_groups.index(previous_group)
+            if new_idx < prev_idx:
+                start_group = group_name
+                end_group = previous_group
+                start_instance_id = instance_id
+                end_instance_id = previous_last_selected_id
+            else:
+                start_group = previous_group
+                end_group = group_name
+                start_instance_id = previous_last_selected_id
+                end_instance_id = instance_id
+
+        # If start group is not set then use context item group name
+        if start_group is None:
+            start_group = CONTEXT_GROUP
+
+        # If start instance id is not filled then use context id (similar to
+        # group)
+        if start_instance_id is None:
+            start_instance_id = CONTEXT_ID
+
+        # If end group is not defined then use passed group name
+        # - this can happen when previous group was not selected
+        # - when this happens the selection will probably happen from context
+        #   item to item selected by user
+        if end_group is None:
+            end_group = group_name
+
+        # If end instance is not filled then use instance selected by user
+        if end_instance_id is None:
+            end_instance_id = instance_id
+
+        # Start and end group are the same
+        # - a different logic is needed in that case
+        same_group = start_group == end_group
+
+        # Process known information and change selection of items
+        passed_start_group = False
+        passed_end_group = False
+        # Go through ordered groups (from top to bottom) and change selection
+        for name in self._ordered_groups:
+            # Prepare sorted instance widgets
+            if name == CONTEXT_GROUP:
+                sorted_widgets = [self._context_widget]
+            else:
+                if name == CONVERTOR_ITEM_GROUP:
+                    group_widget = self._convertor_items_group
+                else:
+                    group_widget = self._widgets_by_group[name]
+                sorted_widgets = group_widget.get_ordered_widgets()
+
+            # Change selection based on explicit selection if start group
+            # was not passed yet
+            if not passed_start_group:
+                if name != start_group:
+                    for widget in sorted_widgets:
+                        widget.set_selected(
+                            widget.id in self._explicitly_selected_instance_ids
+                        )
+                    continue
+
+            # Change selection based on explicit selection if end group
+            # already passed
+            if passed_end_group:
+                for widget in sorted_widgets:
+                    widget.set_selected(
+                        widget.id in self._explicitly_selected_instance_ids
+                    )
+                continue
+
+            # Start group is already passed and end group was not yet hit
+            if same_group:
+                passed_start_group = True
+                passed_end_group = True
+                passed_start_instance = False
+                passed_end_instance = False
+                for widget in sorted_widgets:
+                    if not passed_start_instance:
+                        if widget.id in (start_instance_id, end_instance_id):
+                            if widget.id != start_instance_id:
+                                # Swap start/end instance if start instance is
+                                # after end
+                                # - fix 'passed_end_instance' check
+                                start_instance_id, end_instance_id = (
+                                    end_instance_id, start_instance_id
+                                )
+                            passed_start_instance = True
+
+                    # Find out if widget should be selected
+                    select = False
+                    if passed_end_instance:
+                        select = False
+
+                    elif passed_start_instance:
+                        select = True
+
+                    # Check if instance is in explicitly selected items if
+                    # should not be selected
+                    if (
+                        not select
+                        and widget.id in self._explicitly_selected_instance_ids
+                    ):
+                        select = True
+
+                    widget.set_selected(select)
+
+                    if (
+                        not passed_end_instance
+                        and widget.id == end_instance_id
+                    ):
+                        passed_end_instance = True
+
+            elif name == start_group:
+                # First group from which selection should start
+                # - look for start instance first from which the selection
+                #   should happen
+                passed_start_group = True
+                passed_start_instance = False
+                for widget in sorted_widgets:
+                    if widget.id == start_instance_id:
+                        passed_start_instance = True
+
+                    select = False
+                    # Check if passed start instance or instance is
+                    # in explicitly selected items to be selected
+                    if (
+                        passed_start_instance
+                        or widget.id in self._explicitly_selected_instance_ids
+                    ):
+                        select = True
+                    widget.set_selected(select)
+
+            elif name == end_group:
+                # Last group where selection should happen
+                # - look for end instance first after which the selection
+                #   should stop
+                passed_end_group = True
+                passed_end_instance = False
+                for widget in sorted_widgets:
+                    select = False
+                    # Check if not yet passed end instance or if instance is
+                    # in explicitly selected items to be selected
+                    if (
+                        not passed_end_instance
+                        or widget.id in self._explicitly_selected_instance_ids
+                    ):
+                        select = True
+
+                    widget.set_selected(select)
+
+                    if widget.id == end_instance_id:
+                        passed_end_instance = True
+
+            else:
+                # Just select everything between start and end group
+                for widget in sorted_widgets:
+                    widget.set_selected(True)
 
     def get_selected_items(self):
         """Get selected instance ids and context."""
+
+        convertor_identifiers = []
         instances = []
+        selected_widgets = self._get_selected_widgets()
+
         context_selected = False
-        selected_widget = self._get_selected_widget()
-        if selected_widget is self._context_widget:
-            context_selected = True
+        for widget in selected_widgets:
+            if widget is self._context_widget:
+                context_selected = True
 
-        elif selected_widget is not None:
-            instances.append(selected_widget.instance)
+            elif isinstance(widget, InstanceCardWidget):
+                instances.append(widget.id)
 
-        return instances, context_selected
+            elif isinstance(widget, ConvertorItemCardWidget):
+                convertor_identifiers.append(widget.identifier)
+
+        return instances, context_selected, convertor_identifiers
+
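The long `_select_item_extend_to` method above is easier to follow against a flat mental model: order all cards top to bottom, then select the contiguous range between the last explicitly selected item and the clicked item, while keeping every explicitly selected item selected. A simplified, hypothetical sketch of those semantics without Qt (names are illustrative; the first ordered id plays the role of the context card fallback anchor):

def extend_to(ordered_ids, explicit_ids, clicked_id):
    """Return the ids selected after a Shift+click on 'clicked_id'."""
    anchor = explicit_ids[-1] if explicit_ids else ordered_ids[0]
    i, j = ordered_ids.index(anchor), ordered_ids.index(clicked_id)
    if i > j:
        i, j = j, i
    # Contiguous range between anchor and click, plus everything that was
    # explicitly selected before
    return set(ordered_ids[i:j + 1]) | set(explicit_ids)

+    def set_selected_items(
+        self, instance_ids, context_selected, convertor_identifiers
+    ):
+        s_instance_ids = set(instance_ids)
+        s_convertor_identifiers = set(convertor_identifiers)
+        cur_ids, cur_context,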
cur_convertor_identifiers = ( + self.get_selected_items() + ) + if ( + set(cur_ids) == s_instance_ids + and cur_context == context_selected + and set(cur_convertor_identifiers) == s_convertor_identifiers + ): + return + + selected_groups = [] + selected_instances = [] + if context_selected: + selected_groups.append(CONTEXT_GROUP) + selected_instances.append(CONTEXT_ID) + + self._context_widget.set_selected(context_selected) + + for group_name in self._ordered_groups: + if group_name == CONTEXT_GROUP: + continue + + is_convertor_group = group_name == CONVERTOR_ITEM_GROUP + if is_convertor_group: + group_widget = self._convertor_items_group + else: + group_widget = self._widgets_by_group[group_name] + + group_selected = False + for widget in group_widget.get_ordered_widgets(): + select = False + if is_convertor_group: + is_in = widget.identifier in s_convertor_identifiers + else: + is_in = widget.id in s_instance_ids + if is_in: + selected_instances.append(widget.id) + group_selected = True + select = True + widget.set_selected(select) + + if group_selected: + selected_groups.append(group_name) + + self._explicitly_selected_groups = selected_groups + self._explicitly_selected_instance_ids = selected_instances diff --git a/openpype/tools/publisher/widgets/create_dialog.py b/openpype/tools/publisher/widgets/create_dialog.py deleted file mode 100644 index 27ce97955a..0000000000 --- a/openpype/tools/publisher/widgets/create_dialog.py +++ /dev/null @@ -1,854 +0,0 @@ -import sys -import re -import traceback -import copy - -try: - import commonmark -except Exception: - commonmark = None -from Qt import QtWidgets, QtCore, QtGui -from openpype.lib import TaskNotSetError -from openpype.pipeline.create import ( - CreatorError, - SUBSET_NAME_ALLOWED_SYMBOLS -) - -from openpype.tools.utils import ErrorMessageBox - -from .widgets import IconValuePixmapLabel -from .assets_widget import CreateDialogAssetsWidget -from .tasks_widget import CreateDialogTasksWidget -from .precreate_widget import PreCreateWidget -from ..constants import ( - VARIANT_TOOLTIP, - CREATOR_IDENTIFIER_ROLE, - FAMILY_ROLE -) - -SEPARATORS = ("---separator---", "---") - - -class CreateErrorMessageBox(ErrorMessageBox): - def __init__( - self, - creator_label, - subset_name, - asset_name, - exc_msg, - formatted_traceback, - parent - ): - self._creator_label = creator_label - self._subset_name = subset_name - self._asset_name = asset_name - self._exc_msg = exc_msg - self._formatted_traceback = formatted_traceback - super(CreateErrorMessageBox, self).__init__("Creation failed", parent) - - def _create_top_widget(self, parent_widget): - label_widget = QtWidgets.QLabel(parent_widget) - label_widget.setText( - "Failed to create" - ) - return label_widget - - def _get_report_data(self): - report_message = ( - "{creator}: Failed to create Subset: \"{subset}\"" - " in Asset: \"{asset}\"" - "\n\nError: {message}" - ).format( - creator=self._creator_label, - subset=self._subset_name, - asset=self._asset_name, - message=self._exc_msg, - ) - if self._formatted_traceback: - report_message += "\n\n{}".format(self._formatted_traceback) - return [report_message] - - def _create_content(self, content_layout): - item_name_template = ( - "Creator: {}
" - "Subset: {}
" - "Asset: {}
" - ) - exc_msg_template = "{}" - - line = self._create_line() - content_layout.addWidget(line) - - item_name_widget = QtWidgets.QLabel(self) - item_name_widget.setText( - item_name_template.format( - self._creator_label, self._subset_name, self._asset_name - ) - ) - content_layout.addWidget(item_name_widget) - - message_label_widget = QtWidgets.QLabel(self) - message_label_widget.setText( - exc_msg_template.format(self.convert_text_for_html(self._exc_msg)) - ) - content_layout.addWidget(message_label_widget) - - if self._formatted_traceback: - line_widget = self._create_line() - tb_widget = self._create_traceback_widget( - self._formatted_traceback - ) - content_layout.addWidget(line_widget) - content_layout.addWidget(tb_widget) - - -# TODO add creator identifier/label to details -class CreatorShortDescWidget(QtWidgets.QWidget): - def __init__(self, parent=None): - super(CreatorShortDescWidget, self).__init__(parent=parent) - - # --- Short description widget --- - icon_widget = IconValuePixmapLabel(None, self) - icon_widget.setObjectName("FamilyIconLabel") - - # --- Short description inputs --- - short_desc_input_widget = QtWidgets.QWidget(self) - - family_label = QtWidgets.QLabel(short_desc_input_widget) - family_label.setAlignment( - QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft - ) - - description_label = QtWidgets.QLabel(short_desc_input_widget) - description_label.setAlignment( - QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft - ) - - short_desc_input_layout = QtWidgets.QVBoxLayout( - short_desc_input_widget - ) - short_desc_input_layout.setSpacing(0) - short_desc_input_layout.addWidget(family_label) - short_desc_input_layout.addWidget(description_label) - # -------------------------------- - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - layout.addWidget(icon_widget, 0) - layout.addWidget(short_desc_input_widget, 1) - # -------------------------------- - - self._icon_widget = icon_widget - self._family_label = family_label - self._description_label = description_label - - def set_plugin(self, plugin=None): - if not plugin: - self._icon_widget.set_icon_def(None) - self._family_label.setText("") - self._description_label.setText("") - return - - plugin_icon = plugin.get_icon() - description = plugin.get_description() or "" - - self._icon_widget.set_icon_def(plugin_icon) - self._family_label.setText("{}".format(plugin.family)) - self._family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) - self._description_label.setText(description) - - -class HelpButton(QtWidgets.QPushButton): - resized = QtCore.Signal() - - def __init__(self, *args, **kwargs): - super(HelpButton, self).__init__(*args, **kwargs) - self.setObjectName("CreateDialogHelpButton") - - self._expanded = None - self.set_expanded() - - def set_expanded(self, expanded=None): - if self._expanded is expanded: - if expanded is not None: - return - expanded = False - self._expanded = expanded - if expanded: - text = "<" - else: - text = "?" 
- self.setText(text) - - self._update_size() - - def _update_size(self): - new_size = self.minimumSizeHint() - if self.size() != new_size: - self.resize(new_size) - self.resized.emit() - - def showEvent(self, event): - super(HelpButton, self).showEvent(event) - self._update_size() - - def resizeEvent(self, event): - super(HelpButton, self).resizeEvent(event) - self._update_size() - - -class CreateDialog(QtWidgets.QDialog): - default_size = (900, 500) - - def __init__( - self, controller, asset_name=None, task_name=None, parent=None - ): - super(CreateDialog, self).__init__(parent) - - self.setWindowTitle("Create new instance") - - self.controller = controller - - if asset_name is None: - asset_name = self.dbcon.Session.get("AVALON_ASSET") - - if task_name is None: - task_name = self.dbcon.Session.get("AVALON_TASK") - - self._asset_name = asset_name - self._task_name = task_name - - self._last_pos = None - self._asset_doc = None - self._subset_names = None - self._selected_creator = None - - self._prereq_available = False - - self._message_dialog = None - - name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) - self._name_pattern = name_pattern - self._compiled_name_pattern = re.compile(name_pattern) - - context_widget = QtWidgets.QWidget(self) - - assets_widget = CreateDialogAssetsWidget(controller, context_widget) - tasks_widget = CreateDialogTasksWidget(controller, context_widget) - - context_layout = QtWidgets.QVBoxLayout(context_widget) - context_layout.setContentsMargins(0, 0, 0, 0) - context_layout.setSpacing(0) - context_layout.addWidget(assets_widget, 2) - context_layout.addWidget(tasks_widget, 1) - - # --- Creators view --- - creators_view = QtWidgets.QListView(self) - creators_model = QtGui.QStandardItemModel() - creators_view.setModel(creators_model) - - variant_input = QtWidgets.QLineEdit(self) - variant_input.setObjectName("VariantInput") - variant_input.setToolTip(VARIANT_TOOLTIP) - - variant_hints_btn = QtWidgets.QPushButton(self) - variant_hints_btn.setFixedWidth(18) - - variant_hints_menu = QtWidgets.QMenu(variant_hints_btn) - variant_hints_group = QtWidgets.QActionGroup(variant_hints_menu) - variant_hints_btn.setMenu(variant_hints_menu) - - variant_layout = QtWidgets.QHBoxLayout() - variant_layout.setContentsMargins(0, 0, 0, 0) - variant_layout.setSpacing(0) - variant_layout.addWidget(variant_input, 1) - variant_layout.addWidget(variant_hints_btn, 0) - - subset_name_input = QtWidgets.QLineEdit(self) - subset_name_input.setEnabled(False) - - create_btn = QtWidgets.QPushButton("Create", self) - create_btn.setEnabled(False) - - form_layout = QtWidgets.QFormLayout() - form_layout.addRow("Name:", variant_layout) - form_layout.addRow("Subset:", subset_name_input) - - mid_widget = QtWidgets.QWidget(self) - mid_layout = QtWidgets.QVBoxLayout(mid_widget) - mid_layout.setContentsMargins(0, 0, 0, 0) - mid_layout.addWidget(QtWidgets.QLabel("Choose family:", self)) - mid_layout.addWidget(creators_view, 1) - mid_layout.addLayout(form_layout, 0) - mid_layout.addWidget(create_btn, 0) - # ------------ - - # --- Creator short info and attr defs --- - creator_attrs_widget = QtWidgets.QWidget(self) - - creator_short_desc_widget = CreatorShortDescWidget( - creator_attrs_widget - ) - - separator_widget = QtWidgets.QWidget(self) - separator_widget.setObjectName("Separator") - separator_widget.setMinimumHeight(2) - separator_widget.setMaximumHeight(2) - - # Precreate attributes widget - pre_create_widget = PreCreateWidget(creator_attrs_widget) - - creator_attrs_layout = 
QtWidgets.QVBoxLayout(creator_attrs_widget) - creator_attrs_layout.setContentsMargins(0, 0, 0, 0) - creator_attrs_layout.addWidget(creator_short_desc_widget, 0) - creator_attrs_layout.addWidget(separator_widget, 0) - creator_attrs_layout.addWidget(pre_create_widget, 1) - # ------------------------------------- - - # --- Detailed information about creator --- - # Detailed description of creator - detail_description_widget = QtWidgets.QTextEdit(self) - detail_description_widget.setObjectName("InfoText") - detail_description_widget.setTextInteractionFlags( - QtCore.Qt.TextBrowserInteraction - ) - detail_description_widget.setVisible(False) - # ------------------------------------------- - - splitter_widget = QtWidgets.QSplitter(self) - splitter_widget.addWidget(context_widget) - splitter_widget.addWidget(mid_widget) - splitter_widget.addWidget(creator_attrs_widget) - splitter_widget.addWidget(detail_description_widget) - splitter_widget.setStretchFactor(0, 1) - splitter_widget.setStretchFactor(1, 1) - splitter_widget.setStretchFactor(2, 1) - splitter_widget.setStretchFactor(3, 1) - - layout = QtWidgets.QHBoxLayout(self) - layout.addWidget(splitter_widget, 1) - - # Floating help button - help_btn = HelpButton(self) - - prereq_timer = QtCore.QTimer() - prereq_timer.setInterval(50) - prereq_timer.setSingleShot(True) - - prereq_timer.timeout.connect(self._on_prereq_timer) - - help_btn.clicked.connect(self._on_help_btn) - help_btn.resized.connect(self._on_help_btn_resize) - - create_btn.clicked.connect(self._on_create) - variant_input.returnPressed.connect(self._on_create) - variant_input.textChanged.connect(self._on_variant_change) - creators_view.selectionModel().currentChanged.connect( - self._on_creator_item_change - ) - variant_hints_menu.triggered.connect(self._on_variant_action) - assets_widget.selection_changed.connect(self._on_asset_change) - assets_widget.current_context_required.connect( - self._on_current_session_context_request - ) - tasks_widget.task_changed.connect(self._on_task_change) - - controller.add_plugins_refresh_callback(self._on_plugins_refresh) - - self._splitter_widget = splitter_widget - - self._context_widget = context_widget - self._assets_widget = assets_widget - self._tasks_widget = tasks_widget - - self.subset_name_input = subset_name_input - - self.variant_input = variant_input - self.variant_hints_btn = variant_hints_btn - self.variant_hints_menu = variant_hints_menu - self.variant_hints_group = variant_hints_group - - self.creators_model = creators_model - self.creators_view = creators_view - self.create_btn = create_btn - - self._creator_short_desc_widget = creator_short_desc_widget - self._pre_create_widget = pre_create_widget - self._detail_description_widget = detail_description_widget - self._help_btn = help_btn - - self._prereq_timer = prereq_timer - self._first_show = True - - def _context_change_is_enabled(self): - return self._context_widget.isEnabled() - - def _get_asset_name(self): - asset_name = None - if self._context_change_is_enabled(): - asset_name = self._assets_widget.get_selected_asset_name() - - if asset_name is None: - asset_name = self._asset_name - return asset_name - - def _get_task_name(self): - task_name = None - if self._context_change_is_enabled(): - # Don't use selection of task if asset is not set - asset_name = self._assets_widget.get_selected_asset_name() - if asset_name: - task_name = self._tasks_widget.get_selected_task_name() - - if not task_name: - task_name = self._task_name - return task_name - - @property - def dbcon(self): 
- return self.controller.dbcon - - def _set_context_enabled(self, enabled): - self._assets_widget.set_enabled(enabled) - self._tasks_widget.set_enabled(enabled) - self._context_widget.setEnabled(enabled) - - def refresh(self): - # Get context before refresh to keep selection of asset and - # task widgets - asset_name = self._get_asset_name() - task_name = self._get_task_name() - - self._prereq_available = False - - # Disable context widget so refresh of asset will use context asset - # name - self._set_context_enabled(False) - - self._assets_widget.refresh() - - # Refresh data before update of creators - self._refresh_asset() - # Then refresh creators which may trigger callbacks using refreshed - # data - self._refresh_creators() - - self._assets_widget.set_current_asset_name(self._asset_name) - self._assets_widget.select_asset_by_name(asset_name) - self._tasks_widget.set_asset_name(asset_name) - self._tasks_widget.select_task_name(task_name) - - self._invalidate_prereq() - - def _invalidate_prereq(self): - self._prereq_timer.start() - - def _on_prereq_timer(self): - prereq_available = True - if self.creators_model.rowCount() < 1: - prereq_available = False - - if self._asset_doc is None: - # QUESTION how to handle invalid asset? - prereq_available = False - - if prereq_available != self._prereq_available: - self._prereq_available = prereq_available - - self.create_btn.setEnabled(prereq_available) - self.creators_view.setEnabled(prereq_available) - self.variant_input.setEnabled(prereq_available) - self.variant_hints_btn.setEnabled(prereq_available) - self._on_variant_change() - - def _refresh_asset(self): - asset_name = self._get_asset_name() - - # Skip if asset did not change - if self._asset_doc and self._asset_doc["name"] == asset_name: - return - - # Make sure `_asset_doc` and `_subset_names` variables are reset - self._asset_doc = None - self._subset_names = None - if asset_name is None: - return - - asset_doc = self.dbcon.find_one({ - "type": "asset", - "name": asset_name - }) - self._asset_doc = asset_doc - - if asset_doc: - subset_docs = self.dbcon.find( - { - "type": "subset", - "parent": asset_doc["_id"] - }, - {"name": 1} - ) - self._subset_names = set(subset_docs.distinct("name")) - - if not asset_doc: - self.subset_name_input.setText("< Asset is not set >") - - def _refresh_creators(self): - # Refresh creators and add their families to list - existing_items = {} - old_creators = set() - for row in range(self.creators_model.rowCount()): - item = self.creators_model.item(row, 0) - identifier = item.data(CREATOR_IDENTIFIER_ROLE) - existing_items[identifier] = item - old_creators.add(identifier) - - # Add new families - new_creators = set() - for identifier, creator in self.controller.manual_creators.items(): - # TODO add details about creator - new_creators.add(identifier) - if identifier in existing_items: - item = existing_items[identifier] - else: - item = QtGui.QStandardItem() - item.setFlags( - QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable - ) - self.creators_model.appendRow(item) - - label = creator.label or identifier - item.setData(label, QtCore.Qt.DisplayRole) - item.setData(identifier, CREATOR_IDENTIFIER_ROLE) - item.setData(creator.family, FAMILY_ROLE) - - # Remove families that are no more available - for identifier in (old_creators - new_creators): - item = existing_items[identifier] - self.creators_model.takeRow(item.row()) - - if self.creators_model.rowCount() < 1: - return - - # Make sure there is a selection - indexes = self.creators_view.selectedIndexes() 
- if not indexes: - index = self.creators_model.index(0, 0) - self.creators_view.setCurrentIndex(index) - else: - index = indexes[0] - - identifier = index.data(CREATOR_IDENTIFIER_ROLE) - - self._set_creator(identifier) - - def _on_plugins_refresh(self): - # Trigger refresh only if is visible - if self.isVisible(): - self.refresh() - - def _on_asset_change(self): - self._refresh_asset() - - asset_name = self._assets_widget.get_selected_asset_name() - self._tasks_widget.set_asset_name(asset_name) - if self._context_change_is_enabled(): - self._invalidate_prereq() - - def _on_task_change(self): - if self._context_change_is_enabled(): - self._invalidate_prereq() - - def _on_current_session_context_request(self): - self._assets_widget.set_current_session_asset() - if self._task_name: - self._tasks_widget.select_task_name(self._task_name) - - def _on_creator_item_change(self, new_index, _old_index): - identifier = None - if new_index.isValid(): - identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) - self._set_creator(identifier) - - def _update_help_btn(self): - pos_x = self.width() - self._help_btn.width() - point = self._creator_short_desc_widget.rect().topRight() - mapped_point = self._creator_short_desc_widget.mapTo(self, point) - pos_y = mapped_point.y() - self._help_btn.move(max(0, pos_x), max(0, pos_y)) - - def _on_help_btn_resize(self): - self._update_help_btn() - - def _on_help_btn(self): - final_size = self.size() - cur_sizes = self._splitter_widget.sizes() - spacing = self._splitter_widget.handleWidth() - - sizes = [] - for idx, value in enumerate(cur_sizes): - if idx < 3: - sizes.append(value) - - now_visible = self._detail_description_widget.isVisible() - if now_visible: - width = final_size.width() - ( - spacing + self._detail_description_widget.width() - ) - - else: - last_size = self._detail_description_widget.sizeHint().width() - width = final_size.width() + spacing + last_size - sizes.append(last_size) - - final_size.setWidth(width) - - self._detail_description_widget.setVisible(not now_visible) - self._splitter_widget.setSizes(sizes) - self.resize(final_size) - - self._help_btn.set_expanded(not now_visible) - - def _set_creator_detailed_text(self, creator): - if not creator: - self._detail_description_widget.setPlainText("") - return - detailed_description = creator.get_detail_description() or "" - if commonmark: - html = commonmark.commonmark(detailed_description) - self._detail_description_widget.setHtml(html) - else: - self._detail_description_widget.setMarkdown(detailed_description) - - def _set_creator(self, identifier): - creator = self.controller.manual_creators.get(identifier) - - self._creator_short_desc_widget.set_plugin(creator) - self._set_creator_detailed_text(creator) - self._pre_create_widget.set_plugin(creator) - - self._selected_creator = creator - - if not creator: - self._set_context_enabled(False) - return - - if ( - creator.create_allow_context_change - != self._context_change_is_enabled() - ): - self._set_context_enabled(creator.create_allow_context_change) - self._refresh_asset() - - default_variants = creator.get_default_variants() - if not default_variants: - default_variants = ["Main"] - - default_variant = creator.get_default_variant() - if not default_variant: - default_variant = default_variants[0] - - for action in tuple(self.variant_hints_menu.actions()): - self.variant_hints_menu.removeAction(action) - action.deleteLater() - - for variant in default_variants: - if variant in SEPARATORS: - self.variant_hints_menu.addSeparator() - elif variant: 
- self.variant_hints_menu.addAction(variant) - - self.variant_input.setText(default_variant or "Main") - - def _on_variant_action(self, action): - value = action.text() - if self.variant_input.text() != value: - self.variant_input.setText(value) - - def _on_variant_change(self, variant_value=None): - if not self._prereq_available: - return - - # This should probably never happen? - if not self._selected_creator: - if self.subset_name_input.text(): - self.subset_name_input.setText("") - return - - if variant_value is None: - variant_value = self.variant_input.text() - - self.create_btn.setEnabled(True) - if not self._compiled_name_pattern.match(variant_value): - self.create_btn.setEnabled(False) - self._set_variant_state_property("invalid") - self.subset_name_input.setText("< Invalid variant >") - return - - project_name = self.controller.project_name - task_name = self._get_task_name() - - asset_doc = copy.deepcopy(self._asset_doc) - # Calculate subset name with Creator plugin - try: - subset_name = self._selected_creator.get_subset_name( - variant_value, task_name, asset_doc, project_name - ) - except TaskNotSetError: - self.create_btn.setEnabled(False) - self._set_variant_state_property("invalid") - self.subset_name_input.setText("< Missing task >") - return - - self.subset_name_input.setText(subset_name) - - self._validate_subset_name(subset_name, variant_value) - - def _validate_subset_name(self, subset_name, variant_value): - # Get all subsets of the current asset - if self._subset_names: - existing_subset_names = set(self._subset_names) - else: - existing_subset_names = set() - existing_subset_names_low = set( - _name.lower() - for _name in existing_subset_names - ) - - # Replace - compare_regex = re.compile(re.sub( - variant_value, "(.+)", subset_name, flags=re.IGNORECASE - )) - variant_hints = set() - if variant_value: - for _name in existing_subset_names: - _result = compare_regex.search(_name) - if _result: - variant_hints |= set(_result.groups()) - - # Remove previous hints from menu - for action in tuple(self.variant_hints_group.actions()): - self.variant_hints_group.removeAction(action) - self.variant_hints_menu.removeAction(action) - action.deleteLater() - - # Add separator if there are hints and menu already has actions - if variant_hints and self.variant_hints_menu.actions(): - self.variant_hints_menu.addSeparator() - - # Add hints to actions - for variant_hint in variant_hints: - action = self.variant_hints_menu.addAction(variant_hint) - self.variant_hints_group.addAction(action) - - # Indicate subset existence - if not variant_value: - property_value = "empty" - - elif subset_name.lower() in existing_subset_names_low: - # validate existence of subset name with lowered text - # - "renderMain" vs. 
"rendermain" mean same path item for - # windows - property_value = "exists" - else: - property_value = "new" - - self._set_variant_state_property(property_value) - - variant_is_valid = variant_value.strip() != "" - if variant_is_valid != self.create_btn.isEnabled(): - self.create_btn.setEnabled(variant_is_valid) - - def _set_variant_state_property(self, state): - current_value = self.variant_input.property("state") - if current_value != state: - self.variant_input.setProperty("state", state) - self.variant_input.style().polish(self.variant_input) - - def moveEvent(self, event): - super(CreateDialog, self).moveEvent(event) - self._last_pos = self.pos() - - def showEvent(self, event): - super(CreateDialog, self).showEvent(event) - if self._first_show: - self._first_show = False - width, height = self.default_size - self.resize(width, height) - - third_size = int(width / 3) - self._splitter_widget.setSizes( - [third_size, third_size, width - (2 * third_size)] - ) - - if self._last_pos is not None: - self.move(self._last_pos) - - self._update_help_btn() - - self.refresh() - - def resizeEvent(self, event): - super(CreateDialog, self).resizeEvent(event) - self._update_help_btn() - - def _on_create(self): - indexes = self.creators_view.selectedIndexes() - if not indexes or len(indexes) > 1: - return - - if not self.create_btn.isEnabled(): - return - - index = indexes[0] - creator_label = index.data(QtCore.Qt.DisplayRole) - creator_identifier = index.data(CREATOR_IDENTIFIER_ROLE) - family = index.data(FAMILY_ROLE) - subset_name = self.subset_name_input.text() - variant = self.variant_input.text() - asset_name = self._get_asset_name() - task_name = self._get_task_name() - pre_create_data = self._pre_create_widget.current_value() - # Where to define these data? - # - what data show be stored? 
- instance_data = { - "asset": asset_name, - "task": task_name, - "variant": variant, - "family": family - } - - error_msg = None - formatted_traceback = None - try: - self.controller.create( - creator_identifier, - subset_name, - instance_data, - pre_create_data - ) - - except CreatorError as exc: - error_msg = str(exc) - - # Use bare except because some hosts raise their exceptions that - # do not inherit from python's `BaseException` - except: - exc_type, exc_value, exc_traceback = sys.exc_info() - formatted_traceback = "".join(traceback.format_exception( - exc_type, exc_value, exc_traceback - )) - error_msg = str(exc_value) - - if error_msg is not None: - box = CreateErrorMessageBox( - creator_label, - subset_name, - asset_name, - error_msg, - formatted_traceback, - parent=self - ) - box.show() - # Store dialog so is not garbage collected before is shown - self._message_dialog = box diff --git a/openpype/tools/publisher/widgets/create_widget.py b/openpype/tools/publisher/widgets/create_widget.py new file mode 100644 index 0000000000..ef9c5b98fe --- /dev/null +++ b/openpype/tools/publisher/widgets/create_widget.py @@ -0,0 +1,792 @@ +import re + +from qtpy import QtWidgets, QtCore, QtGui + +from openpype.pipeline.create import ( + SUBSET_NAME_ALLOWED_SYMBOLS, + PRE_CREATE_THUMBNAIL_KEY, + TaskNotSetError, +) + +from .thumbnail_widget import ThumbnailWidget +from .widgets import ( + IconValuePixmapLabel, + CreateBtn, +) +from .assets_widget import CreateWidgetAssetsWidget +from .tasks_widget import CreateWidgetTasksWidget +from .precreate_widget import PreCreateWidget +from ..constants import ( + VARIANT_TOOLTIP, + FAMILY_ROLE, + CREATOR_IDENTIFIER_ROLE, + CREATOR_THUMBNAIL_ENABLED_ROLE, + CREATOR_SORT_ROLE, +) + +SEPARATORS = ("---separator---", "---") + + +class ResizeControlWidget(QtWidgets.QWidget): + resized = QtCore.Signal() + + def resizeEvent(self, event): + super(ResizeControlWidget, self).resizeEvent(event) + self.resized.emit() + + +# TODO add creator identifier/label to details +class CreatorShortDescWidget(QtWidgets.QWidget): + def __init__(self, parent=None): + super(CreatorShortDescWidget, self).__init__(parent=parent) + + # --- Short description widget --- + icon_widget = IconValuePixmapLabel(None, self) + icon_widget.setObjectName("FamilyIconLabel") + + # --- Short description inputs --- + short_desc_input_widget = QtWidgets.QWidget(self) + + family_label = QtWidgets.QLabel(short_desc_input_widget) + family_label.setAlignment( + QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft + ) + + description_label = QtWidgets.QLabel(short_desc_input_widget) + description_label.setAlignment( + QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft + ) + + short_desc_input_layout = QtWidgets.QVBoxLayout( + short_desc_input_widget + ) + short_desc_input_layout.setSpacing(0) + short_desc_input_layout.addWidget(family_label) + short_desc_input_layout.addWidget(description_label) + # -------------------------------- + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(icon_widget, 0) + layout.addWidget(short_desc_input_widget, 1) + # -------------------------------- + + self._icon_widget = icon_widget + self._family_label = family_label + self._description_label = description_label + + def set_creator_item(self, creator_item=None): + if not creator_item: + self._icon_widget.set_icon_def(None) + self._family_label.setText("") + self._description_label.setText("") + return + + plugin_icon = creator_item.icon + description = creator_item.description or "" + + 
self._icon_widget.set_icon_def(plugin_icon) + self._family_label.setText("{}".format(creator_item.family)) + self._family_label.setTextInteractionFlags(QtCore.Qt.NoTextInteraction) + self._description_label.setText(description) + + +class CreatorsProxyModel(QtCore.QSortFilterProxyModel): + def lessThan(self, left, right): + l_show_order = left.data(CREATOR_SORT_ROLE) + r_show_order = right.data(CREATOR_SORT_ROLE) + if l_show_order == r_show_order: + return super(CreatorsProxyModel, self).lessThan(left, right) + return l_show_order < r_show_order + + +class CreateWidget(QtWidgets.QWidget): + def __init__(self, controller, parent=None): + super(CreateWidget, self).__init__(parent) + + self._controller = controller + + self._asset_name = None + self._subset_names = None + self._selected_creator = None + + self._prereq_available = False + + name_pattern = "^[{}]*$".format(SUBSET_NAME_ALLOWED_SYMBOLS) + self._name_pattern = name_pattern + self._compiled_name_pattern = re.compile(name_pattern) + + main_splitter_widget = QtWidgets.QSplitter(self) + + context_widget = QtWidgets.QWidget(main_splitter_widget) + + assets_widget = CreateWidgetAssetsWidget(controller, context_widget) + tasks_widget = CreateWidgetTasksWidget(controller, context_widget) + + context_layout = QtWidgets.QVBoxLayout(context_widget) + context_layout.setContentsMargins(0, 0, 0, 0) + context_layout.setSpacing(0) + context_layout.addWidget(assets_widget, 2) + context_layout.addWidget(tasks_widget, 1) + + # --- Creators view --- + creators_widget = QtWidgets.QWidget(main_splitter_widget) + + creator_short_desc_widget = CreatorShortDescWidget(creators_widget) + + attr_separator_widget = QtWidgets.QWidget(creators_widget) + attr_separator_widget.setObjectName("Separator") + attr_separator_widget.setMinimumHeight(1) + attr_separator_widget.setMaximumHeight(1) + + creators_splitter = QtWidgets.QSplitter(creators_widget) + + creators_view_widget = QtWidgets.QWidget(creators_splitter) + + creator_view_label = QtWidgets.QLabel( + "Choose publish type", creators_view_widget + ) + + creators_view = QtWidgets.QListView(creators_view_widget) + creators_model = QtGui.QStandardItemModel() + creators_sort_model = CreatorsProxyModel() + creators_sort_model.setSourceModel(creators_model) + creators_view.setModel(creators_sort_model) + + creators_view_layout = QtWidgets.QVBoxLayout(creators_view_widget) + creators_view_layout.setContentsMargins(0, 0, 0, 0) + creators_view_layout.addWidget(creator_view_label, 0) + creators_view_layout.addWidget(creators_view, 1) + + # --- Creator attr defs --- + creators_attrs_widget = QtWidgets.QWidget(creators_splitter) + + # Top part - variant / subset name + thumbnail + creators_attrs_top = QtWidgets.QWidget(creators_attrs_widget) + + # Basics - variant / subset name + creator_basics_widget = ResizeControlWidget(creators_attrs_top) + + variant_subset_label = QtWidgets.QLabel( + "Create options", creator_basics_widget + ) + + variant_subset_widget = QtWidgets.QWidget(creator_basics_widget) + # Variant and subset input + variant_widget = ResizeControlWidget(variant_subset_widget) + variant_widget.setObjectName("VariantInputsWidget") + + variant_input = QtWidgets.QLineEdit(variant_widget) + variant_input.setObjectName("VariantInput") + variant_input.setToolTip(VARIANT_TOOLTIP) + + variant_hints_btn = QtWidgets.QToolButton(variant_widget) + variant_hints_btn.setArrowType(QtCore.Qt.DownArrow) + variant_hints_btn.setIconSize(QtCore.QSize(12, 12)) + + variant_hints_menu = QtWidgets.QMenu(variant_widget) + 
variant_hints_group = QtWidgets.QActionGroup(variant_hints_menu) + + variant_layout = QtWidgets.QHBoxLayout(variant_widget) + variant_layout.setContentsMargins(0, 0, 0, 0) + variant_layout.setSpacing(0) + variant_layout.addWidget(variant_input, 1) + variant_layout.addWidget(variant_hints_btn, 0, QtCore.Qt.AlignVCenter) + + subset_name_input = QtWidgets.QLineEdit(variant_subset_widget) + subset_name_input.setEnabled(False) + + variant_subset_layout = QtWidgets.QFormLayout(variant_subset_widget) + variant_subset_layout.setContentsMargins(0, 0, 0, 0) + variant_subset_layout.addRow("Variant", variant_widget) + variant_subset_layout.addRow("Subset", subset_name_input) + + creator_basics_layout = QtWidgets.QVBoxLayout(creator_basics_widget) + creator_basics_layout.setContentsMargins(0, 0, 0, 0) + creator_basics_layout.addWidget(variant_subset_label, 0) + creator_basics_layout.addWidget(variant_subset_widget, 0) + + thumbnail_widget = ThumbnailWidget(controller, creators_attrs_top) + + creators_attrs_top_layout = QtWidgets.QHBoxLayout(creators_attrs_top) + creators_attrs_top_layout.setContentsMargins(0, 0, 0, 0) + creators_attrs_top_layout.addWidget(creator_basics_widget, 1) + creators_attrs_top_layout.addWidget(thumbnail_widget, 0) + + # Precreate attributes widget + pre_create_widget = PreCreateWidget(creators_attrs_widget) + + # Create button + create_btn_wrapper = QtWidgets.QWidget(creators_attrs_widget) + create_btn = CreateBtn(create_btn_wrapper) + create_btn.setEnabled(False) + + create_btn_wrap_layout = QtWidgets.QHBoxLayout(create_btn_wrapper) + create_btn_wrap_layout.setContentsMargins(0, 0, 0, 0) + create_btn_wrap_layout.addStretch(1) + create_btn_wrap_layout.addWidget(create_btn, 0) + + creators_attrs_layout = QtWidgets.QVBoxLayout(creators_attrs_widget) + creators_attrs_layout.setContentsMargins(0, 0, 0, 0) + creators_attrs_layout.addWidget(creators_attrs_top, 0) + creators_attrs_layout.addWidget(pre_create_widget, 1) + creators_attrs_layout.addWidget(create_btn_wrapper, 0) + + creators_splitter.addWidget(creators_view_widget) + creators_splitter.addWidget(creators_attrs_widget) + creators_splitter.setStretchFactor(0, 1) + creators_splitter.setStretchFactor(1, 2) + + creators_layout = QtWidgets.QVBoxLayout(creators_widget) + creators_layout.setContentsMargins(0, 0, 0, 0) + creators_layout.addWidget(creator_short_desc_widget, 0) + creators_layout.addWidget(attr_separator_widget, 0) + creators_layout.addWidget(creators_splitter, 1) + # ------------ + + # --- Detailed information about creator --- + # Detailed description of creator + # TODO there is currently no way to show this + + # ------------------------------------------- + main_splitter_widget.addWidget(context_widget) + main_splitter_widget.addWidget(creators_widget) + main_splitter_widget.setStretchFactor(0, 1) + main_splitter_widget.setStretchFactor(1, 3) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(main_splitter_widget, 1) + + prereq_timer = QtCore.QTimer() + prereq_timer.setInterval(50) + prereq_timer.setSingleShot(True) + + prereq_timer.timeout.connect(self._invalidate_prereq) + + create_btn.clicked.connect(self._on_create) + variant_widget.resized.connect(self._on_variant_widget_resize) + creator_basics_widget.resized.connect(self._on_creator_basics_resize) + variant_input.returnPressed.connect(self._on_create) + variant_input.textChanged.connect(self._on_variant_change) + creators_view.selectionModel().currentChanged.connect( + self._on_creator_item_change + ) + 
variant_hints_btn.clicked.connect(self._on_variant_btn_click) + variant_hints_menu.triggered.connect(self._on_variant_action) + assets_widget.selection_changed.connect(self._on_asset_change) + assets_widget.current_context_required.connect( + self._on_current_session_context_request + ) + tasks_widget.task_changed.connect(self._on_task_change) + thumbnail_widget.thumbnail_created.connect(self._on_thumbnail_create) + thumbnail_widget.thumbnail_cleared.connect(self._on_thumbnail_clear) + + controller.event_system.add_callback( + "plugins.refresh.finished", self._on_plugins_refresh + ) + + self._main_splitter_widget = main_splitter_widget + + self._creators_splitter = creators_splitter + + self._context_widget = context_widget + self._assets_widget = assets_widget + self._tasks_widget = tasks_widget + + self.subset_name_input = subset_name_input + + self.variant_input = variant_input + self.variant_hints_btn = variant_hints_btn + self.variant_hints_menu = variant_hints_menu + self.variant_hints_group = variant_hints_group + + self._creators_model = creators_model + self._creators_sort_model = creators_sort_model + self._creators_view = creators_view + self._create_btn = create_btn + + self._creator_short_desc_widget = creator_short_desc_widget + self._creator_basics_widget = creator_basics_widget + self._thumbnail_widget = thumbnail_widget + self._pre_create_widget = pre_create_widget + self._attr_separator_widget = attr_separator_widget + + self._prereq_timer = prereq_timer + self._first_show = True + self._last_thumbnail_path = None + + @property + def current_asset_name(self): + return self._controller.current_asset_name + + @property + def current_task_name(self): + return self._controller.current_task_name + + def _context_change_is_enabled(self): + return self._context_widget.isEnabled() + + def _get_asset_name(self): + asset_name = None + if self._context_change_is_enabled(): + asset_name = self._assets_widget.get_selected_asset_name() + + if asset_name is None: + asset_name = self.current_asset_name + return asset_name or None + + def _get_task_name(self): + task_name = None + if self._context_change_is_enabled(): + # Don't use selection of task if asset is not set + asset_name = self._assets_widget.get_selected_asset_name() + if asset_name: + task_name = self._tasks_widget.get_selected_task_name() + + if not task_name: + task_name = self.current_task_name + return task_name + + def _set_context_enabled(self, enabled): + self._assets_widget.set_enabled(enabled) + self._tasks_widget.set_enabled(enabled) + check_prereq = self._context_widget.isEnabled() != enabled + self._context_widget.setEnabled(enabled) + if check_prereq: + self._invalidate_prereq() + + def refresh(self): + # Get context before refresh to keep selection of asset and + # task widgets + asset_name = self._get_asset_name() + task_name = self._get_task_name() + + self._prereq_available = False + + # Disable context widget so refresh of asset will use context asset + # name + self._set_context_enabled(False) + + self._assets_widget.refresh() + + # Refresh data before update of creators + self._refresh_asset() + # Then refresh creators which may trigger callbacks using refreshed + # data + self._refresh_creators() + + self._assets_widget.update_current_asset() + self._assets_widget.select_asset_by_name(asset_name) + self._tasks_widget.set_asset_name(asset_name) + self._tasks_widget.select_task_name(task_name) + + self._invalidate_prereq_deffered() + + def _invalidate_prereq_deffered(self): + self._prereq_timer.start() + + 
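# The `prereq_timer` wiring above implements a small debounce: asset and task
# selection changes only (re)start a 50 ms single-shot timer, so a burst of
# changes triggers `_invalidate_prereq` once. A minimal standalone sketch of
# that pattern, assuming a Qt binding via qtpy as the patch itself uses; the
# names `make_debounced` and `callback` are illustrative, not part of the patch:
from qtpy import QtCore

def make_debounced(callback, interval_ms=50):
    timer = QtCore.QTimer()
    timer.setInterval(interval_ms)
    timer.setSingleShot(True)
    timer.timeout.connect(callback)
    # Calling `start` while the timer is running restarts it, so only the
    # last call in a burst actually fires `callback`.
    return timer.start

# Usage: debounced = make_debounced(expensive_revalidation)
# then call `debounced()` from every change handler.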
def _invalidate_prereq(self): + prereq_available = True + creator_btn_tooltips = [] + + available_creators = self._creators_model.rowCount() > 0 + if available_creators != self._creators_view.isEnabled(): + self._creators_view.setEnabled(available_creators) + + if not available_creators: + prereq_available = False + creator_btn_tooltips.append("Creator is not selected") + + if self._context_change_is_enabled() and self._asset_name is None: + # QUESTION how to handle invalid asset? + prereq_available = False + creator_btn_tooltips.append("Context is not selected") + + if prereq_available != self._prereq_available: + self._prereq_available = prereq_available + + self._create_btn.setEnabled(prereq_available) + + self.variant_input.setEnabled(prereq_available) + self.variant_hints_btn.setEnabled(prereq_available) + + tooltip = "" + if creator_btn_tooltips: + tooltip = "\n".join(creator_btn_tooltips) + self._create_btn.setToolTip(tooltip) + + self._on_variant_change() + + def _refresh_asset(self): + asset_name = self._get_asset_name() + + # Skip if asset did not change + if self._asset_name and self._asset_name == asset_name: + return + + # Make sure `_asset_name` and `_subset_names` variables are reset + self._asset_name = asset_name + self._subset_names = None + if asset_name is None: + return + + subset_names = self._controller.get_existing_subset_names(asset_name) + + self._subset_names = subset_names + if subset_names is None: + self.subset_name_input.setText("< Asset is not set >") + + def _refresh_creators(self): + # Refresh creators and add their families to list + existing_items = {} + old_creators = set() + for row in range(self._creators_model.rowCount()): + item = self._creators_model.item(row, 0) + identifier = item.data(CREATOR_IDENTIFIER_ROLE) + existing_items[identifier] = item + old_creators.add(identifier) + + # Add new families + new_creators = set() + creator_items_by_identifier = self._controller.creator_items + for identifier, creator_item in creator_items_by_identifier.items(): + if creator_item.creator_type != "artist": + continue + + # TODO add details about creator + new_creators.add(identifier) + if identifier in existing_items: + is_new = False + item = existing_items[identifier] + else: + is_new = True + item = QtGui.QStandardItem() + item.setFlags( + QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable + ) + + item.setData(creator_item.label, QtCore.Qt.DisplayRole) + item.setData(creator_item.show_order, CREATOR_SORT_ROLE) + item.setData(identifier, CREATOR_IDENTIFIER_ROLE) + item.setData( + creator_item.create_allow_thumbnail, + CREATOR_THUMBNAIL_ENABLED_ROLE + ) + item.setData(creator_item.family, FAMILY_ROLE) + if is_new: + self._creators_model.appendRow(item) + + # Remove families that are no longer available + for identifier in (old_creators - new_creators): + item = existing_items[identifier] + self._creators_model.takeRow(item.row()) + + if self._creators_model.rowCount() < 1: + return + + self._creators_sort_model.sort(0) + # Make sure there is a selection + indexes = self._creators_view.selectedIndexes() + if not indexes: + index = self._creators_sort_model.index(0, 0) + self._creators_view.setCurrentIndex(index) + else: + index = indexes[0] + + identifier = index.data(CREATOR_IDENTIFIER_ROLE) + create_item = creator_items_by_identifier.get(identifier) + + self._set_creator(create_item) + + def _on_plugins_refresh(self): + # Refresh content when controller finished plugins refresh + self.refresh() + + def _on_asset_change(self): + self._refresh_asset() + + asset_name = 
self._assets_widget.get_selected_asset_name() + self._tasks_widget.set_asset_name(asset_name) + if self._context_change_is_enabled(): + self._invalidate_prereq_deffered() + + def _on_task_change(self): + if self._context_change_is_enabled(): + self._invalidate_prereq_deffered() + + def _on_thumbnail_create(self, thumbnail_path): + self._last_thumbnail_path = thumbnail_path + self._thumbnail_widget.set_current_thumbnails([thumbnail_path]) + + def _on_thumbnail_clear(self): + self._last_thumbnail_path = None + + def _on_current_session_context_request(self): + self._assets_widget.set_current_session_asset() + task_name = self.current_task_name + if task_name: + self._tasks_widget.select_task_name(task_name) + + def _on_creator_item_change(self, new_index, _old_index): + identifier = None + if new_index.isValid(): + identifier = new_index.data(CREATOR_IDENTIFIER_ROLE) + self._set_creator_by_identifier(identifier) + + def _set_creator_detailed_text(self, creator_item): + # TODO implement + description = "" + if creator_item is not None: + description = creator_item.detailed_description or description + self._controller.event_system.emit( + "show.detailed.help", + { + "message": description + }, + "create.widget" + ) + + def _set_creator_by_identifier(self, identifier): + creator_item = self._controller.creator_items.get(identifier) + self._set_creator(creator_item) + + def _set_creator(self, creator_item): + """Set current creator item. + + Args: + creator_item (CreatorItem): Item representing creator that can be + triggered by artist. + """ + + self._creator_short_desc_widget.set_creator_item(creator_item) + self._set_creator_detailed_text(creator_item) + self._pre_create_widget.set_creator_item(creator_item) + + self._selected_creator = creator_item + + if not creator_item: + self._set_context_enabled(False) + return + + if ( + creator_item.create_allow_context_change + != self._context_change_is_enabled() + ): + self._set_context_enabled(creator_item.create_allow_context_change) + self._refresh_asset() + + self._thumbnail_widget.setVisible( + creator_item.create_allow_thumbnail + ) + + default_variants = creator_item.default_variants + if not default_variants: + default_variants = ["Main"] + + default_variant = creator_item.default_variant + if not default_variant: + default_variant = default_variants[0] + + for action in tuple(self.variant_hints_menu.actions()): + self.variant_hints_menu.removeAction(action) + action.deleteLater() + + for variant in default_variants: + if variant in SEPARATORS: + self.variant_hints_menu.addSeparator() + elif variant: + self.variant_hints_menu.addAction(variant) + + variant_text = default_variant or "Main" + # Make sure subset name is updated to new plugin + if variant_text == self.variant_input.text(): + self._on_variant_change() + else: + self.variant_input.setText(variant_text) + + def _on_variant_widget_resize(self): + self.variant_hints_btn.setFixedHeight(self.variant_input.height()) + + def _on_variant_btn_click(self): + pos = self.variant_hints_btn.rect().bottomLeft() + point = self.variant_hints_btn.mapToGlobal(pos) + self.variant_hints_menu.popup(point) + + def _on_variant_action(self, action): + value = action.text() + if self.variant_input.text() != value: + self.variant_input.setText(value) + + def _on_variant_change(self, variant_value=None): + if not self._prereq_available: + return + + # This should probably never happen? 
+ if not self._selected_creator: + if self.subset_name_input.text(): + self.subset_name_input.setText("") + return + + if variant_value is None: + variant_value = self.variant_input.text() + + if not self._compiled_name_pattern.match(variant_value): + self._create_btn.setEnabled(False) + self._set_variant_state_property("invalid") + self.subset_name_input.setText("< Invalid variant >") + return + + if not self._context_change_is_enabled(): + self._create_btn.setEnabled(True) + self._set_variant_state_property("") + self.subset_name_input.setText("< Valid variant >") + return + + asset_name = self._get_asset_name() + task_name = self._get_task_name() + creator_identifier = self._selected_creator.identifier + # Calculate subset name with Creator plugin + try: + subset_name = self._controller.get_subset_name( + creator_identifier, variant_value, task_name, asset_name + ) + except TaskNotSetError: + self._create_btn.setEnabled(False) + self._set_variant_state_property("invalid") + self.subset_name_input.setText("< Missing task >") + return + + self.subset_name_input.setText(subset_name) + + self._create_btn.setEnabled(True) + self._validate_subset_name(subset_name, variant_value) + + def _validate_subset_name(self, subset_name, variant_value): + # Get all subsets of the current asset + if self._subset_names: + existing_subset_names = set(self._subset_names) + else: + existing_subset_names = set() + existing_subset_names_low = set( + _name.lower() + for _name in existing_subset_names + ) + + # Replace the variant in the subset name with a capture group + compare_regex = re.compile(re.sub( + variant_value, "(.+)", subset_name, flags=re.IGNORECASE + )) + variant_hints = set() + if variant_value: + for _name in existing_subset_names: + _result = compare_regex.search(_name) + if _result: + variant_hints |= set(_result.groups()) + + # Remove previous hints from menu + for action in tuple(self.variant_hints_group.actions()): + self.variant_hints_group.removeAction(action) + self.variant_hints_menu.removeAction(action) + action.deleteLater() + + # Add separator if there are hints and menu already has actions + if variant_hints and self.variant_hints_menu.actions(): + self.variant_hints_menu.addSeparator() + + # Add hints to actions + for variant_hint in variant_hints: + action = self.variant_hints_menu.addAction(variant_hint) + self.variant_hints_group.addAction(action) + + # Indicate subset existence + if not variant_value: + property_value = "empty" + + elif subset_name.lower() in existing_subset_names_low: + # validate existence of subset name with lowercased text + # - "renderMain" vs. "rendermain" mean the same path item on + # Windows + property_value = "exists" + else: + property_value = "new" + + self._set_variant_state_property(property_value) + + variant_is_valid = variant_value.strip() != "" + if variant_is_valid != self._create_btn.isEnabled(): + self._create_btn.setEnabled(variant_is_valid) + 
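# A worked example of the variant-hint extraction in `_validate_subset_name`
# above; the subset and variant values here are illustrative:
import re

variant_value = "Main"
subset_name = "renderMain"
# Substituting the variant for a capture group yields the pattern "render(.+)"
compare_regex = re.compile(re.sub(
    variant_value, "(.+)", subset_name, flags=re.IGNORECASE
))
existing_subset_names = ["renderLo", "renderHi", "modelMain"]
variant_hints = set()
for name in existing_subset_names:
    result = compare_regex.search(name)
    if result:
        variant_hints |= set(result.groups())
# variant_hints == {"Lo", "Hi"} -> these are offered in the hints menu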
"rendermain" mean same path item for + # windows + property_value = "exists" + else: + property_value = "new" + + self._set_variant_state_property(property_value) + + variant_is_valid = variant_value.strip() != "" + if variant_is_valid != self._create_btn.isEnabled(): + self._create_btn.setEnabled(variant_is_valid) + + def _set_variant_state_property(self, state): + current_value = self.variant_input.property("state") + if current_value != state: + self.variant_input.setProperty("state", state) + self.variant_input.style().polish(self.variant_input) + + def _on_first_show(self): + width = self.width() + part = int(width / 4) + rem_width = width - part + self._main_splitter_widget.setSizes([part, rem_width]) + rem_width = rem_width - part + self._creators_splitter.setSizes([part, rem_width]) + + def showEvent(self, event): + super(CreateWidget, self).showEvent(event) + if self._first_show: + self._first_show = False + self._on_first_show() + + def _on_creator_basics_resize(self): + self._thumbnail_widget.set_height( + self._creator_basics_widget.sizeHint().height() + ) + + def _on_create(self): + indexes = self._creators_view.selectedIndexes() + if not indexes or len(indexes) > 1: + return + + if not self._create_btn.isEnabled(): + return + + index = indexes[0] + creator_identifier = index.data(CREATOR_IDENTIFIER_ROLE) + family = index.data(FAMILY_ROLE) + variant = self.variant_input.text() + # Care about subset name only if context change is enabled + subset_name = None + asset_name = None + task_name = None + if self._context_change_is_enabled(): + subset_name = self.subset_name_input.text() + asset_name = self._get_asset_name() + task_name = self._get_task_name() + + pre_create_data = self._pre_create_widget.current_value() + if index.data(CREATOR_THUMBNAIL_ENABLED_ROLE): + pre_create_data[PRE_CREATE_THUMBNAIL_KEY] = ( + self._last_thumbnail_path + ) + + # Where to define these data? + # - what data show be stored? + instance_data = { + "asset": asset_name, + "task": task_name, + "variant": variant, + "family": family + } + + success = self._controller.create( + creator_identifier, + subset_name, + instance_data, + pre_create_data + ) + + if success: + self._set_creator(self._selected_creator) + self._controller.emit_card_message("Creation finished...") + self._last_thumbnail_path = None + self._thumbnail_widget.set_current_thumbnails() diff --git a/openpype/tools/publisher/widgets/help_widget.py b/openpype/tools/publisher/widgets/help_widget.py new file mode 100644 index 0000000000..5d474613df --- /dev/null +++ b/openpype/tools/publisher/widgets/help_widget.py @@ -0,0 +1,84 @@ +try: + import commonmark +except Exception: + commonmark = None + +from qtpy import QtWidgets, QtCore + + +class HelpButton(QtWidgets.QPushButton): + """Button used to trigger help dialog.""" + + def __init__(self, parent): + super(HelpButton, self).__init__(parent) + self.setObjectName("CreateDialogHelpButton") + self.setText("?") + + +class HelpWidget(QtWidgets.QWidget): + """Widget showing help for single functionality.""" + + def __init__(self, parent): + super(HelpWidget, self).__init__(parent) + + # TODO add hints what to help with? 
+ detail_description_input = QtWidgets.QTextEdit(self) + detail_description_input.setObjectName("CreatorDetailedDescription") + detail_description_input.setTextInteractionFlags( + QtCore.Qt.TextBrowserInteraction + ) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.setSpacing(0) + main_layout.addWidget(detail_description_input, 1) + + self._detail_description_input = detail_description_input + + self.set_detailed_text() + + def set_detailed_text(self, text=None): + if not text: + text = "We didn't prepare help for this part..." + + if commonmark: + html = commonmark.commonmark(text) + self._detail_description_input.setHtml(html) + elif hasattr(self._detail_description_input, "setMarkdown"): + self._detail_description_input.setMarkdown(text) + else: + self._detail_description_input.setText(text) + + +class HelpDialog(QtWidgets.QDialog): + default_width = 530 + default_height = 340 + + def __init__(self, controller, parent): + super(HelpDialog, self).__init__(parent) + + self.setWindowTitle("Help dialog") + + help_content = HelpWidget(self) + + main_layout = QtWidgets.QHBoxLayout(self) + main_layout.addWidget(help_content, 1) + + controller.event_system.add_callback( + "show.detailed.help", self._on_help_request + ) + + self._controller = controller + + self._help_content = help_content + + def _on_help_request(self, event): + message = event.get("message") + self.set_detailed_text(message) + + def set_detailed_text(self, text=None): + self._help_content.set_detailed_text(text) + + def showEvent(self, event): + super(HelpDialog, self).showEvent(event) + self.resize(self.default_width, self.default_height) diff --git a/openpype/tools/publisher/widgets/icons.py b/openpype/tools/publisher/widgets/icons.py index fd5c45f901..8aa82f580f 100644 --- a/openpype/tools/publisher/widgets/icons.py +++ b/openpype/tools/publisher/widgets/icons.py @@ -1,6 +1,6 @@ import os -from Qt import QtGui +from qtpy import QtGui def get_icon_path(icon_name=None, filename=None): diff --git a/openpype/tools/publisher/widgets/images/clear_thumbnail.png b/openpype/tools/publisher/widgets/images/clear_thumbnail.png new file mode 100644 index 0000000000..406328cb51 Binary files /dev/null and b/openpype/tools/publisher/widgets/images/clear_thumbnail.png differ diff --git a/openpype/tools/publisher/widgets/images/copy.png b/openpype/tools/publisher/widgets/images/copy.png deleted file mode 100644 index 522afcdc87..0000000000 Binary files a/openpype/tools/publisher/widgets/images/copy.png and /dev/null differ diff --git a/openpype/tools/publisher/widgets/images/create.png b/openpype/tools/publisher/widgets/images/create.png new file mode 100644 index 0000000000..d691f364dd Binary files /dev/null and b/openpype/tools/publisher/widgets/images/create.png differ diff --git a/openpype/tools/publisher/widgets/images/download_arrow.png b/openpype/tools/publisher/widgets/images/download_arrow.png deleted file mode 100644 index a35a12fb39..0000000000 Binary files a/openpype/tools/publisher/widgets/images/download_arrow.png and /dev/null differ diff --git a/openpype/tools/publisher/widgets/images/validate.png b/openpype/tools/publisher/widgets/images/validate.png index d3cfa0b75d..c8472e9d31 100644 Binary files a/openpype/tools/publisher/widgets/images/validate.png and b/openpype/tools/publisher/widgets/images/validate.png differ diff --git a/openpype/tools/publisher/widgets/images/view_report.png b/openpype/tools/publisher/widgets/images/view_report.png index 
50e214c3f8..6f3efd5e19 100644 Binary files a/openpype/tools/publisher/widgets/images/view_report.png and b/openpype/tools/publisher/widgets/images/view_report.png differ diff --git a/openpype/tools/publisher/widgets/list_view_widgets.py b/openpype/tools/publisher/widgets/list_view_widgets.py index 6bddaf66c8..172563d15c 100644 --- a/openpype/tools/publisher/widgets/list_view_widgets.py +++ b/openpype/tools/publisher/widgets/list_view_widgets.py @@ -24,17 +24,21 @@ selection can be enabled disabled using checkbox or keyboard key presses: """ import collections -from Qt import QtWidgets, QtCore, QtGui +from qtpy import QtWidgets, QtCore, QtGui from openpype.style import get_objected_colors from openpype.widgets.nice_checkbox import NiceCheckbox +from openpype.tools.utils.lib import html_escape, checkstate_int_to_enum from .widgets import AbstractInstanceView from ..constants import ( INSTANCE_ID_ROLE, SORT_VALUE_ROLE, IS_GROUP_ROLE, CONTEXT_ID, - CONTEXT_LABEL + CONTEXT_LABEL, + GROUP_ROLE, + CONVERTER_IDENTIFIER_ROLE, + CONVERTOR_ITEM_GROUP, ) @@ -53,8 +57,7 @@ class ListItemDelegate(QtWidgets.QStyledItemDelegate): def __init__(self, parent): super(ListItemDelegate, self).__init__(parent) - colors_data = get_objected_colors() - group_color_info = colors_data["publisher"]["list-view-group"] + group_color_info = get_objected_colors("publisher", "list-view-group") self._group_colors = { key: value.get_qcolor() @@ -83,9 +86,9 @@ class ListItemDelegate(QtWidgets.QStyledItemDelegate): painter.save() painter.setRenderHints( - painter.Antialiasing - | painter.SmoothPixmapTransform - | painter.TextAntialiasing + QtGui.QPainter.Antialiasing + | QtGui.QPainter.SmoothPixmapTransform + | QtGui.QPainter.TextAntialiasing ) # Draw backgrounds @@ -113,7 +116,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): self.instance = instance - subset_name_label = QtWidgets.QLabel(instance["subset"], self) + instance_label = html_escape(instance.label) + + subset_name_label = QtWidgets.QLabel(instance_label, self) subset_name_label.setObjectName("ListViewSubsetName") active_checkbox = NiceCheckbox(parent=self) @@ -132,7 +137,7 @@ class InstanceListItemWidget(QtWidgets.QWidget): active_checkbox.stateChanged.connect(self._on_active_change) - self._subset_name_label = subset_name_label + self._instance_label_widget = subset_name_label self._active_checkbox = active_checkbox self._has_valid_context = None @@ -146,8 +151,8 @@ class InstanceListItemWidget(QtWidgets.QWidget): state = "" if not valid: state = "invalid" - self._subset_name_label.setProperty("state", state) - self._subset_name_label.style().polish(self._subset_name_label) + self._instance_label_widget.setProperty("state", state) + self._instance_label_widget.style().polish(self._instance_label_widget) def is_active(self): """Instance is activated.""" @@ -176,9 +181,9 @@ class InstanceListItemWidget(QtWidgets.QWidget): def update_instance_values(self): """Update instance data propagated to widgets.""" # Check subset name - subset_name = self.instance["subset"] - if subset_name != self._subset_name_label.text(): - self._subset_name_label.setText(subset_name) + label = self.instance.label + if label != self._instance_label_widget.text(): + self._instance_label_widget.setText(html_escape(label)) # Check active state self.set_active(self.instance["active"]) # Check valid states @@ -267,6 +272,7 @@ class InstanceListGroupWidget(QtWidgets.QFrame): state(QtCore.Qt.CheckState): Checkstate of checkbox. Have 3 variants Unchecked, Checked and PartiallyChecked. 
""" + if self.checkstate() == state: return self._ignore_state_change = True @@ -274,7 +280,8 @@ class InstanceListGroupWidget(QtWidgets.QFrame): self._ignore_state_change = False def checkstate(self): - """CUrrent checkstate of "active" checkbox.""" + """Current checkstate of "active" checkbox.""" + return self.toggle_checkbox.checkState() def _on_checkbox_change(self, state): @@ -328,6 +335,9 @@ class InstanceTreeView(QtWidgets.QTreeView): """Ids of selected instances.""" instance_ids = set() for index in self.selectionModel().selectedIndexes(): + if index.data(CONVERTER_IDENTIFIER_ROLE) is not None: + continue + instance_id = index.data(INSTANCE_ID_ROLE) if instance_id is not None: instance_ids.add(instance_id) @@ -407,7 +417,7 @@ class InstanceListView(AbstractInstanceView): def __init__(self, controller, parent): super(InstanceListView, self).__init__(parent) - self.controller = controller + self._controller = controller instance_view = InstanceTreeView(self) instance_delegate = ListItemDelegate(instance_view) @@ -437,26 +447,35 @@ class InstanceListView(AbstractInstanceView): self._group_items = {} self._group_widgets = {} self._widgets_by_id = {} + # Group by instance id for handling of active state self._group_by_instance_id = {} self._context_item = None self._context_widget = None + self._convertor_group_item = None + self._convertor_group_widget = None + self._convertor_items_by_id = {} + self._instance_view = instance_view self._instance_delegate = instance_delegate self._instance_model = instance_model self._proxy_model = proxy_model def _on_expand(self, index): - group_name = index.data(SORT_VALUE_ROLE) - group_widget = self._group_widgets.get(group_name) - if group_widget: - group_widget.set_expanded(True) + self._update_widget_expand_state(index, True) def _on_collapse(self, index): - group_name = index.data(SORT_VALUE_ROLE) - group_widget = self._group_widgets.get(group_name) + self._update_widget_expand_state(index, False) + + def _update_widget_expand_state(self, index, expanded): + group_name = index.data(GROUP_ROLE) + if group_name == CONVERTOR_ITEM_GROUP: + group_widget = self._convertor_group_widget + else: + group_widget = self._group_widgets.get(group_name) + if group_widget: - group_widget.set_expanded(False) + group_widget.set_expanded(expanded) def _on_toggle_request(self, toggle): selected_instance_ids = self._instance_view.get_selected_instance_ids() @@ -515,83 +534,30 @@ class InstanceListView(AbstractInstanceView): def refresh(self): """Refresh instances in the view.""" - # Prepare instances by their groups - instances_by_group_name = collections.defaultdict(list) - group_names = set() - for instance in self.controller.instances: - group_label = instance.creator_label - group_names.add(group_label) - instances_by_group_name[group_label].append(instance) - # Sort view at the end of refresh # - is turned off until any change in view happens sort_at_the_end = False - - # Access to root item of main model - root_item = self._instance_model.invisibleRootItem() - # Create or use already existing context item # - context widget does not change so we don't have to update anything - context_item = None - if self._context_item is None: + if self._make_sure_context_item_exists(): sort_at_the_end = True - context_item = QtGui.QStandardItem() - context_item.setData(0, SORT_VALUE_ROLE) - context_item.setData(CONTEXT_ID, INSTANCE_ID_ROLE) - root_item.appendRow(context_item) + self._update_convertor_items_group() - index = self._instance_model.index( - context_item.row(), 
context_item.column() - ) - proxy_index = self._proxy_model.mapFromSource(index) - widget = ListContextWidget(self._instance_view) - self._instance_view.setIndexWidget(proxy_index, widget) - - self._context_widget = widget - self._context_item = context_item + # Prepare instances by their groups + instances_by_group_name = collections.defaultdict(list) + group_names = set() + for instance in self._controller.instances.values(): + group_label = instance.group_label + group_names.add(group_label) + instances_by_group_name[group_label].append(instance) # Create new groups based on prepared `instances_by_group_name` - new_group_items = [] - for group_name in group_names: - if group_name in self._group_items: - continue - - group_item = QtGui.QStandardItem() - group_item.setData(group_name, SORT_VALUE_ROLE) - group_item.setData(True, IS_GROUP_ROLE) - group_item.setFlags(QtCore.Qt.ItemIsEnabled) - self._group_items[group_name] = group_item - new_group_items.append(group_item) - - # Add new group items to root item if there are any - if new_group_items: - # Trigger sort at the end + if self._make_sure_groups_exists(group_names): sort_at_the_end = True - root_item.appendRows(new_group_items) - - # Create widget for each new group item and store it for future usage - for group_item in new_group_items: - index = self._instance_model.index( - group_item.row(), group_item.column() - ) - proxy_index = self._proxy_model.mapFromSource(index) - group_name = group_item.data(SORT_VALUE_ROLE) - widget = InstanceListGroupWidget(group_name, self._instance_view) - widget.expand_changed.connect(self._on_group_expand_request) - widget.toggle_requested.connect(self._on_group_toggle_request) - self._group_widgets[group_name] = widget - self._instance_view.setIndexWidget(proxy_index, widget) # Remove groups that are not available anymore - for group_name in tuple(self._group_items.keys()): - if group_name in group_names: - continue - - group_item = self._group_items.pop(group_name) - root_item.removeRow(group_item.row()) - widget = self._group_widgets.pop(group_name) - widget.deleteLater() + self._remove_groups_except(group_names) # Store which groups should be expanded at the end expand_groups = set() @@ -650,6 +616,7 @@ class InstanceListView(AbstractInstanceView): # Create new item and store it as new item = QtGui.QStandardItem() item.setData(instance["subset"], SORT_VALUE_ROLE) + item.setData(instance["subset"], GROUP_ROLE) item.setData(instance_id, INSTANCE_ID_ROLE) new_items.append(item) new_items_with_instance.append((item, instance)) @@ -715,19 +682,158 @@ class InstanceListView(AbstractInstanceView): self._instance_view.expand(proxy_index) + def _make_sure_context_item_exists(self): + if self._context_item is not None: + return False + + root_item = self._instance_model.invisibleRootItem() + context_item = QtGui.QStandardItem() + context_item.setData(0, SORT_VALUE_ROLE) + context_item.setData(CONTEXT_ID, INSTANCE_ID_ROLE) + + root_item.appendRow(context_item) + + index = self._instance_model.index( + context_item.row(), context_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + widget = ListContextWidget(self._instance_view) + self._instance_view.setIndexWidget(proxy_index, widget) + + self._context_widget = widget + self._context_item = context_item + return True + + def _update_convertor_items_group(self): + created_new_items = False + convertor_items_by_id = self._controller.convertor_items + group_item = self._convertor_group_item + if not convertor_items_by_id and group_item is 
None: + return created_new_items + + root_item = self._instance_model.invisibleRootItem() + if not convertor_items_by_id: + root_item.removeRow(group_item.row()) + self._convertor_group_widget.deleteLater() + self._convertor_group_widget = None + self._convertor_items_by_id = {} + return created_new_items + + if group_item is None: + created_new_items = True + group_item = QtGui.QStandardItem() + group_item.setData(CONVERTOR_ITEM_GROUP, GROUP_ROLE) + group_item.setData(1, SORT_VALUE_ROLE) + group_item.setData(True, IS_GROUP_ROLE) + group_item.setFlags(QtCore.Qt.ItemIsEnabled) + + root_item.appendRow(group_item) + + index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + widget = InstanceListGroupWidget( + CONVERTOR_ITEM_GROUP, self._instance_view + ) + widget.toggle_checkbox.setVisible(False) + widget.expand_changed.connect( + self._on_convertor_group_expand_request + ) + self._instance_view.setIndexWidget(proxy_index, widget) + + self._convertor_group_item = group_item + self._convertor_group_widget = widget + + for row in reversed(range(group_item.rowCount())): + child_item = group_item.child(row) + child_identifier = child_item.data(CONVERTER_IDENTIFIER_ROLE) + if child_identifier not in convertor_items_by_id: + self._convertor_items_by_id.pop(child_identifier, None) + group_item.removeRows(row, 1) + + new_items = [] + for identifier, convertor_item in convertor_items_by_id.items(): + item = self._convertor_items_by_id.get(identifier) + if item is None: + created_new_items = True + item = QtGui.QStandardItem(convertor_item.label) + new_items.append(item) + item.setData(convertor_item.id, INSTANCE_ID_ROLE) + item.setData(convertor_item.label, SORT_VALUE_ROLE) + item.setData(CONVERTOR_ITEM_GROUP, GROUP_ROLE) + item.setData( + convertor_item.identifier, CONVERTER_IDENTIFIER_ROLE + ) + self._convertor_items_by_id[identifier] = item + + if new_items: + group_item.appendRows(new_items) + + return created_new_items + + def _make_sure_groups_exists(self, group_names): + new_group_items = [] + for group_name in group_names: + if group_name in self._group_items: + continue + + group_item = QtGui.QStandardItem() + group_item.setData(group_name, GROUP_ROLE) + group_item.setData(group_name, SORT_VALUE_ROLE) + group_item.setData(True, IS_GROUP_ROLE) + group_item.setFlags(QtCore.Qt.ItemIsEnabled) + self._group_items[group_name] = group_item + new_group_items.append(group_item) + + # Add new group items to root item if there are any + if not new_group_items: + return False + + # Access to root item of main model + root_item = self._instance_model.invisibleRootItem() + root_item.appendRows(new_group_items) + + # Create widget for each new group item and store it for future usage + for group_item in new_group_items: + index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(index) + group_name = group_item.data(GROUP_ROLE) + widget = InstanceListGroupWidget(group_name, self._instance_view) + widget.expand_changed.connect(self._on_group_expand_request) + widget.toggle_requested.connect(self._on_group_toggle_request) + self._group_widgets[group_name] = widget + self._instance_view.setIndexWidget(proxy_index, widget) + + return True + + def _remove_groups_except(self, group_names): + # Remove groups that are not available anymore + root_item = self._instance_model.invisibleRootItem() + for group_name in tuple(self._group_items.keys()): + if group_name in 
group_names: + continue + + group_item = self._group_items.pop(group_name) + root_item.removeRow(group_item.row()) + widget = self._group_widgets.pop(group_name) + widget.deleteLater() + def refresh_instance_states(self): """Trigger update of all instances.""" for widget in self._widgets_by_id.values(): widget.update_instance_values() def _on_active_changed(self, changed_instance_id, new_value): - selected_instances, _ = self.get_selected_items() + selected_instance_ids, _, _ = self.get_selected_items() selected_ids = set() found = False - for instance in selected_instances: - selected_ids.add(instance.id) - if not found and instance.id == changed_instance_id: + for instance_id in selected_instance_ids: + selected_ids.add(instance_id) + if not found and instance_id == changed_instance_id: found = True if not found: @@ -758,32 +864,6 @@ class InstanceListView(AbstractInstanceView): if changed_ids: self.active_changed.emit() - def get_selected_items(self): - """Get selected instance ids and context selection. - - Returns: - tuple: Selected instance ids and boolean if context - is selected. - """ - instances = [] - context_selected = False - instances_by_id = { - instance.id: instance - for instance in self.controller.instances - } - - for index in self._instance_view.selectionModel().selectedIndexes(): - instance_id = index.data(INSTANCE_ID_ROLE) - if not context_selected and instance_id == CONTEXT_ID: - context_selected = True - - elif instance_id is not None: - instance = instances_by_id.get(instance_id) - if instance: - instances.append(instance) - - return instances, context_selected - def _on_selection_change(self, *_args): self.selection_changed.emit() @@ -798,7 +878,18 @@ class InstanceListView(AbstractInstanceView): proxy_index = self._proxy_model.mapFromSource(group_index) self._instance_view.setExpanded(proxy_index, expanded) + def _on_convertor_group_expand_request(self, _, expanded): + group_item = self._convertor_group_item + if not group_item: + return + group_index = self._instance_model.index( + group_item.row(), group_item.column() + ) + proxy_index = self._proxy_model.mapFromSource(group_index) + self._instance_view.setExpanded(proxy_index, expanded) + def _on_group_toggle_request(self, group_name, state): + state = checkstate_int_to_enum(state) if state == QtCore.Qt.PartiallyChecked: return @@ -823,3 +914,140 @@ class InstanceListView(AbstractInstanceView): proxy_index = self._proxy_model.mapFromSource(group_item.index()) if not self._instance_view.isExpanded(proxy_index): self._instance_view.expand(proxy_index) + + def has_items(self): + if self._convertor_group_widget is not None: + return True + if self._group_items: + return True + return False + + def get_selected_items(self): + """Get selected instance ids and context selection. + + Returns: + tuple: Selected instance ids and boolean if context + is selected. 
+ """ + + instance_ids = [] + convertor_identifiers = [] + context_selected = False + + for index in self._instance_view.selectionModel().selectedIndexes(): + convertor_identifier = index.data(CONVERTER_IDENTIFIER_ROLE) + if convertor_identifier is not None: + convertor_identifiers.append(convertor_identifier) + continue + + instance_id = index.data(INSTANCE_ID_ROLE) + if not context_selected and instance_id == CONTEXT_ID: + context_selected = True + + elif instance_id is not None: + instance_ids.append(instance_id) + + return instance_ids, context_selected, convertor_identifiers + + def set_selected_items( + self, instance_ids, context_selected, convertor_identifiers + ): + s_instance_ids = set(instance_ids) + s_convertor_identifiers = set(convertor_identifiers) + cur_ids, cur_context, cur_convertor_identifiers = ( + self.get_selected_items() + ) + if ( + set(cur_ids) == s_instance_ids + and cur_context == context_selected + and set(cur_convertor_identifiers) == s_convertor_identifiers + ): + return + + view = self._instance_view + src_model = self._instance_model + proxy_model = self._proxy_model + + select_indexes = [] + + select_queue = collections.deque() + select_queue.append( + (src_model.invisibleRootItem(), []) + ) + while select_queue: + queue_item = select_queue.popleft() + item, parent_items = queue_item + + if item.hasChildren(): + new_parent_items = list(parent_items) + new_parent_items.append(item) + for row in range(item.rowCount()): + select_queue.append( + (item.child(row), list(new_parent_items)) + ) + + convertor_identifier = item.data(CONVERTER_IDENTIFIER_ROLE) + + select = False + expand_parent = True + if convertor_identifier is not None: + if convertor_identifier in s_convertor_identifiers: + select = True + else: + instance_id = item.data(INSTANCE_ID_ROLE) + if instance_id == CONTEXT_ID: + if context_selected: + select = True + expand_parent = False + + elif instance_id in s_instance_ids: + select = True + + if not select: + continue + + select_indexes.append(item.index()) + if not expand_parent: + continue + + for parent_item in parent_items: + index = parent_item.index() + proxy_index = proxy_model.mapFromSource(index) + if not view.isExpanded(proxy_index): + view.expand(proxy_index) + + selection_model = view.selectionModel() + if not select_indexes: + selection_model.clear() + return + + if len(select_indexes) == 1: + proxy_index = proxy_model.mapFromSource(select_indexes[0]) + selection_model.setCurrentIndex( + proxy_index, + selection_model.ClearAndSelect | selection_model.Rows + ) + return + + first_index = proxy_model.mapFromSource(select_indexes.pop(0)) + last_index = proxy_model.mapFromSource(select_indexes.pop(-1)) + + selection_model.setCurrentIndex( + first_index, + QtCore.QItemSelectionModel.ClearAndSelect + | QtCore.QItemSelectionModel.Rows + ) + + for index in select_indexes: + proxy_index = proxy_model.mapFromSource(index) + selection_model.select( + proxy_index, + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) + + selection_model.setCurrentIndex( + last_index, + QtCore.QItemSelectionModel.Select + | QtCore.QItemSelectionModel.Rows + ) diff --git a/openpype/tools/publisher/widgets/overview_widget.py b/openpype/tools/publisher/widgets/overview_widget.py new file mode 100644 index 0000000000..022de2dc34 --- /dev/null +++ b/openpype/tools/publisher/widgets/overview_widget.py @@ -0,0 +1,408 @@ +from qtpy import QtWidgets, QtCore + +from .border_label_widget import BorderedLabelWidget + +from .card_view_widgets import 
InstanceCardView +from .list_view_widgets import InstanceListView +from .widgets import ( + SubsetAttributesWidget, + CreateInstanceBtn, + RemoveInstanceBtn, + ChangeViewBtn, +) +from .create_widget import CreateWidget + + +class OverviewWidget(QtWidgets.QFrame): + active_changed = QtCore.Signal() + instance_context_changed = QtCore.Signal() + create_requested = QtCore.Signal() + + anim_end_value = 200 + anim_duration = 200 + + def __init__(self, controller, parent): + super(OverviewWidget, self).__init__(parent) + + self._refreshing_instances = False + self._controller = controller + + create_widget = CreateWidget(controller, self) + + # --- Created Subsets/Instances --- + # Common widget for creation and overview + subset_views_widget = BorderedLabelWidget( + "Subsets to publish", self + ) + + subset_view_cards = InstanceCardView(controller, subset_views_widget) + subset_list_view = InstanceListView(controller, subset_views_widget) + + subset_views_layout = QtWidgets.QStackedLayout() + subset_views_layout.addWidget(subset_view_cards) + subset_views_layout.addWidget(subset_list_view) + subset_views_layout.setCurrentWidget(subset_view_cards) + + # Buttons at the bottom of subset view + create_btn = CreateInstanceBtn(self) + delete_btn = RemoveInstanceBtn(self) + change_view_btn = ChangeViewBtn(self) + + # --- Overview --- + # Subset details widget + subset_attributes_wrap = BorderedLabelWidget( + "Publish options", self + ) + subset_attributes_widget = SubsetAttributesWidget( + controller, subset_attributes_wrap + ) + subset_attributes_wrap.set_center_widget(subset_attributes_widget) + + # Layout of buttons at the bottom of subset view + subset_view_btns_layout = QtWidgets.QHBoxLayout() + subset_view_btns_layout.setContentsMargins(0, 5, 0, 0) + subset_view_btns_layout.addWidget(create_btn) + subset_view_btns_layout.addSpacing(5) + subset_view_btns_layout.addWidget(delete_btn) + subset_view_btns_layout.addStretch(1) + subset_view_btns_layout.addWidget(change_view_btn) + + # Layout of view and buttons + # - widget 'subset_view_widget' is necessary + # - only layout won't be resized automatically to minimum size hint + # on child resize request! 
+ subset_view_widget = QtWidgets.QWidget(subset_views_widget) + subset_view_layout = QtWidgets.QVBoxLayout(subset_view_widget) + subset_view_layout.setContentsMargins(0, 0, 0, 0) + subset_view_layout.addLayout(subset_views_layout, 1) + subset_view_layout.addLayout(subset_view_btns_layout, 0) + + subset_views_widget.set_center_widget(subset_view_widget) + + # Whole subset layout with attributes and details + subset_content_widget = QtWidgets.QWidget(self) + subset_content_layout = QtWidgets.QHBoxLayout(subset_content_widget) + subset_content_layout.setContentsMargins(0, 0, 0, 0) + subset_content_layout.addWidget(create_widget, 7) + subset_content_layout.addWidget(subset_views_widget, 3) + subset_content_layout.addWidget(subset_attributes_wrap, 7) + + # Subset frame layout + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(0, 0, 0, 0) + main_layout.addWidget(subset_content_widget, 1) + + change_anim = QtCore.QVariantAnimation() + change_anim.setStartValue(float(0)) + change_anim.setEndValue(float(self.anim_end_value)) + change_anim.setDuration(self.anim_duration) + change_anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad) + + # --- Callbacks for instances/subsets view --- + create_btn.clicked.connect(self._on_create_clicked) + delete_btn.clicked.connect(self._on_delete_clicked) + change_view_btn.clicked.connect(self._on_change_view_clicked) + + change_anim.valueChanged.connect(self._on_change_anim) + change_anim.finished.connect(self._on_change_anim_finished) + + # Selection changed + subset_list_view.selection_changed.connect( + self._on_subset_change + ) + subset_view_cards.selection_changed.connect( + self._on_subset_change + ) + # Active instances changed + subset_list_view.active_changed.connect( + self._on_active_changed + ) + subset_view_cards.active_changed.connect( + self._on_active_changed + ) + # Instance context has changed + subset_attributes_widget.instance_context_changed.connect( + self._on_instance_context_change + ) + subset_attributes_widget.convert_requested.connect( + self._on_convert_requested + ) + + # --- Controller callbacks --- + controller.event_system.add_callback( + "publish.process.started", self._on_publish_start + ) + controller.event_system.add_callback( + "publish.reset.finished", self._on_publish_reset + ) + controller.event_system.add_callback( + "instances.refresh.finished", self._on_instances_refresh + ) + + self._subset_content_widget = subset_content_widget + self._subset_content_layout = subset_content_layout + + self._subset_view_cards = subset_view_cards + self._subset_list_view = subset_list_view + self._subset_views_layout = subset_views_layout + + self._delete_btn = delete_btn + + self._subset_attributes_widget = subset_attributes_widget + self._create_widget = create_widget + self._subset_views_widget = subset_views_widget + self._subset_attributes_wrap = subset_attributes_wrap + + self._change_anim = change_anim + + # Start in create mode + self._create_widget_policy = create_widget.sizePolicy() + self._subset_views_widget_policy = subset_views_widget.sizePolicy() + self._subset_attributes_wrap_policy = ( + subset_attributes_wrap.sizePolicy() + ) + self._max_widget_width = None + self._current_state = "create" + subset_attributes_wrap.setVisible(False) + 
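# The controller callbacks above subscribe by topic name through
# `controller.event_system`. A minimal stand-in with the same
# `add_callback`/`emit` shape, illustrative only and not OpenPype's
# implementation (the real callbacks receive an event object, as the
# HelpDialog in this patch shows with `event.get("message")`):
import collections

class TinyEventSystem:
    def __init__(self):
        self._callbacks = collections.defaultdict(list)

    def add_callback(self, topic, callback):
        self._callbacks[topic].append(callback)

    def emit(self, topic, data=None, source=None):
        # Deliver the payload to every callback registered for the topic
        for callback in self._callbacks[topic]:
            callback(data)

events = TinyEventSystem()
events.add_callback("publish.reset.finished", lambda data: print("reset done"))
events.emit("publish.reset.finished", {}, "controller")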
+            self._change_visibility_for_state()
+            if anim_is_running:
+                self._change_anim.stop()
+            return
+
+        if self._max_widget_width is None:
+            self._max_widget_width = self._subset_views_widget.maximumWidth()
+
+        if new_state == "create":
+            direction = QtCore.QAbstractAnimation.Backward
+        else:
+            direction = QtCore.QAbstractAnimation.Forward
+        self._change_anim.setDirection(direction)
+
+        if not anim_is_running:
+            view_width = self._subset_views_widget.width()
+            self._subset_views_widget.setMinimumWidth(view_width)
+            self._subset_views_widget.setMaximumWidth(view_width)
+            self._change_anim.start()
+
+    def get_subset_views_geo(self):
+        parent = self._subset_views_widget.parent()
+        global_pos = parent.mapToGlobal(self._subset_views_widget.pos())
+        return QtCore.QRect(
+            global_pos.x(),
+            global_pos.y(),
+            self._subset_views_widget.width(),
+            self._subset_views_widget.height()
+        )
+
+    def has_items(self):
+        view = self._subset_views_layout.currentWidget()
+        return view.has_items()
+
+    def _on_create_clicked(self):
+        """Pass the signal to the parent widget, which handles state changes.
+
+        Nothing is changed here until the parent reacts to the request.
+        """
+
+        self.create_requested.emit()
+
+    def _on_delete_clicked(self):
+        instance_ids, _, _ = self.get_selected_items()
+
+        # Ask the user to confirm the removal of the instances
+        dialog = QtWidgets.QMessageBox(self)
+        dialog.setIcon(QtWidgets.QMessageBox.Question)
+        dialog.setWindowTitle("Are you sure?")
+        if len(instance_ids) > 1:
+            msg = (
+                "Do you really want to remove {} instances?"
+            ).format(len(instance_ids))
+        else:
+            msg = (
+                "Do you really want to remove the instance?"
+            )
+        dialog.setText(msg)
+        dialog.setStandardButtons(
+            QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
+        )
+        dialog.setDefaultButton(QtWidgets.QMessageBox.Ok)
+        dialog.setEscapeButton(QtWidgets.QMessageBox.Cancel)
+        dialog.exec_()
+        # Skip if OK was not clicked
+        if dialog.result() == QtWidgets.QMessageBox.Ok:
+            instance_ids = set(instance_ids)
+            self._controller.remove_instances(instance_ids)
+
+    def _on_change_view_clicked(self):
+        self._change_view_type()
+
+    def _on_subset_change(self, *_args):
+        # Ignore changes if in the middle of refreshing
+        if self._refreshing_instances:
+            return
+
+        instance_ids, context_selected, convertor_identifiers = (
+            self.get_selected_items()
+        )
+
+        # Disable delete button if nothing is selected
+        self._delete_btn.setEnabled(len(instance_ids) > 0)
+
+        instances_by_id = self._controller.instances
+        instances = [
+            instances_by_id[instance_id]
+            for instance_id in instance_ids
+        ]
+        self._subset_attributes_widget.set_current_instances(
+            instances, context_selected, convertor_identifiers
+        )
+
+    def _on_active_changed(self):
+        if self._refreshing_instances:
+            return
+        self.active_changed.emit()
+
+    def _on_change_anim(self, value):
+        self._create_widget.setVisible(True)
+        self._subset_attributes_wrap.setVisible(True)
+        width = (
+            self._subset_content_widget.width()
+            - (
+                self._subset_views_widget.width()
+                + (self._subset_content_layout.spacing() * 2)
+            )
+        )
+        subset_attrs_width = int((float(width) / self.anim_end_value) * value)
+        if subset_attrs_width > width:
+            subset_attrs_width = width
+
+        create_width = width - subset_attrs_width
+
+        self._create_widget.setMinimumWidth(create_width)
+        self._create_widget.setMaximumWidth(create_width)
+        self._subset_attributes_wrap.setMinimumWidth(subset_attrs_width)
+        self._subset_attributes_wrap.setMaximumWidth(subset_attrs_width)
+
+    def _on_change_anim_finished(self):
+        self._change_visibility_for_state()
+        self._create_widget.setMinimumWidth(0)
+        self._create_widget.setMaximumWidth(self._max_widget_width)
+        self._subset_attributes_wrap.setMinimumWidth(0)
+        self._subset_attributes_wrap.setMaximumWidth(self._max_widget_width)
+        self._subset_views_widget.setMinimumWidth(0)
+        self._subset_views_widget.setMaximumWidth(self._max_widget_width)
+        self._create_widget.setSizePolicy(
+            self._create_widget_policy
+        )
+        self._subset_attributes_wrap.setSizePolicy(
+            self._subset_attributes_wrap_policy
+        )
+        self._subset_views_widget.setSizePolicy(
+            self._subset_views_widget_policy
+        )
+
+    def _change_visibility_for_state(self):
+        self._create_widget.setVisible(
+            self._current_state == "create"
+        )
+        self._subset_attributes_wrap.setVisible(
+            self._current_state == "publish"
+        )
+
+    def _on_instance_context_change(self):
+        current_idx = self._subset_views_layout.currentIndex()
+        for idx in range(self._subset_views_layout.count()):
+            if idx == current_idx:
+                continue
+            widget = self._subset_views_layout.widget(idx)
+            if widget.refreshed:
+                widget.set_refreshed(False)
+
+        current_widget = self._subset_views_layout.widget(current_idx)
+        current_widget.refresh_instance_states()
+
+        self.instance_context_changed.emit()
+
+    def _on_convert_requested(self):
+        _, _, convertor_identifiers = self.get_selected_items()
+        self._controller.trigger_convertor_items(convertor_identifiers)
+
+    def get_selected_items(self):
+        view = self._subset_views_layout.currentWidget()
+        return view.get_selected_items()
+
+    def _change_view_type(self):
+        idx = self._subset_views_layout.currentIndex()
+        new_idx = (idx + 1) % self._subset_views_layout.count()
+
+        old_view = self._subset_views_layout.currentWidget()
+        new_view = self._subset_views_layout.widget(new_idx)
+
+        if not new_view.refreshed:
+            new_view.refresh()
+            new_view.set_refreshed(True)
+        else:
+            new_view.refresh_instance_states()
+
+        instance_ids, context_selected, convertor_identifiers = (
+            old_view.get_selected_items()
+        )
+        new_view.set_selected_items(
+            instance_ids, context_selected, convertor_identifiers
+        )
+
+        self._subset_views_layout.setCurrentIndex(new_idx)
+
+        self._on_subset_change()
+
+    def _refresh_instances(self):
+        if self._refreshing_instances:
+            return
+
+        self._refreshing_instances = True
+
+        for idx in range(self._subset_views_layout.count()):
+            widget = self._subset_views_layout.widget(idx)
+            widget.set_refreshed(False)
+
+        view = self._subset_views_layout.currentWidget()
+        view.refresh()
+        view.set_refreshed(True)
+
+        self._refreshing_instances = False
+
+        # Force a subset change to refresh the details
+        self._on_subset_change()
+
+    def _on_publish_start(self):
+        """Publish started."""
+
+        self._subset_attributes_wrap.setEnabled(False)
+
+    def _on_publish_reset(self):
+        """Context in controller has been refreshed."""
+
+        self._subset_attributes_wrap.setEnabled(True)
+        self._subset_content_widget.setEnabled(self._controller.host_is_valid)
+
+    def _on_instances_refresh(self):
+        """Controller refreshed instances."""
+
+        self._refresh_instances()
+
+        # Give a chance to process the resize request
+        QtWidgets.QApplication.processEvents()
+        # Trigger geometry update of the current view widget
+        widget = self._subset_views_layout.currentWidget()
+        widget.updateGeometry()
diff --git a/openpype/tools/publisher/widgets/precreate_widget.py b/openpype/tools/publisher/widgets/precreate_widget.py
index eaadfe890b..3037a0e12d 100644
--- a/openpype/tools/publisher/widgets/precreate_widget.py
+++ b/openpype/tools/publisher/widgets/precreate_widget.py
@@ -1,6 +1,6 @@
-from Qt import QtWidgets, QtCore
+from qtpy import QtWidgets, QtCore

-from openpype.widgets.attribute_defs import create_widget_for_attr_def
+from openpype.tools.attribute_defs import create_widget_for_attr_def


 class PreCreateWidget(QtWidgets.QWidget):
@@ -58,12 +58,12 @@ class PreCreateWidget(QtWidgets.QWidget):
     def current_value(self):
         return self._attributes_widget.current_value()

-    def set_plugin(self, creator):
+    def set_creator_item(self, creator_item):
         attr_defs = []
         creator_selected = False
-        if creator is not None:
+        if creator_item is not None:
             creator_selected = True
-            attr_defs = creator.get_pre_create_attr_defs()
+            attr_defs = creator_item.pre_create_attributes_defs

         self._attributes_widget.set_attr_defs(attr_defs)
diff --git a/openpype/tools/publisher/widgets/publish_frame.py b/openpype/tools/publisher/widgets/publish_frame.py
new file mode 100644
index 0000000000..e4e6740532
--- /dev/null
+++ b/openpype/tools/publisher/widgets/publish_frame.py
@@ -0,0 +1,524 @@
+import os
+import json
+import time
+
+from qtpy import QtWidgets, QtCore
+
+from .widgets import (
+    StopBtn,
+    ResetBtn,
+    ValidateBtn,
+    PublishBtn,
+    PublishReportBtn,
+)
+
+
+class PublishFrame(QtWidgets.QWidget):
+    """Frame shown during publishing.
+
+    Shows all information related to publishing. Contains a validation error
+    widget which is shown only if a validation error happens during
+    validation.
+
+    The processing layer is the default layer. The validation error layer is
+    shown only if a validation exception is raised during publishing. The
+    report layer is available only when the publishing process is stopped,
+    and it must be triggered manually to switch into that layer.
+
+    +------------------------------------------------------------------------+
+    |                            < Main label >                              |
+    |                            < Label top >                               |
+    |        (####                10%  )                                     |
+    |                                                                        |
+    |                                                                        |
+    +------------------------------------------------------------------------+
+    """
+
+    details_page_requested = QtCore.Signal()
+
+    def __init__(self, controller, borders, parent):
+        super(PublishFrame, self).__init__(parent)
+
+        # Bottom part of the widget where process and callback buttons are
+        #   shown
+        # - QFrame is used to be able to set the background easily using
+        #   stylesheets without overriding the style of all child widgets
+        content_frame = QtWidgets.QFrame(self)
+        content_frame.setObjectName("PublishInfoFrame")
+
+        top_content_widget = QtWidgets.QWidget(content_frame)
+
+        # Center widget displaying current state (without any specific info)
+        main_label = QtWidgets.QLabel(top_content_widget)
+        main_label.setObjectName("PublishInfoMainLabel")
+        main_label.setAlignment(QtCore.Qt.AlignCenter)
+
+        # Supporting labels for main label
+        # Top label is displayed just under main label
+        message_label_top = QtWidgets.QLabel(top_content_widget)
+        message_label_top.setAlignment(QtCore.Qt.AlignCenter)
+
+        # Label showing currently processed instance
+        progress_widget = QtWidgets.QWidget(top_content_widget)
+        instance_plugin_widget = QtWidgets.QWidget(progress_widget)
+        instance_label = QtWidgets.QLabel(
+            "", instance_plugin_widget
+        )
+        instance_label.setAlignment(
+            QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
+        )
+        # Label showing currently processed plugin
+        plugin_label = QtWidgets.QLabel(
+            "", instance_plugin_widget
+        )
+        plugin_label.setAlignment(
+            QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
+        )
+        instance_plugin_layout = QtWidgets.QHBoxLayout(instance_plugin_widget)
+        instance_plugin_layout.setContentsMargins(0, 0, 0, 0)
+        instance_plugin_layout.addWidget(instance_label, 1)
+
instance_plugin_layout.addWidget(plugin_label, 1) + + # Progress bar showing progress of publishing + progress_bar = QtWidgets.QProgressBar(progress_widget) + progress_bar.setObjectName("PublishProgressBar") + + progress_layout = QtWidgets.QVBoxLayout(progress_widget) + progress_layout.setSpacing(5) + progress_layout.setContentsMargins(0, 0, 0, 0) + progress_layout.addWidget(instance_plugin_widget, 0) + progress_layout.addWidget(progress_bar, 0) + + top_content_layout = QtWidgets.QVBoxLayout(top_content_widget) + top_content_layout.setContentsMargins(0, 0, 0, 0) + top_content_layout.setSpacing(5) + top_content_layout.setAlignment(QtCore.Qt.AlignCenter) + top_content_layout.addWidget(main_label) + # TODO stretches should be probably replaced by spacing... + # - stretch in floating frame doesn't make sense + top_content_layout.addWidget(message_label_top) + top_content_layout.addWidget(progress_widget) + + # Publishing buttons to stop, reset or trigger publishing + footer_widget = QtWidgets.QWidget(content_frame) + + report_btn = PublishReportBtn(footer_widget) + + shrunk_main_label = QtWidgets.QLabel(footer_widget) + shrunk_main_label.setObjectName("PublishInfoMainLabel") + shrunk_main_label.setAlignment( + QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft + ) + + reset_btn = ResetBtn(footer_widget) + stop_btn = StopBtn(footer_widget) + validate_btn = ValidateBtn(footer_widget) + publish_btn = PublishBtn(footer_widget) + + report_btn.add_action("Go to details", "go_to_report") + report_btn.add_action("Copy report", "copy_report") + report_btn.add_action("Export report", "export_report") + + # Footer on info frame layout + footer_layout = QtWidgets.QHBoxLayout(footer_widget) + footer_layout.setContentsMargins(0, 0, 0, 0) + footer_layout.addWidget(report_btn, 0) + footer_layout.addWidget(shrunk_main_label, 1) + footer_layout.addWidget(reset_btn, 0) + footer_layout.addWidget(stop_btn, 0) + footer_layout.addWidget(validate_btn, 0) + footer_layout.addWidget(publish_btn, 0) + + # Info frame content + content_layout = QtWidgets.QVBoxLayout(content_frame) + content_layout.setSpacing(5) + + content_layout.addWidget(top_content_widget) + content_layout.addWidget(footer_widget) + + main_layout = QtWidgets.QVBoxLayout(self) + main_layout.setContentsMargins(borders, 0, borders, borders) + main_layout.addWidget(content_frame) + + shrunk_anim = QtCore.QVariantAnimation() + shrunk_anim.setDuration(140) + shrunk_anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad) + + # Force translucent background for widgets + for widget in ( + self, + top_content_widget, + footer_widget, + progress_widget, + instance_plugin_widget, + ): + widget.setAttribute(QtCore.Qt.WA_TranslucentBackground) + + report_btn.triggered.connect(self._on_report_triggered) + reset_btn.clicked.connect(self._on_reset_clicked) + stop_btn.clicked.connect(self._on_stop_clicked) + validate_btn.clicked.connect(self._on_validate_clicked) + publish_btn.clicked.connect(self._on_publish_clicked) + + shrunk_anim.valueChanged.connect(self._on_shrunk_anim) + shrunk_anim.finished.connect(self._on_shrunk_anim_finish) + + controller.event_system.add_callback( + "publish.reset.finished", self._on_publish_reset + ) + controller.event_system.add_callback( + "publish.process.started", self._on_publish_start + ) + controller.event_system.add_callback( + "publish.has_validated.changed", self._on_publish_validated_change + ) + controller.event_system.add_callback( + "publish.process.stopped", self._on_publish_stop + ) + + controller.event_system.add_callback( + 
"publish.process.instance.changed", self._on_instance_change + ) + controller.event_system.add_callback( + "publish.process.plugin.changed", self._on_plugin_change + ) + + self._shrunk_anim = shrunk_anim + + self._controller = controller + + self._content_frame = content_frame + self._content_layout = content_layout + self._top_content_layout = top_content_layout + self._top_content_widget = top_content_widget + + self._main_label = main_label + self._message_label_top = message_label_top + + self._instance_label = instance_label + self._plugin_label = plugin_label + + self._progress_bar = progress_bar + self._progress_widget = progress_widget + + self._shrunk_main_label = shrunk_main_label + self._reset_btn = reset_btn + self._stop_btn = stop_btn + self._validate_btn = validate_btn + self._publish_btn = publish_btn + + self._shrunken = False + self._top_widget_max_height = None + self._top_widget_size_policy = top_content_widget.sizePolicy() + self._last_instance_label = None + self._last_plugin_label = None + + def mouseReleaseEvent(self, event): + super(PublishFrame, self).mouseReleaseEvent(event) + self._change_shrunk_state() + + def _change_shrunk_state(self): + self.set_shrunk_state(not self._shrunken) + + def set_shrunk_state(self, shrunk): + if shrunk is self._shrunken: + return + + if self._top_widget_max_height is None: + self._top_widget_max_height = ( + self._top_content_widget.maximumHeight() + ) + + self._shrunken = shrunk + + anim_is_running = ( + self._shrunk_anim.state() == QtCore.QAbstractAnimation.Running + ) + if not self.isVisible(): + if anim_is_running: + self._shrunk_anim.stop() + self._on_shrunk_anim_finish() + return + + start = 0 + end = 0 + if shrunk: + start = self._top_content_widget.height() + else: + if anim_is_running: + start = self._shrunk_anim.currentValue() + hint = self._top_content_widget.minimumSizeHint() + end = hint.height() + + self._shrunk_anim.setStartValue(float(start)) + self._shrunk_anim.setEndValue(float(end)) + if not anim_is_running: + self._shrunk_anim.start() + + def _on_shrunk_anim(self, value): + diff = self._top_content_widget.height() - int(value) + if not self._top_content_widget.isVisible(): + diff -= self._content_layout.spacing() + + window_pos = self.pos() + window_pos_y = window_pos.y() + diff + window_height = self.height() - diff + + self._top_content_widget.setMinimumHeight(value) + self._top_content_widget.setMaximumHeight(value) + self._top_content_widget.setVisible(True) + + self.resize(self.width(), window_height) + self.move(window_pos.x(), window_pos_y) + + def _on_shrunk_anim_finish(self): + self._top_content_widget.setVisible(not self._shrunken) + self._top_content_widget.setMinimumHeight(0) + self._top_content_widget.setMaximumHeight( + self._top_widget_max_height + ) + self._top_content_widget.setSizePolicy(self._top_widget_size_policy) + + if self._shrunken: + self._shrunk_main_label.setText(self._main_label.text()) + else: + self._shrunk_main_label.setText("") + + if self._shrunken: + content_frame_hint = self._content_frame.sizeHint() + + layout = self.layout() + margins = layout.contentsMargins() + window_height = ( + content_frame_hint.height() + + margins.bottom() + + margins.top() + ) + diff = self.height() - window_height + window_pos = self.pos() + window_pos_y = window_pos.y() + diff + self.resize(self.width(), window_height) + self.move(window_pos.x(), window_pos_y) + + def _set_main_label(self, message): + self._main_label.setText(message) + if self._shrunken: + self._shrunk_main_label.setText(message) 
+
+    def _on_publish_reset(self):
+        self._last_instance_label = None
+        self._last_plugin_label = None
+
+        self._set_success_property()
+        self._set_progress_visibility(True)
+
+        self._main_label.setText("Hit publish (play button)! If you want")
+        self._message_label_top.setText("")
+
+        self._reset_btn.setEnabled(True)
+        self._stop_btn.setEnabled(False)
+        self._validate_btn.setEnabled(True)
+        self._publish_btn.setEnabled(True)
+
+        self._progress_bar.setValue(self._controller.publish_progress)
+        self._progress_bar.setMaximum(self._controller.publish_max_progress)
+
+    def _on_publish_start(self):
+        if self._last_plugin_label:
+            self._plugin_label.setText(self._last_plugin_label)
+
+        if self._last_instance_label:
+            self._instance_label.setText(self._last_instance_label)
+
+        self._set_success_property(3)
+        self._set_progress_visibility(True)
+        self._set_main_label("Publishing...")
+
+        self._reset_btn.setEnabled(False)
+        self._stop_btn.setEnabled(True)
+        self._validate_btn.setEnabled(False)
+        self._publish_btn.setEnabled(False)
+
+        self.set_shrunk_state(False)
+
+    def _on_publish_validated_change(self, event):
+        if event["value"]:
+            self._validate_btn.setEnabled(False)
+
+    def _on_instance_change(self, event):
+        """Change instance label when an instance is going to be processed."""
+
+        self._last_instance_label = event["instance_label"]
+        self._instance_label.setText(event["instance_label"])
+        QtWidgets.QApplication.processEvents()
+
+    def _on_plugin_change(self, event):
+        """Change plugin label when a plugin is going to be processed."""
+
+        self._last_plugin_label = event["plugin_label"]
+        self._progress_bar.setValue(self._controller.publish_progress)
+        self._plugin_label.setText(event["plugin_label"])
+        QtWidgets.QApplication.processEvents()
+
+    def _on_publish_stop(self):
+        self._progress_bar.setValue(self._controller.publish_progress)
+
+        self._reset_btn.setEnabled(True)
+        self._stop_btn.setEnabled(False)
+
+        self._instance_label.setText("")
+        self._plugin_label.setText("")
+
+        validate_enabled = not self._controller.publish_has_crashed
+        publish_enabled = not self._controller.publish_has_crashed
+        if validate_enabled:
+            validate_enabled = not self._controller.publish_has_validated
+        if publish_enabled:
+            if (
+                self._controller.publish_has_validated
+                and self._controller.publish_has_validation_errors
+            ):
+                publish_enabled = False
+
+            else:
+                publish_enabled = not self._controller.publish_has_finished
+
+        self._validate_btn.setEnabled(validate_enabled)
+        self._publish_btn.setEnabled(publish_enabled)
+
+        if self._controller.publish_has_crashed:
+            self._set_error_msg()
+
+        elif self._controller.publish_has_validation_errors:
+            self._set_progress_visibility(False)
+            self._set_validation_errors()
+
+        elif self._controller.publish_has_finished:
+            self._set_finished()
+
+        else:
+            self._set_stopped()
+
+    def _set_stopped(self):
+        main_label = "Publish paused"
+        if self._controller.publish_has_validated:
+            main_label += " - Validation passed"
+
+        self._set_main_label(main_label)
+        self._message_label_top.setText(
+            "Hit publish (play button) to continue."
+        )
+
+        self._set_success_property(4)
+
+    def _set_error_msg(self):
+        """Show error message to artist on publish crash."""
+
+        self._set_main_label("Error happened")
+
+        self._message_label_top.setText(self._controller.publish_error_msg)
+
+        self._set_success_property(1)
+
+    def _set_validation_errors(self):
+        self._set_main_label("Your publish didn't pass studio validations")
+        self._message_label_top.setText("Check results above please")
+        self._set_success_property(2)
+
+    def _set_finished(self):
+        self._set_main_label("Finished")
+        self._message_label_top.setText("")
+        self._set_success_property(0)
+
+    def _set_progress_visibility(self, visible):
+        window_height = self.height()
+        self._progress_widget.setVisible(visible)
+        # Ignore rescaling and moving of the widget if it is shrunken or the
+        #   progress bar should be visible
+        if self._shrunken or visible:
+            return
+
+        height = self._progress_widget.height()
+        diff = height + self._top_content_layout.spacing()
+
+        window_pos = self.pos()
+        window_pos_y = self.pos().y() + diff
+        window_height -= diff
+
+        self.resize(self.width(), window_height)
+        self.move(window_pos.x(), window_pos_y)
+
+    def _set_success_property(self, state=None):
+        """Apply styles by state.
+
+        State enum:
+        - None - Default state after restart
+        - 0 - Success finish
+        - 1 - Error happened
+        - 2 - Validation error
+        - 3 - In progress
+        - 4 - Stopped/Paused
+        """
+
+        if state is None:
+            state = ""
+        else:
+            state = str(state)
+
+        for widget in (self._progress_bar, self._content_frame):
+            if widget.property("state") != state:
+                widget.setProperty("state", state)
+                widget.style().polish(widget)
+
+    def _copy_report(self):
+        logs = self._controller.get_publish_report()
+        logs_string = json.dumps(logs, indent=4)
+
+        mime_data = QtCore.QMimeData()
+        mime_data.setText(logs_string)
+        QtWidgets.QApplication.instance().clipboard().setMimeData(
+            mime_data
+        )
+
+    def _export_report(self):
+        default_filename = "publish-report-{}".format(
+            time.strftime("%y%m%d-%H-%M")
+        )
+        default_filepath = os.path.join(
+            os.path.expanduser("~"),
+            default_filename
+        )
+        new_filepath, ext = QtWidgets.QFileDialog.getSaveFileName(
+            self, "Save report", default_filepath, ".json"
+        )
+        if not ext or not new_filepath:
+            return
+
+        logs = self._controller.get_publish_report()
+        full_path = new_filepath + ext
+        dir_path = os.path.dirname(full_path)
+        if not os.path.exists(dir_path):
+            os.makedirs(dir_path)
+
+        with open(full_path, "w") as file_stream:
+            json.dump(logs, file_stream)
+
+    def _on_report_triggered(self, identifier):
+        if identifier == "export_report":
+            self._export_report()
+
+        elif identifier == "copy_report":
+            self._copy_report()
+
+        elif identifier == "go_to_report":
+            self.details_page_requested.emit()
+
+    def _on_reset_clicked(self):
+        self._controller.reset()
+
+    def _on_stop_clicked(self):
+        self._controller.stop_publish()
+
+    def _on_validate_clicked(self):
+        self._controller.validate()
+
+    def _on_publish_clicked(self):
+        self._controller.publish()
diff --git a/openpype/tools/publisher/widgets/publish_widget.py b/openpype/tools/publisher/widgets/publish_widget.py
deleted file mode 100644
index 80d0265dd3..0000000000
--- a/openpype/tools/publisher/widgets/publish_widget.py
+++ /dev/null
@@ -1,521 +0,0 @@
-import os
-import json
-import time
-
-from Qt import QtWidgets, QtCore, QtGui
-
-from openpype.pipeline import KnownPublishError
-
-from .validations_widget import ValidationsWidget
-from ..publish_report_viewer import PublishReportViewerWidget
-from .widgets import (
-    StopBtn,
-
ResetBtn, - ValidateBtn, - PublishBtn, - CopyPublishReportBtn, - SavePublishReportBtn, - ShowPublishReportBtn -) - - -class ActionsButton(QtWidgets.QToolButton): - def __init__(self, parent=None): - super(ActionsButton, self).__init__(parent) - - self.setText("< No action >") - self.setPopupMode(self.MenuButtonPopup) - menu = QtWidgets.QMenu(self) - - self.setMenu(menu) - - self._menu = menu - self._actions = [] - self._current_action = None - - self.clicked.connect(self._on_click) - - def current_action(self): - return self._current_action - - def add_action(self, action): - self._actions.append(action) - action.triggered.connect(self._on_action_trigger) - self._menu.addAction(action) - if self._current_action is None: - self._set_action(action) - - def set_action(self, action): - if action not in self._actions: - self.add_action(action) - self._set_action(action) - - def _set_action(self, action): - if action is self._current_action: - return - self._current_action = action - self.setText(action.text()) - self.setIcon(action.icon()) - - def _on_click(self): - self._current_action.trigger() - - def _on_action_trigger(self): - action = self.sender() - if action not in self._actions: - return - - self._set_action(action) - - -class PublishFrame(QtWidgets.QFrame): - """Frame showed during publishing. - - Shows all information related to publishing. Contains validation error - widget which is showed if only validation error happens during validation. - - Processing layer is default layer. Validation error layer is shown if only - validation exception is raised during publishing. Report layer is available - only when publishing process is stopped and must be manually triggered to - change into that layer. - - +------------------------------------------------------------------------+ - | | - | | - | | - | < Validation error widget > | - | | - | | - | | - | | - +------------------------------------------------------------------------+ - | < Main label > | - | < Label top > | - | (#### 10% ) | - | | - | Report:

-
- -### Set Frame Ranges - -Use this feature in case you are not sure the frame range is correct. - -##### Result - -- setting Frame Range in script settings -- setting Frame Range in viewers (timeline) - -
-
- -![Set Frame Ranges](assets/nuke_setFrameRanges.png) - -
-
- - -
- -![Set Frame Ranges Timeline](assets/nuke_setFrameRanges_timeline.png) - -
- -1. limiting to Frame Range without handles -2. **Input** handle on start -3. **Output** handle on end - -
-
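For a rough idea of what this action amounts to, here is a minimal sketch using the standard Nuke Python API (the frame range and handle values are illustrative assumptions; OpenPype reads the real values from the project database):

```python
import nuke

# Frame range and handles as they might come from the project database
frame_start, frame_end = 1001, 1100
handle_start, handle_end = 10, 10

# Frame Range in the script settings (including handles)
root = nuke.root()
root["first_frame"].setValue(frame_start - handle_start)
root["last_frame"].setValue(frame_end + handle_end)

# Limit the viewers (timeline) to the Frame Range without handles
for viewer in nuke.allNodes("Viewer"):
    viewer["frame_range_lock"].setValue(True)
    viewer["frame_range"].setValue("{}-{}".format(frame_start, frame_end))
```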
- -### Set Resolution - -
-
-
-
-This menu item will set the correct resolution format for you, as defined by your production.
-
-##### Result
-
-- creates a new item in the formats list with the project name
-- sets the new format as the one in use
-
-
-
- -![Set Resolution](assets/nuke_setResolution.png) - -
-
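A minimal sketch of the equivalent Nuke Python calls (the resolution values and the format name are illustrative assumptions; the real values come from your project settings):

```python
import nuke

width, height, project_name = 1920, 1080, "MyProject"

# Create a new format named after the project and set it as the root format
nuke.addFormat("{} {} {}".format(width, height, project_name))
nuke.root()["format"].setValue(project_name)
```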
- - -### Set Colorspace - -
-
-
-This menu item will set the correct Colorspace definitions for you. Everything has to be configured by your production (Project coordinator).
-
-##### Result
-
-- sets the Colorspace in your script settings
-- sets the preview LUT in your viewers
-- sets the correct colorspace on all discovered Read nodes (following the expression set in settings)
-
-
-
- -![Set Colorspace](assets/nuke_setColorspace.png) - -
-
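Conceptually, the action boils down to something like this sketch (the colorspace names and the Read node handling are simplified assumptions; the real values and the discovery expression come from your studio settings):

```python
import nuke

# Colorspace in the script settings
root = nuke.root()
root["workingSpaceLUT"].setValue("linear")

# Preview LUT for all viewers
for viewer in nuke.allNodes("Viewer"):
    viewer["viewerProcess"].setValue("sRGB")

# Colorspace of all discovered Read nodes
for read in nuke.allNodes("Read"):
    read["colorspace"].setValue("linear")
```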
- - -### Apply All Settings - -
-
-
-It is usually enough to use this option once in a while, just to make sure the workfile has the correct properties set.
-
-##### Result
-
-- sets Frame Ranges
-- sets Colorspace
-- sets Resolution
-
-
-
- -![Apply All Settings](assets/nuke_applyAllSettings.png) - -
-
- -### Build Workfile - -
-
-
-This tool will append all available subsets to the current node graph. It will look into the database and get the last [versions](artist_concepts.md#version) of all available [subsets](artist_concepts.md#subset).
-
-
-##### Result
-
-- adds the last versions of all subsets (rendered image sequences) as Read nodes
-- adds a publishable write node as the `renderMain` subset
-
-
-
- -![Build First Work File](assets/nuke_buildFirstWorkfile.png) - -
-
\ No newline at end of file diff --git a/website/docs/artist_hosts_nuke_tut.md b/website/docs/artist_hosts_nuke_tut.md index 4b0ef7a78a..c3f01e042a 100644 --- a/website/docs/artist_hosts_nuke_tut.md +++ b/website/docs/artist_hosts_nuke_tut.md @@ -10,13 +10,13 @@ OpenPype supports Nuke version **`11.0`** and above. ## OpenPype global tools -- [Set Context](artist_tools.md#set-context) -- [Work Files](artist_tools.md#workfiles) -- [Create](artist_tools.md#creator) -- [Load](artist_tools.md#loader) -- [Manage (Inventory)](artist_tools.md#inventory) -- [Publish](artist_tools.md#publisher) -- [Library Loader](artist_tools.md#library-loader) +- [Set Context](artist_tools_context_manager) +- [Work Files](artist_tools_workfiles) +- [Create](artist_tools_creator) +- [Load](artist_tools_loader) +- [Manage (Inventory)](artist_tools_inventory) +- [Publish](artist_tools_publisher) +- [Library Loader](artist_tools_library_loader) ## Nuke specific tools @@ -89,6 +89,8 @@ This menu item will set correct Colorspace definitions for you. All has to be co - set preview LUT to your viewers - set correct colorspace to all discovered Read nodes (following expression set in settings) +See [Nuke Color Management](artist_hosts_nuke_tut.md#nuke-color-management) +
@@ -144,6 +146,8 @@ This tool will append all available subsets into an actual node graph. It will l This QuickStart is short introduction to what OpenPype can do for you. It attempts to make an overview for compositing artists, and simplifies processes that are better described in specific parts of the documentation. + + ### Launch Nuke - Shot and Task Context OpenPype has to know what shot and task you are working on. You need to run Nuke in context of the task, using Ftrack Action or OpenPype Launcher to select the task and run Nuke. @@ -161,7 +165,7 @@ Nuke OpenPype menu shows the current context Launching Nuke with context stops your timer, and starts the clock on the shot and task you picked. -Openpype makes initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke.md#apply-all-settings) from the OpenPype menu. +Openpype makes initial setup for your Nuke script. It is the same as running [Apply All Settings](artist_hosts_nuke_tut.md#apply-all-settings) from the OpenPype menu. - Reads frame range and resolution from Avalon database, sets it in Nuke Project Settings, Creates Viewer node, sets it’s range and indicates handles by In and Out points. @@ -182,7 +186,7 @@ The Next Available Version checks the work folder for already used versions and Subversion can be used to distinguish or name versions. For example used to add shortened artist name. -More about [workfiles](artist_tools.md#workfiles). +More about [workfiles](artist_tools_workfiles). :::tip Admin Tips @@ -210,7 +214,7 @@ Note that the Read node created by OpenPype is green. Green color indicates the ![Asset Load](assets/nuke_tut/nuke_AssetLoadOutOfDate.png) -More about [Asset loader](artist_tools.md#loader). +More about [Asset loader](artist_tools_loader). ### Create Write Node To create OpenPype managed Write node, select the Read node you just created, from OpenPype menu, pick Create. @@ -226,6 +230,11 @@ This will create a Group with a Write node inside. You can configure write node parameters in **Studio Settings → Project → Anatomy → Color Management and Output Formats → Nuke → Nodes** ::: +### Create Prerender Node +Creating Prerender is very similar to creating OpenPype managed Write node. + + + #### What Nuke Publish Does From Artist perspective, Nuke publish gathers all the stuff found in the Nuke script with Publish checkbox set to on, exports stuff and raises the Nuke script (workfile) version. @@ -264,7 +273,7 @@ Pyblish Dialog tries to pack a lot of info in a small area. One of the more tric If you run the publish and decide to not publish the Nuke script, you can turn it off right in the Pyblish dialog by clicking on the checkbox. If you decide to render and publish the shot in lower resolution to speed up the turnaround, you have to turn off the Write Resolution validator. If you want to use an older version of the asset (older version of the plate...), you have to turn off the Validate containers, and so on. 
-More info about [Using Pyblish](artist_tools.md#publisher)
+More info about [Using Pyblish](artist_tools_publisher)

 :::tip Admin Tip - Configuring validators
 You can configure Nuke validators like Output Resolution in **Studio Settings → Project → Nuke → Publish plugins**
@@ -315,6 +324,8 @@ Main disadvantage of this approach is that you can render only one version of yo

 When making quick farm publishes, like making two versions with different color correction, care must be taken to let the first job (first version) completely finish before the second version starts rendering.

+
+
 ### Managing Versions

 ![Versionless](assets/nuke_tut/nuke_ManageVersion.png)
@@ -323,15 +334,30 @@ OpenPype checks all the assets loaded to Nuke on script open. All out of date as

 Use Manage to switch versions for loaded assets.

+### Loading Effects
+This video shows how to publish an effect from Hiero / Nuke Studio, and use the effect in Nuke.
+
+
+
+
+
+### Nuke Color Management
+
+
+
 ## Troubleshooting

 ### Fixing Validate Containers

+If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+
 ![Versionless](assets/nuke_tut/nuke_ValidateContainers.png)

-If your Pyblish dialog fails on Validate Containers, you might have an old asset loaded. Use OpenPype - Manage... to switch the asset(s) to the latest version.
+

 ### Fixing Validate Version
 If your Pyblish dialog fails on Validate Version, you might be trying to publish an already published version. Raise your version in the OpenPype WorkFiles SaveAs.

-Or maybe you accidentally copied write node from different shot to your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from other shot.
\ No newline at end of file
+Or maybe you accidentally copied a write node from a different shot into your current one. Check the write publishes on the left side of the Pyblish dialog. Typically you publish only one write. Locate and delete the stray write from the other shot.
+
+
diff --git a/website/docs/artist_hosts_photoshop.md b/website/docs/artist_hosts_photoshop.md
index b2b5fd58da..88bfb1484d 100644
--- a/website/docs/artist_hosts_photoshop.md
+++ b/website/docs/artist_hosts_photoshop.md
@@ -6,11 +6,11 @@ sidebar_label: Photoshop

 ## Available Tools

-- [Work Files](artist_tools.md#workfiles)
-- [Create](artist_tools.md#creator)
-- [Load](artist_tools.md#loader)
-- [Publish](artist_tools.md#publisher)
-- [Manage](artist_tools.md#inventory)
+- [Work Files](artist_tools_workfiles)
+- [Create](artist_tools_creator)
+- [Load](artist_tools_loader)
+- [Publish](artist_tools_publisher)
+- [Manage](artist_tools_inventory)

 ## Setup

@@ -22,66 +22,104 @@ When you launch Photoshop you will be met with the Workfiles app. If dont have a

 In Photoshop you can find the tools in the `OpenPype` extension:

-![Extension](assets/photoshop_extension.PNG)
+![Extension](assets/photoshop_extension.png)

 You can show the extension panel by going to `Window` > `Extensions` > `OpenPype`.

-### Create
-
-When you have created an image you want to publish, you will need to create special groups or tag existing groups. To do this open the `Creator` through the extensions `Create` button.
-
-![Creator](assets/photoshop_creator.PNG)
-
-With the `Creator` you have a variety of options to create:
-
-- Check `Use selection` (A dialog will ask whether you want to create one image per selected layer).
-  - Yes.
-    - No selection.
-      - This will create a single group named after the `Subset` in the `Creator`.
-    - Single selected layer.
-      - The selected layer will be grouped under a single group named after the selected layer.
-    - Single selected group.
-      - The selected group will be tagged for publishing.
-    - Multiple selected items.
-      - Each selected group will be tagged for publishing and each layer will be grouped individually.
-  - No.
-    - All selected layers will be grouped under a single group named after the `Subset` in the `Creator`.
-- Uncheck `Use selection`.
-  - This will create a single group named after the `Subset` in the `Creator`.
-
 ### Publish

-When you are ready to share some work, you will need to publish. This is done by opening the `Pyblish` through the extensions `Publish` button.
+When you are ready to share some work, you will need to publish. This is done by opening the `Publisher` through the `Publish...` button.

-![Publish](assets/photoshop_publish.PNG)
+![Publish](assets/photoshop_publish.png)

-This tool will run through checks to make sure the contents you are publishing is correct. Hit the "Play" button to start publishing.
+An instance for the workfile is always created automatically (see the 'workfileArt' item in the `Subsets to publish` column). This allows you to publish (and therefore back up)
+the workfile which is used to produce the other publishable elements (such as the `image` and `review` items).
+
+#### Create
+
+The main publishable item in Photoshop will be of the `image` family. The result of this item (instance) is a picture that can be loaded and used in other DCCs (for example as a
+single layer in a composition in AfterEffects, as a reference in Maya etc.).
+
+There are a couple of options for what to publish:
+- a separate image per layer (or group of layers)
+- all visible layers (groups) flattened into a single image
+
+In most cases you will want to keep `Create only for selected` toggled on and select what you would like to publish. Toggling this off
+will allow you to create instance(s) for all visible layers without the need to select them explicitly.
+
+For the separate layers option keep `Create separate instance for each selected` toggled on, select multiple layers and hit the `Create >>>` button in the middle column.
+
+This will result in:
+
+![Image instances created](assets/photoshop_publish_images.png)
+
+(In Photoshop's `Layers` tab standard layers will be wrapped into a group and enriched with the ℗ symbol to denote a publishable instance. With `Create separate instance for each selected` toggled off
+only a single publishable instance will be created, wrapping all visible layers.)
+
+The name of a publishable instance (eg. the subset name) can be configured with a template in `project_settings/global/tools/creator/subset_name_profiles` - see the sketch at the end of this section.
+(This must be configured by an admin who has access to the OpenPype Settings.)
+
+The trash icon under the list of instances allows you to delete any selected `image` instance.
+
+The workfile instance will be recreated automatically though. If you do not want to publish it, use the pill toggle on the instance item.
+
+If you would like to modify a publishable instance, click on the `Publish` tab at the top. This allows you to change the names of publishable
+instances, disable them from publishing, change their task etc.
+
+The Publisher also allows publishing into a different context: just click on any instance, update `Variant`, `Asset` or `Task` in the form in the middle and don't forget to click on the 'Confirm' button.
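For illustration, a single entry of such a profile could look roughly like this sketch (the keys and values are assumptions for illustration, not an exact schema reference - check your Studio Settings for the real structure):

```python
# One hypothetical subset name profile. With this template, a "Main"
# variant of an "image" instance would be named "imageMain".
subset_name_profile = {
    "families": ["image"],
    "hosts": ["photoshop"],
    "task_types": [],
    "tasks": [],
    "template": "{family}{Variant}",
}
```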
+
+#### Validate
+
+If you would like to run the validation rules set by your Studio, click on the funnel icon at the bottom right. This will run through all
+enabled instances, and you can see more information after clicking on the `Details` tab.
+
+![Publish validations](assets/photoshop_publish_validations.png)
+
+In this dialog you can see the publishable instances in the left column, the triggered plugins in the middle and the logs in the right column.
+
+In the left column you can see that a `review` instance was created automatically. This instance flattens all publishable instances (or
+all visible layers if no publishable instances were created) into a single image which can serve as a single reviewable element (for example in Ftrack).
+
+Creation of the review can be disabled in `project_settings/photoshop/publish/CollectReview`.
+
+If you are satisfied with the results of the validation phase (and there are no errors there), you can hit the `Publish` button at the bottom right.
+This will run through the extraction phase (it physically creates images from the `image` instances, creates the `review` etc.) and publishes them
+(eg. stores the files in their final destination and stores metadata about them in the DB).
+This part might take a while depending on the number of layers in the workfile, the amount of available memory and the performance of your machine.

 You may encounter issues with publishing which will be indicated with red squares. If these issues are within the validation section, then you can fix the issue. If there are issues outside of validation section, please let the OpenPype team know.
+
+You can always start a new publish run with the circle arrow button at the bottom right. You might also want to move between phases (Create, Update etc.)
+by clicking on the available tabs at the top of the dialog.
+
+#### Simplified publish
+
+There is a simplified workflow for the simple use case where only a single image containing all visible layers should be created.
+No image instances must be present in the workfile and `project_settings/photoshop/publish/CollectInstances/flatten_subset_template` must be filled in the Settings.
+Then artists just need to hit the 'Publish' button in the menu.
+
 #### Repair Validation Issues

-All validators will give some description about what the issue is. You can inspect this by going into the validator through the arrow:
+If there is some issue in the validation phase, you will receive something like this:

-![Inspect](assets/photoshop_publish_inspect.PNG)
+![Validation error](assets/photoshop_publish_failed.png)

-You can expand the errors by clicking on them for more details:
+All validators will give some description of what the issue is. You can inspect it by clicking on the items in the left column.

-![Expand](assets/photoshop_publish_expand.PNG)
+If there is an option of automatic repair, there will be a `Repair` button on the right. Otherwise you need to fix the issue manually
+(by deleting and recreating the instance etc.).

-Some validator have repair actions, which will fix the issue. If you can identify validators with actions by the circle icon with an "A":
-
-![Actions](assets/photoshop_publish_actions.PNG)
-
-To access the actions, you right click on the validator. If an action runs successfully, the actions icon will turn green. Once all issues are fixed, you can just hit the "Refresh" button and try to publish again.
-
-![Repair](assets/photoshop_publish_repair.gif)
+#### The buttons at the bottom right are for:
+- `Refresh publishing` - sets the publishing process back to its starting position - useful if a previous publish failed, or you changed the configuration of a publish
+- `Stop/pause publishing` - if you would like to pause the publishing process at any time
+- `Validate` - if you would like to run only the collecting and validating phases (nothing will be published yet)
+- `Publish` - the standard way to kick off the full publishing process

 ### Load

 When you want to load existing published work, you can load in smart layers through the `Loader`. You can reach the `Loader` through the extension's `Load` button.

-![Loader](assets/photoshop_loader.PNG)
+![Loader](assets/photoshop_loader.png)

 The supported families for Photoshop are:

@@ -99,9 +137,27 @@ Now that we have some images loaded, we can manage which version is loaded. This
 Loaded images has to stay as smart layers in order to be updated. If you rasterize the layer, you cannot update it to a different version.
 :::

-![Loader](assets/photoshop_manage.PNG)
+![Loader](assets/photoshop_manage.png)

 You can switch to a previous version of the image or update to the latest.

 ![Loader](assets/photoshop_manage_switch.gif)
 ![Loader](assets/photoshop_manage_update.gif)
+
+
+#### Support help
+If you would like to ask an admin or support for help, you can use any of the three options on the `Note` button at the bottom left:
+- `Go to details` - switches into a more detailed list of published instances and plugins.
+- `Copy report` - copies the full publishing log to the clipboard
+- `Export report` - saves the log into a file for sending via mail or any communication tool
+
+If you are able to fix the workfile yourself, use the first button on the right to set the UI to its initial state before publish. (Click the `Publish` button to start again.)
+
+#### Legacy instances
+
+All screenshots of Publish are from the updated dialog; before, publishing was done by the regular `Pyblish` tool.
+The new publishing process should be backward compatible, eg. if you have a workfile with instances created with the previous publishing approach, they will be translated automatically and
+can be used right away.
+
+If you hit unexpected behaviour with old instances, contact support first; then you could try to delete and recreate the instances from scratch.
+The nuclear option is to purge the workfile metadata in `File > File Info > Origin > Headline`. This is only for the most determined daredevils though!
diff --git a/website/docs/artist_hosts_resolve.md b/website/docs/artist_hosts_resolve.md
index 7c462484f5..bb183455db 100644
--- a/website/docs/artist_hosts_resolve.md
+++ b/website/docs/artist_hosts_resolve.md
@@ -16,18 +16,18 @@ Before you are able to start with OpenPype tools in DaVinci Resolve, installatio

 ## OpenPype global tools

-- [Work Files](artist_tools.md#workfiles)
-- [Create](artist_tools.md#creator)
-- [Load](artist_tools.md#loader)
-- [Manage (Inventory)](artist_tools.md#inventory)
-- [Publish](artist_tools.md#publisher)
+- [Work Files](artist_tools_workfiles)
+- [Create](artist_tools_creator)
+- [Load](artist_tools_loader)
+- [Manage (Inventory)](artist_tools_inventory)
+- [Publish](artist_tools_publisher)
## Creating Shots from timeline items -Before a clip can be published with [Publisher](artist_tools.md#publisher) timeline item has to be marked with OpenPype metadata markers. This way it is converted to a publishable subset. +Before a clip can be published with [Publisher](artist_tools_publisher) timeline item has to be marked with OpenPype metadata markers. This way it is converted to a publishable subset. Lets do it step by step. diff --git a/website/docs/artist_hosts_tvpaint.md b/website/docs/artist_hosts_tvpaint.md index 2e831e64d8..a0ce5d5ff8 100644 --- a/website/docs/artist_hosts_tvpaint.md +++ b/website/docs/artist_hosts_tvpaint.md @@ -4,13 +4,13 @@ title: TVPaint sidebar_label: TVPaint --- -- [Work Files](artist_tools.md#workfiles) -- [Load](artist_tools.md#loader) -- [Create](artist_tools.md#creator) -- [Subset Manager](artist_tools.md#subset-manager) -- [Scene Inventory](artist_tools.md#scene-inventory) -- [Publish](artist_tools.md#publisher) -- [Library](artist_tools.md#library) +- [Work Files](artist_tools_workfiles) +- [Load](artist_tools_loader) +- [Create](artist_tools_creator) +- [Subset Manager](artist_tools_subset_manager) +- [Scene Inventory](artist_tools_inventory) +- [Publish](artist_tools_publisher) +- [Library](artist_tools_library) ## Setup diff --git a/website/docs/artist_hosts_unreal.md b/website/docs/artist_hosts_unreal.md index 1ff09893e3..d79e9c387c 100644 --- a/website/docs/artist_hosts_unreal.md +++ b/website/docs/artist_hosts_unreal.md @@ -8,6 +8,20 @@ sidebar_label: Unreal OpenPype supports Unreal in similar ways as in other DCCs Yet there are few specific you need to be aware of. +### Creating the Unreal project + +Selecting a task and opening it with Unreal will generate the Unreal project, if it hasn't been created before. +By default, OpenPype includes the plugin that will be built together with the project. + +Alternatively, the Environment variable `"OPENPYPE_UNREAL_PLUGIN"` can be set to the path of a compiled version of the plugin. +The version of the compiled plugin must match the version of Unreal with which the project is being created. + +:::note +Unreal version 5.0 onwards requires the following Environment variable: + +`"UE_PYTHONPATH": "{PYTHONPATH}"` +::: + ### Project naming Unreal doesn't support project names starting with non-alphabetic character. So names like `123_myProject` are @@ -15,15 +29,15 @@ invalid. If OpenPype detects such name it automatically prepends letter **P** to ## OpenPype global tools -OpenPype global tools can be found in *Window* main menu: +OpenPype global tools can be found in Unreal's toolbar and in the *Tools* main menu: -![Unreal OpenPype Menu](assets/unreal-avalon_tools.jpg) +![Unreal OpenPype Menu](assets/unreal_openpype_tools.png) -- [Create](artist_tools.md#creator) -- [Load](artist_tools.md#loader) -- [Manage (Inventory)](artist_tools.md#inventory) -- [Publish](artist_tools.md#publisher) -- [Library Loader](artist_tools.md#library-loader) +- [Create](artist_tools_creator) +- [Load](artist_tools_loader) +- [Manage (Inventory)](artist_tools_inventory) +- [Publish](artist_tools_publisher) +- [Library Loader](artist_tools_library_loader) ## Static Mesh @@ -31,10 +45,118 @@ OpenPype global tools can be found in *Window* main menu: To import Static Mesh model, just choose **OpenPype → Load ...** and select your mesh. Static meshes are transferred as FBX files as specified in [Unreal Engine 4 Static Mesh Pipeline](https://docs.unrealengine.com/en-US/Engine/Content/Importing/FBX/StaticMeshes/index.html). 
This action will create new folder with subset name (`unrealStaticMeshMain_CON` for example) and put all data into it. Inside, you can find: -![Unreal Container Content](assets/unreal-container.jpg) +![Unreal Container Content](assets/unreal_container.jpg) -In this case there is **lambert1**, material pulled from Maya when this static mesh was published, **unrealStaticMeshCube** is the geometry itself, **unrealStaticMeshCube_CON** is a *AssetContainer* type and is there to mark this directory as Avalon Container (to track changes) and to hold OpenPype metadata. +In this case there is **lambert1**, material pulled from Maya when this static mesh was published, **antennaA_modelMain** is the geometry itself, **modelMain_v002_CON** is a *AssetContainer* type and is there to mark this directory as Avalon Container (to track changes) and to hold OpenPype metadata. ### Publishing -Publishing of Static Mesh works in similar ways. Select your mesh in *Content Browser* and **OpenPype → Create ...**. This will create folder named by subset you've chosen - for example **unrealStaticMeshDefault_INS**. It this folder is that mesh and *Avalon Publish Instance* asset marking this folder as publishable instance and holding important metadata on it. If you want to publish this instance, go **OpenPype → Publish ...** \ No newline at end of file +Publishing of Static Mesh works in similar ways. Select your mesh in *Content Browser* and **OpenPype → Create ...**. This will create folder named by subset you've chosen - for example **unrealStaticMeshDefault_INS**. It this folder is that mesh and *Avalon Publish Instance* asset marking this folder as publishable instance and holding important metadata on it. If you want to publish this instance, go **OpenPype → Publish ...** + +## Layout + +There are two different layout options in Unreal, depending on the type of project you are working on. +One only imports the layout, and saves it in a level. +The other uses [Master Sequences](https://docs.unrealengine.com/4.27/en-US/AnimatingObjects/Sequencer/Overview/TracksShot/) to track the whole level sequence hierarchy. +You can choose in the Project Settings if you want to generate the level sequences. + +![Unreal OP Settings Level Sequence](assets/unreal_setting_level_sequence.png) + +### Loading + +To load a layout, click on the OpenPype icon in Unreal’s main taskbar, and select **Load**. + +![Unreal OP Tools Load](assets/unreal_openpype_tools_load.png) + +Select the task on the left, then right click on the layout asset and select **Load Layout**. + +![Unreal Layout Load](assets/unreal_load_layout.png) + +If you need to load multiple layouts, you can select more than one task on the left, and you can load them together. + +![Unreal Layout Load Batch](assets/unreal_load_layout_batch.png) + +### Navigating the project + +The layout will be imported in the directory `/Content/OpenPype`. The layout will be split into two subfolders: +- *Assets*, which will contain all the rigs and models contained in the layout; +- *Asset name* (in the following example, *episode 2*), a folder named as the **asset** of the current **task**. + +![Unreal Layout Loading Result](assets/unreal_layout_loading_result.png) + +If you chose to generate the level sequences, in the second folder you will find the master level for the task (usually an episode), the level sequence and the folders for all the scenes in the episodes. +Otherwise you will find the level generated for the loaded layout. 
+
+#### Layout without level sequences
+
+In the layout folder, you will find the level with the imported layout and an object of *AssetContainer* type. The latter is there to mark this directory as an Avalon Container (to track changes) and to hold OpenPype metadata.
+
+![Unreal Layout Loading No Sequence](assets/unreal_layout_loading_no_sequence.png)
+
+The layout level will (and should) contain only the data included in the layout. To add lighting, or other elements, like an environment, you have to create a master level, and add the layout level as a [streaming level](https://docs.unrealengine.com/5.0/en-US/level-streaming-in-unreal-engine/).
+
+Create the master level and open it. Then, open the *Levels* window (from the menu **Windows → Levels**). Click on **Levels → Add Existing** and select the layout level and the other levels you wish to include in the scene. The following example shows a master level to which a light level and the layout level have been added.
+
+![Unreal Add Level](assets/unreal_add_level.png)
+![Unreal Level List](assets/unreal_level_list_no_sequences.png)
+
+#### Layout with level sequences
+
+In the episode folder, you will find the master level for the episode, the master level sequence and the folders for all the scenes in the episode.
+
+After opening the master level, open the *Levels* window (from the menu **Windows → Levels**), and you will see the list of the levels of each shot of the episode for which a layout has been loaded.
+
+![Unreal Level List](assets/unreal_level_list.png)
+
+If it has not been added already, you will need to add the environment to the level. Click on **Levels → Add Existing** and select the level with the environment (check with the studio where it is located).
+
+![Unreal Add Level](assets/unreal_add_level.png)
+
+After adding the environment level to the master level, you will need to set it as always loaded by right clicking it, selecting **Change Streaming Method** and choosing **Always Loaded**.
+
+![Unreal Level Streaming Method](assets/unreal_level_streaming_method.png)
+
+### Update layouts
+
+To manage loaded layouts, click on the OpenPype icon in Unreal’s main taskbar, and select **Manage**.
+
+![Unreal OP Tools Manage](assets/unreal_openpype_tools_manage.png)
+
+You will get a list of all the assets that have been loaded in the project.
+The version number will be in red if it isn’t the latest version. Right click on the element, and select Update if you need to update the layout.
+
+:::note
+**DO NOT** update rigs or models imported with a layout. Update only the layout.
+:::
+
+## Rendering
+
+:::note
+The rendering requires a layout loaded with the option to create the level sequences **on**.
+:::
+
+To render and publish an episode, a scene or a shot, you will need to create a publish instance. The publish instance for the rendering is based on one level sequence. That means that if you want to render the whole episode, you will need to create it for the level sequence of the episode, but if you want to render just one shot, you will need to create it for that shot.
+
+Navigate to the folder that contains the level sequence that you need to render. Select the level sequence, and then click on the OpenPype icon in Unreal’s main taskbar, and select **Create**.
+
+![Unreal OP Tools Create](assets/unreal_openpype_tools_create.png)
+
+In the Instance Creator, select **Unreal - Render**, give it a name, and click **Create**.
+ +![Unreal OP Instance Creator](assets/unreal_create_render.png) + +The render instance will be created in `/Content/OpenPype/PublishInstances`. + +Select the instance you need to render, and then click on the OpenPype icon in Unreal’s main taskbar, and select **Render**. You can render more than one instance at a time, if needed. Just select all the instances that you need to render before selecting the **Render** button from the OpenPype menu. + +![Unreal OP Tools Render](assets/unreal_openpype_tools_render.png) + +Once the render is finished, click on the OpenPype icon in Unreal’s main taskbar, and select **Publish**. + +![Unreal OP Tools Publish](assets/unreal_openpype_tools_publish.png) + +On the left, you will see the render instances. They will be automatically reorganised to have an instance for each shot. So, for example, if you have created the render instance for the whole episode, here you will have an instance for each shot in the episode. + +![Unreal Publish Render](assets/unreal_publish_render.png) + +Click on the play button in the bottom right, and it will start the publishing process. diff --git a/website/docs/artist_kitsu.md b/website/docs/artist_kitsu.md new file mode 100644 index 0000000000..9ef782c297 --- /dev/null +++ b/website/docs/artist_kitsu.md @@ -0,0 +1,17 @@ +--- +id: artist_kitsu +title: Kitsu +sidebar_label: Kitsu +--- + +# How to use Kitsu in OpenPype + +## Login to Kitsu module in OpenPype +1. Launch OpenPype, the `Kitsu Credentials` window will open automatically, if not, or if you want to log-in with another account, go to systray OpenPype icon and click on `Kitsu Connect`. +2. Enter your credentials and press *Ok*: + + ![kitsu-login](assets/kitsu/kitsu_credentials.png) + +:::tip +In Kitsu, All the publish actions executed by `pyblish` will be attributed to the currently logged-in user. +::: \ No newline at end of file diff --git a/website/docs/artist_tools.md b/website/docs/artist_tools.md index 38fe1a8af8..30e5cec84f 100644 --- a/website/docs/artist_tools.md +++ b/website/docs/artist_tools.md @@ -4,483 +4,18 @@ title: Tools sidebar_label: Tools --- -## Set Context +# Tools +OpenPype offers a collection of core tools in tandem with the Integrations: -
-
+- [Context Manager](artist_tools_context_manager) +- [Creator](artist_tools_creator) +- [Loader](artist_tools_loader) +- [Library Loader](artist_tools_library_loader) +- [Publisher](artist_tools_publisher) +- [Inventory](artist_tools_inventory) +- [Workfiles](artist_tools_workfiles) +- [Look Assigner](artist_tools_look_assigner) +- [Subset Manager](artist_tools_subset_manager) +- [Sync Queue](artist_tools_sync_queue) -Any time your host app is open in defined context it can be changed to different hierarchy, asset or task within a project. This will allow you to change your opened session to any other asset, shot and tasks within the same project. This is useful particularly in cases where your host takes long time to start. - -
-
- -![workfiles_1](assets/tools_context_manager.png) -
-
- - - -:::note - -Notice that the window doesn't close after hitting `Accept` and confirming the change of context. This behaviour let's you keep the window open and change the context multiple times in a row. -::: - -## Creator - -### Details - -Despite the name, Creator isn't for making new content in your scene, but rather taking what's already in it and creating all the metadata your content needs to be published. - -In Maya this means creating a set with everything you want to publish and assigning custom attributes to it so it gets picked up during publishing stage. - -In Nuke it's either converting an existing write node to a publishable one, or simply creating a write node with all the correct settings and outputs already set. - -### Usage - -1. select what you want to publish from your scenes -2. Open *Creator* from OpenPype menu -3. Choose what family (data type) you need to export -4. Type the name for you export. This name is how others are going to be able to refer to this particular subset when loading it into their scenes. Every assets should have a Main subset, but can have any number of other variants. -5. Click on *Create* - - * * * - -## Loader -Loader loads published subsets into your current scene or script. - -### Usage -1. open *Loader* from OpenPype menu -2. select the asset where the subset you want to load is published -3. from subset list select the subset you want -4. right-click the subset -5. from action menu select what you want to do *(load, reference, ...)* - - -![tools_loader_1](assets/tools/tools_loader_1.png) - -
-
- -### Refresh data -Data are not auto-refreshed to avoid database issues. To refresh assets or subsets press refresh button. - -
-
- -![tools_loader_50](assets/tools/tools_loader_50.png) - -
-
- -### Load another version -Loader by default load last version, but you can of course load another versions. Double-click on the subset in the version column to expose the drop down, choose version you want to load and continue from point 4 of the [Usage](#usage-1). - -
-
- - ![tools_loader_21](assets/tools/tools_loader_21.png) -
-
- - ![tools_loader_22](assets/tools/tools_loader_22.png) -
-
- - -### Filtering - -#### Filter Assets and Subsets by name -To filter assets/subsets by name just type name or part of name to filter text input. Only assets/subsets containing the entered string remain. - -- **Assets filtering example** *(it works the same for subsets)*: - -
-
- -![tools_loader_4](assets/tools/tools_loader_4-small.png) - -
-
- -![tools_loader_5](assets/tools/tools_loader_5-small.png) - -
-
- - -#### Filter Subsets by Family - -
-
- -To filter [subsets](artist_concepts.md#subset) by their [families](artist_publish.md#families) you can use families list where you can check families you want to see or uncheck families you are not interested in. - -
-
- -![tools_loader_30](assets/tools/tools_loader_30-small.png) - -
-
- - - -### Subset groups -Subsets may be grouped which can help to make the subset list more transparent. You can toggle visibility of groups with `Enable Grouping` checkbox. - -![tools_loader_40](assets/tools/tools_loader_40-small.png) - - -#### Add to group or change current group -You can set group of selected subsets with shortcut `Ctrl + G`. - -![tools_loader_41](assets/tools/tools_loader_41-small.png) - - -:::warning -You'll set the group in Avalon database so your changes will take effect for all users. -::: - -### Site Sync support - -If **Site Sync** is enabled additional widget is shown in right bottom corner. -It contains list of all representations of selected version(s). It also shows availability of representation files -on particular site (*active* - mine, *remote* - theirs). - -![site_sync_support](assets/site_sync_loader.png) - -On this picture you see that representation files are available only on remote site (could be GDrive or other). -If artist wants to work with the file(s) they need to be downloaded first. That could be done by right mouse click on -particular representation (or multiselect all) and select *Download*. - -This will mark representation to be download which will happen in the background if OpenPype Tray is running. - -For more details of progress, state or possible error details artist should open **[Sync Queue](#Sync-Queue)** item in Tray app. - -Work in progress... - -## Library Loader - -Library loader is extended [loader](#loader) which allows to load published subsets from Library projects. Controls are same but library loader has extra Combo Box which allows you to choose project you want to load from. - -
-
- -![tools_library_1](assets/tools/tools_library_1-small.png) - -
-
- -![tools_library_2](assets/tools/tools_library_2-small.png) - -
-
- -### Delivery Action ### - -Library Loader contains functionality to export any selected asset, subsets and their version to configurable folder. -Delivery follows structure based on defined template, this template must be configured first by Admin in the Settings. - -![delivery_action](assets/tools/tools_delivery_loader.png) - -* Usage -- Select all required subsets for export (you can change theirs versions by double clicking on 'Version' value) -- Right click and select **Deliver Versions** from context menu -- Select predefined Delivery template (must be configured by Admin system or project wide) -- Fill value for root folder (folder will be created if it doesn't exist) -- Filter out type of representation you are not interested in -- Push **Deliver** button -- Dialog must be kept open until export is finished -- In a case of problems with any of the representation, that one will be skipped, description of error will be provided in the dialog -* * * - -## Publisher - -> Use publish to share your work with others. It collects, validates and exports the data in standardized way. - -### Details - -When you run pyblish, the UI is made of 2 main parts. On the left, you see all the items pyblish will be working with (called instances), and on the right a list of actions that are going to process these items. -Even though every task type has some pre-defined settings of what should be collected from the scene and what items will be published by default. You can technically publish any output type from any task type. -Each item is passed through multiple plugins, each doing a small piece of work. These are organized into 4 areas and run in sequence. - -### Using Pyblish - -In the best case scenario, you open pyblish from the Avalon menu, press play, wait for it to finish, and you’re done. -These are the steps in detail, for cases, where the default settings don’t work for you or you know that the task you’re working on, requires a different treatment. - -#### Collect - -Finds all the important data in the scene and makes it ready for publishing - -#### Validate - -Each validator makes sure your output complies to one particular condition. This could be anything from naming conventions, scene setting, to plugin usage. An item can only be published if all validators pass. - -#### Extract - -Extractor takes the item and saves it to the disk. Usually to temporary location. Each extractor represents one file format and there can be multiple file formats exported for each item. - -#### Integrate - -Integrator takes the extracted files, categorizes and moves them to a correct location on the disk or on the server. - -* * * - -## Inventory - -With Scene Inventory, you can browse, update and change subsets loaded with [Loader](#loader) into your scene or script. - -:::note -You should first understand [Key concepts](artist_concepts) to understand how you can use this tool. -::: - -### Details - - -Once a subset is loaded, it turns into a container within a scene. This containerization allows us to have a good overview of everything in the scene, but also makes it possible to change versions, notify user if something is outdated, replace one asset for another, etc. - - -The scene manager has a simple GUI focused on efficiency. You can see everything that has been previously loaded into the scene, how many time it's been loaded, what version and a lot of other information. Loaded assets are grouped by their asset name, subset name and representation. 
This grouping gives ability to apply changes for all instances of the loaded asset *(e.g. when __tree__ is loaded 20 times you can easily update version for all of them)*. - -![tools_scene_inventory_10](assets/tools/tools_scene_inventory_10-small.png) - -To interact with any container, you need to right click it and you'll see a drop down with possible actions. The key actions for production are already implemented, but more will be added over time. - -![tools_scene_inventory_20](assets/tools/tools_scene_inventory_20.png) - -### Usage - -#### Change version -You can change versions of loaded subsets with scene inventory tool. Version of loaded assets is colored to red when newer version is available. - - -![tools_scene_inventory_40](assets/tools/tools_scene_inventory_40.png) - -##### Update to the latest version -Select containers or subsets you want to update, right-click selection and press `Update to latest`. - -##### Change to specific version -Select containers or subsets you want to change, right-click selection, press `Set version`, select from dropdown version you want change to and press `OK` button to confirm. - - -![tools_scene_inventory_30](assets/tools/tools_scene_inventory_30.png) - - -#### Switch Asset -It's tool in Scene inventory tool that gives ability to switch asset, subset and representation of loaded assets. - - -![tools_scene_inventory_50](assets/tools/tools_scene_inventory_50.png) - - -Because loaded asset is in fact representation of version published in asset's subset it is possible to switch each of this part *(representation, version, subset and asset)*, but with limitations. Limitations are obvious as you can imagine when you have loaded `.ma` representation of `modelMain` subset from `car` asset it is not possible to switch subset to `modelHD` and keep same representation if `modelHD` does not have published `.ma` representation. It is possible to switch multiple loaded assets at once that makes this tool very powerful helper if all published assets contain same subsets and representations. - -Switch tool won't let you cross the border of limitations and inform you when you have to specify more if impossible combination occurs *(It is also possible that there will be no possible combination for selected assets)*. Border is colored to red and confirm button is not enabled when specification is required. - - -![tools_scene_inventory_55](assets/tools/tools_scene_inventory_55.png) - - -Possible switches: -- switch **representation** (`.ma` to `.abc`, `.exr` to `.dpx`, etc.) -- switch **subset** (`modelMain` to `modelHD`, etc.) - - `AND` keep same **representation** *(with limitations)* - - `AND` switch **representation** *(with limitations)* -- switch **asset** (`oak` to `elm`, etc.) - - `AND` keep same **subset** and **representation** *(with limitations)* - - `AND` keep same **subset** and switch **representation** *(with limitations)* - - `AND` switch **subset** and keep same **representation** *(with limitations)* - - `AND` switch **subset** and **representation** *(with limitations)* - -We added one more switch layer above subset for LOD (Level Of Depth). That requires to have published subsets with name ending with **"_LOD{number}"** where number represents level (e.g. modelMain_LOD1). Has the same limitations as mentioned above. This is handy when you want to change only subset but keep same LOD or keep same subset but change LOD for multiple assets. This option is hidden if you didn't select subset that have published subset with LODs. 
- -![tools_scene_inventory_54](assets/tools/tools_scene_inventory_54.png) -### Filtering - -#### Filter by name - -There is a search bar on the top for cases when you have a complex scene with many assets and need to find a specific one. - -
-
- -![tools_scene_inventory_60](assets/tools/tools_scene_inventory_60-small.png) - -
-
- -![tools_scene_inventory_61](assets/tools/tools_scene_inventory_61-small.png) - -
-
- - -#### Filter with Cherry-pick selection - -
-
- -To keep only selected subsets right-click selection and press `Cherry-Pick (Hierarchy)` *(Border of subset list change to **orange** color when Cherry-pick filtering is set so you know filter is applied).* - -
-
- -![tools_scene_inventory_62-small](assets/tools/tools_scene_inventory_62-small.png) - -
-
- -
-
- -To return to original state right-click anywhere in subsets list and press `Back to Full-View`. - -
-
- -![tools_scene_inventory_63-small](assets/tools/tools_scene_inventory_63-small.png) - -
-
- - -:::tip -You can Cherry-pick from Cherry-picked subsets. -::: - -* * * - -## Workfiles - -Save new working scenes or scripts, or open the ones you previously worked on. - -### Details - -Instead of digging through your software native file browser, you can simply open the workfiles app and see all the files for the asset or shot you're currently working with. The app takes care of all the naming and the location of your work files. - -When saving a scene you can also add a comment. It is completely up to you how you use this, however we recommend using it for subversion within your current working version. - -Let's say that the last version of the comp you published was v003 and now you're working on the file prj_sh010_compositing_v004.nk if you want to keep snapshots of your work, but not iterate on the main version because the supervisor is expecting next publish to be v004, you can use the comment to do this, so you can save the file under the name prj_sh010_compositing_v004_001 , prj_sh010_compositing_v004_002. the main version is automatically iterated every time you publish something. - -### Usage - -
-
- -#### To open existing file: - -1. Open Workfiles tool from OpenPype menu -2. Select file from list - the latest version is the highest *(descendent ordering)* -3. Press `Open` button - -
-
- -![workfiles_1](assets/workfiles_1.png) - -
-
- - -#### To save new workfile -1. Open Workfiles tool from OpenPype menu -2. Press `Save As` button -3. You can add optional comment to the filename, that will be appended at the end -4. Press `OK` - -:::note -You can manually override the workfile version by unticking next available version and using the version menu to choose your own. -::: - -## Look Assigner - -> The Look Manager takes care of assigning published looks to the correct model in the scene. - -### Details - -When a look is published it also stores the information about what shading networks need to be assigned to which models, but it also stores all the render attributes on the mesh necessary for a successful render. - -### Usage - -Look Assigner has GUI is made of two parts. On the left you will see the list of all the available models in the scene and on the right side, all the looks that can be associate with them. To assign a look to a model you just need to: - -1. Click on "load all subsets" -2. Choose a subset from the menu on the left -3. Right click on a look from the list on the right -4. Choose "Assign" - -At this point you should have a model with all it's shaders applied correctly. The tool automatically loads the latest look available. - - -## Subset Manager - -> Subset Manager lists all items which are meant for publishig and will be published if Publish is triggered - -### Details - -One or more items (instances) could be published any time Publish process is started. Each this publishable -item must be created by Creator tool previously. Subset Manager provides easy way how to check which items, -and how many, will be published. - -It also provides clean and preferable way how to remove unwanted item from publishing. - -### Usage - -Subset Manager has GUI is made of two parts. On the left you will see the list of all the available publishable items in the scene and on the right side, details about these items. - -
- -![subset_manager](assets/tools_subset_manager.png) -
- -Any time new item is Created, it will show up here. - -Currently there is only single action, 'Remove instance' which cleans workfile file from publishable item metadata. -This might not remove underlying host item, it depends on host and implementation! - -It might also happen that user deletes underlying host item(for example layer in Photoshop) directly in the host, but metadata will stay. -This could result in phantom issues during publishing. Use Subset Manager to purge workfile from abandoned items. - -Please check behaviour in host of your choice. - -## Sync Queue - -### Details - -If **Site Sync** is configured for a project, each asset is marked to be synchronized to a remote site during publishing. -Each artist's OpenPype Tray application handles synchronization in background, it looks for all representation which -are marked with the site of the user (unique site name per artist) and remote site. - -Artists then can see progress of synchronization via **Sync Queue** link in the Tray application. - -Artists can see all synced representation in this dialog with helpful information such as when representation was created, when it was synched, -status of synchronization (OK or Fail) etc. - -### Usage - -With this app artists can modify synchronized representation, for example mark failed representation for re-sync etc. - -![Sync Queue](assets/site_sync_sync_queue.png) - -Actions accessible by context menu on single (or multiple representations): -- *Open in Explorer* - if site is locally accessible, open folder with it with OS based explorer -- *Re-sync Active Site* - mark artist own side for re-download (repre must be accessible on remote side) -- *Re-sync Remote Site* - mark representation for re-upload -- *Completely remove from local* - removes tag of synchronization to artist's local site, removes files from disk (available only for personal sites) -- *Change priority* - mark representations with higher priority for faster synchronization run - -Double click on any of the representation open Detail dialog with information about all files for particular representation. -In this dialog error details could be accessed in the context menu. - -#### Context menu on project name -Artists can also Pause whole server or specific project for synchronization. In that state no download/upload is being run. -This might be helpful if the artist is not interested in a particular project for a while or wants to save bandwidth data limit for a bit. - -Another option is `Validate files on active site`. This option triggers process where all representation of the selected project are looped through, file paths are resolved for active site and -if paths point to local system, paths are physically checked if files are existing. If file exists and representation is not marked to be present on 'active_site' in DB, DB is updated -to follow that. - -This might be useful if artist has representation files that Site Sync doesn't know about (newly attached external drive with representations from studio). -This project might take a while! \ No newline at end of file diff --git a/website/docs/artist_tools_context_manager.md b/website/docs/artist_tools_context_manager.md new file mode 100644 index 0000000000..254401e9de --- /dev/null +++ b/website/docs/artist_tools_context_manager.md @@ -0,0 +1,17 @@ +--- +id: artist_tools_context_manager +title: Context Manager +sidebar_label: Context Manager +description: A tool to manage the context within a host app. 
+--- + +# Context Manager + +Any time your host app is open in a defined context, it can be changed to a different hierarchy, asset or task within the project. This allows you to move your open session to any other asset, shot or task within the same project, which is particularly useful in cases where your host takes a long time to start. + +![workfiles_1](assets/tools_context_manager.png) + + +:::note +Notice that the window doesn't close after hitting `Accept` and confirming the change of context. This behaviour lets you keep the window open and change the context multiple times in a row. +::: diff --git a/website/docs/artist_tools_creator.md b/website/docs/artist_tools_creator.md new file mode 100644 index 0000000000..e2f3f3b482 --- /dev/null +++ b/website/docs/artist_tools_creator.md @@ -0,0 +1,25 @@ +--- +id: artist_tools_creator +title: Creator +sidebar_label: Creator +description: A tool to generate metadata for asset publishing. +--- + +# Creator + +## Details + +Despite the name, Creator isn't for making new content in your scene, but rather taking what's already in it and creating all the metadata your content needs to be published. + +In Maya this means creating a set with everything you want to publish and assigning custom attributes to it so it gets picked up during the publishing stage. + +In Nuke it's either converting an existing write node to a publishable one, or simply creating a write node with all the correct settings and outputs already set. + +## Usage + +1. Select what you want to publish from your scenes. +2. Open *Creator* from the OpenPype menu. +3. Choose what family (data type) you need to export. +4. Type the name for your export. This name is how others are going to be able to refer to this particular subset when loading it into their scenes. Every asset should have a Main subset, but can have any number of other variants. +5. Click on *Create*. + diff --git a/website/docs/artist_tools_inventory.md b/website/docs/artist_tools_inventory.md new file mode 100644 index 0000000000..95207e2b47 --- /dev/null +++ b/website/docs/artist_tools_inventory.md @@ -0,0 +1,129 @@ +--- +id: artist_tools_inventory +title: Inventory +sidebar_label: Inventory +description: Manage already loaded subsets. +--- + +# Inventory + +With Scene Inventory, you can browse, update and change subsets loaded with [Loader](artist_tools_loader) into your scene or script. + +:::note +You should first understand [Key concepts](artist_concepts) to understand how you can use this tool. +::: + +## Details + + +Once a subset is loaded, it turns into a container within a scene. This containerization allows us to have a good overview of everything in the scene, but also makes it possible to change versions, notify the user if something is outdated, replace one asset for another, etc. + + +The scene manager has a simple GUI focused on efficiency. You can see everything that has been previously loaded into the scene, how many times it's been loaded, what version, and a lot of other information. Loaded assets are grouped by their asset name, subset name and representation. This grouping makes it possible to apply changes to all instances of a loaded asset *(e.g. when __tree__ is loaded 20 times you can easily update the version for all of them)*. + +![tools_scene_inventory_10](assets/tools/tools_scene_inventory_10-small.png) + +To interact with any container, right-click it and you'll see a dropdown with possible actions.
The key actions for production are already implemented, but more will be added over time. + +![tools_scene_inventory_20](assets/tools/tools_scene_inventory_20.png) + +## Usage + +### Change version +You can change versions of loaded subsets with the Scene Inventory tool. The version of a loaded asset is colored red when a newer version is available. + + +![tools_scene_inventory_40](assets/tools/tools_scene_inventory_40.png) + +#### Update to the latest version +Select the containers or subsets you want to update, right-click the selection and press `Update to latest`. + +#### Change to specific version +Select the containers or subsets you want to change, right-click the selection, press `Set version`, select the version you want from the dropdown and press the `OK` button to confirm. + + +![tools_scene_inventory_30](assets/tools/tools_scene_inventory_30.png) + + +### Switch Asset +Switch Asset is a tool within Scene Inventory that lets you switch the asset, subset and representation of loaded assets. + + +![tools_scene_inventory_50](assets/tools/tools_scene_inventory_50.png) + + +Because a loaded asset is in fact a representation of a version published in an asset's subset, each of these parts *(representation, version, subset and asset)* can be switched, but with limitations. The limitations are natural: as you can imagine, when you have loaded a `.ma` representation of the `modelMain` subset from the `car` asset, it is not possible to switch the subset to `modelHD` and keep the same representation if `modelHD` does not have a published `.ma` representation. It is possible to switch multiple loaded assets at once, which makes this tool a very powerful helper if all published assets contain the same subsets and representations. + +The Switch tool won't let you cross these limitations, and will inform you when you have to be more specific because an impossible combination occurred *(it is also possible that there is no valid combination for the selected assets)*. The border is colored red and the confirm button is disabled when further specification is required. + + +![tools_scene_inventory_55](assets/tools/tools_scene_inventory_55.png) + + +Possible switches: +- switch **representation** (`.ma` to `.abc`, `.exr` to `.dpx`, etc.) +- switch **subset** (`modelMain` to `modelHD`, etc.) + - `AND` keep same **representation** *(with limitations)* + - `AND` switch **representation** *(with limitations)* +- switch **asset** (`oak` to `elm`, etc.) + - `AND` keep same **subset** and **representation** *(with limitations)* + - `AND` keep same **subset** and switch **representation** *(with limitations)* + - `AND` switch **subset** and keep same **representation** *(with limitations)* + - `AND` switch **subset** and **representation** *(with limitations)* + +We added one more switch layer above subset for LODs (Level of Detail). This requires published subsets with names ending in **"_LOD{number}"**, where the number represents the level (e.g. modelMain_LOD1). It has the same limitations as mentioned above. This is handy when you want to change only the subset but keep the same LOD, or keep the same subset but change the LOD, for multiple assets. This option is hidden if you didn't select a subset that has published subsets with LODs. + +![tools_scene_inventory_54](assets/tools/tools_scene_inventory_54.png) + +## Filtering + +### Filter by name + +There is a search bar on the top for cases when you have a complex scene with many assets and need to find a specific one. + +
+
+ +![tools_scene_inventory_60](assets/tools/tools_scene_inventory_60-small.png) + +
+
+ +![tools_scene_inventory_61](assets/tools/tools_scene_inventory_61-small.png) + +
+
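+ +If you need to hunt down a specific container outside the GUI, the same lookup can be done in code. Below is a minimal Maya sketch, assuming the standard Avalon/OpenPype convention that loaded containers are objectSets whose `id` attribute is `pyblish.avalon.container`; the name filter is a hypothetical example: + +```python
+from maya import cmds
+
+FILTER = "tree"  # hypothetical search string, like the search bar above
+
+for object_set in cmds.ls(type="objectSet"):
+    if not cmds.attributeQuery("id", node=object_set, exists=True):
+        continue
+    if cmds.getAttr(object_set + ".id") != "pyblish.avalon.container":
+        continue
+    if FILTER in object_set:
+        # "representation" holds the database id the Inventory
+        # uses to resolve versions.
+        print(object_set, cmds.getAttr(object_set + ".representation"))
+```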
+ + +### Filter with Cherry-pick selection + +
+
+ +To keep only the selected subsets, right-click the selection and press `Cherry-Pick (Hierarchy)` *(the border of the subset list changes to **orange** when Cherry-pick filtering is set, so you know a filter is applied)*. +
+
+ +![tools_scene_inventory_62-small](assets/tools/tools_scene_inventory_62-small.png) + +
+
+ +
+
+ +To return to the original state, right-click anywhere in the subset list and press `Back to Full-View`. +
+
+ +![tools_scene_inventory_63-small](assets/tools/tools_scene_inventory_63-small.png) + +
+
+ + +:::tip +You can Cherry-pick from Cherry-picked subsets. +::: diff --git a/website/docs/artist_tools_library_loader.md b/website/docs/artist_tools_library_loader.md new file mode 100644 index 0000000000..f85d4e6117 --- /dev/null +++ b/website/docs/artist_tools_library_loader.md @@ -0,0 +1,42 @@ +--- +id: artist_tools_library_loader +title: Library Loader +sidebar_label: Library Loader +description: Allows loading published subsets from projects of type "Library". +--- + +# Library Loader + +Library Loader is an extended [loader](artist_tools_loader) that allows loading published subsets from Library projects. The controls are the same, but Library Loader has an extra combo box that lets you choose the project you want to load from. +
+
+ +![tools_library_1](assets/tools/tools_library_1-small.png) + +
+
+ +![tools_library_2](assets/tools/tools_library_2-small.png) + +
+
+ +## Delivery Action + +Library Loader contains functionality to export any selected assets, subsets and their versions to a configurable folder. +Delivery follows a structure based on a defined template; this template must first be configured by an Admin in the Settings. + +![delivery_action](assets/tools/tools_delivery_loader.png) + +Usage: +- Select all required subsets for export (you can change their versions by double-clicking the 'Version' value) +- Right-click and select **Deliver Versions** from the context menu +- Select a predefined Delivery template (must be configured by an Admin, system or project wide) +- Fill in the value for the root folder (the folder will be created if it doesn't exist) +- Filter out the representation types you are not interested in +- Push the **Deliver** button +- The dialog must be kept open until the export is finished +- If there is a problem with any of the representations, that one will be skipped and a description of the error will be provided in the dialog + + diff --git a/website/docs/artist_tools_loader.md b/website/docs/artist_tools_loader.md new file mode 100644 index 0000000000..3ae69b1cf6 --- /dev/null +++ b/website/docs/artist_tools_loader.md @@ -0,0 +1,121 @@ +--- +id: artist_tools_loader +title: Loader +sidebar_label: Loader +description: Allows loading published subsets from the same project. +--- + +# Loader +Loader loads published subsets into your current scene or script. + +## Usage +1. Open *Loader* from the OpenPype menu. +2. Select the asset where the subset you want to load is published. +3. From the subset list, select the subset you want. +4. Right-click the subset. +5. From the action menu, select what you want to do *(load, reference, ...)*. + + +![tools_loader_1](assets/tools/tools_loader_1.png) +
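+ +Under the hood, the asset and subset panels are views over the Avalon database. For scripted pipelines, here is a minimal sketch of the equivalent lookup, assuming a standard Avalon/OpenPype MongoDB and hypothetical connection, project and asset names: + +```python
+from pymongo import MongoClient
+
+# Hypothetical connection string and project name.
+db = MongoClient("mongodb://localhost:27017")["avalon"]["MyProject"]
+
+# The asset selected in the Loader's left-hand panel.
+asset = db.find_one({"type": "asset", "name": "car"})
+
+# The subsets published under it (the Loader's subset list).
+for subset in db.find({"type": "subset", "parent": asset["_id"]}):
+    print(subset["name"])
+```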
+
+ +## Refresh data +Data is not auto-refreshed to avoid database issues. To refresh assets or subsets, press the refresh button. +
+
+ +![tools_loader_50](assets/tools/tools_loader_50.png) + +
+
+ +## Load another version +By default, Loader loads the latest version, but you can of course load other versions. Double-click the subset in the version column to expose the dropdown, choose the version you want to load, and continue from point 4 of the [Usage](#usage) section. +
+
+ + ![tools_loader_21](assets/tools/tools_loader_21.png) +
+
+ + ![tools_loader_22](assets/tools/tools_loader_22.png) +
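+ +The versions behind this dropdown can also be listed directly from the database; a minimal sketch under the same assumptions (and hypothetical names) as the sketch above: + +```python
+from pymongo import MongoClient
+
+db = MongoClient("mongodb://localhost:27017")["avalon"]["MyProject"]  # hypothetical
+asset = db.find_one({"type": "asset", "name": "car"})
+subset = db.find_one({"type": "subset", "parent": asset["_id"], "name": "modelMain"})
+
+# Newest first; a version's "name" is its integer number in the Avalon schema.
+for version in db.find({"type": "version", "parent": subset["_id"]}).sort("name", -1):
+    print("v{:03d}".format(version["name"]))
+```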
+
+ +## Filtering + +### Filter Assets and Subsets by name +To filter assets/subsets by name, just type the name or part of it into the filter text input. Only assets/subsets containing the entered string remain. + +- **Assets filtering example** *(it works the same for subsets)*: +
+
+ +![tools_loader_4](assets/tools/tools_loader_4-small.png) + +
+
+ +![tools_loader_5](assets/tools/tools_loader_5-small.png) + +
+
+ + +### Filter Subsets by Family + +
+
+ +To filter [subsets](artist_concepts.md#subset) by their [families](artist_publish.md#families), use the families list: check the families you want to see, or uncheck the families you are not interested in. +
+
+ +![tools_loader_30](assets/tools/tools_loader_30-small.png) + +
+
+ + +## Subset groups +Subsets may be grouped, which can help make the subset list easier to read. You can toggle the visibility of groups with the `Enable Grouping` checkbox. + +![tools_loader_40](assets/tools/tools_loader_40-small.png) + + +### Add to group or change current group +You can set the group of selected subsets with the shortcut `Ctrl + G`. + +![tools_loader_41](assets/tools/tools_loader_41-small.png) + + +:::warning +You'll set the group in the Avalon database, so your changes will take effect for all users. +::: + +## Site Sync support + +If **Site Sync** is enabled, an additional widget is shown in the bottom right corner. +It contains a list of all representations of the selected version(s). It also shows the availability of representation files +on particular sites (*active* - mine, *remote* - theirs). + +![site_sync_support](assets/site_sync_loader.png) + +In this picture you can see that the representation files are available only on the remote site (which could be GDrive or another provider). +If an artist wants to work with the file(s), they need to be downloaded first. That can be done by right-clicking the +particular representation (or multi-selecting several) and selecting *Download*. + +This will mark the representation to be downloaded, which will happen in the background if OpenPype Tray is running. + +For more details on progress, state or possible errors, the artist should open the **[Sync Queue](artist_tools_sync_queue)** item in the Tray app. + +Work in progress... + diff --git a/website/docs/artist_tools_look_assigner.md b/website/docs/artist_tools_look_assigner.md new file mode 100644 index 0000000000..29002802b0 --- /dev/null +++ b/website/docs/artist_tools_look_assigner.md @@ -0,0 +1,26 @@ +--- +id: artist_tools_look_assigner +title: Look Assigner +sidebar_label: Look Assigner +description: Manage published looks to their respective model(s). +--- + +# Look Assigner + +The Look Manager takes care of assigning published looks to the correct model in the scene. + +## Details + +When a look is published, it stores the information about which shading networks need to be assigned to which models, as well as all the render attributes on the mesh necessary for a successful render. + +## Usage + +Look Assigner's GUI is made of two parts. On the left you will see the list of all the available models in the scene and on the right side, all the looks that can be associated with them. To assign a look to a model you just need to: + +1. Click on "load all subsets". +2. Choose a subset from the menu on the left. +3. Right-click a look from the list on the right. +4. Choose "Assign". + +At this point you should have a model with all its shaders applied correctly. The tool automatically loads the latest look available. + diff --git a/website/docs/artist_tools_publisher.md b/website/docs/artist_tools_publisher.md new file mode 100644 index 0000000000..456049d824 --- /dev/null +++ b/website/docs/artist_tools_publisher.md @@ -0,0 +1,38 @@ +--- +id: artist_tools_publisher +title: Publisher +sidebar_label: Publisher +description: Publish versioned work progress into the project. +--- + +# Publisher + +Use publish to share your work with others. It collects, validates and exports the data in a standardized way. + +## Details + +When you run pyblish, the UI is made of two main parts. On the left, you see all the items pyblish will be working with (called instances), and on the right a list of actions that are going to process these items.
+Every task type has some pre-defined settings for what should be collected from the scene and which items will be published by default, but you can technically publish any output type from any task type. +Each item is passed through multiple plugins, each doing a small piece of work. These are organized into 4 areas and run in sequence. + +## Using Pyblish + +In the best case scenario, you open pyblish from the Avalon menu, press play, wait for it to finish, and you’re done. +These are the steps in detail, for cases where the default settings don’t work for you, or where you know that the task you’re working on requires a different treatment. + +### Collect + +Finds all the important data in the scene and makes it ready for publishing. + +### Validate + +Each validator makes sure your output complies with one particular condition. This could be anything from naming conventions and scene settings to plugin usage. An item can only be published if all validators pass. + +### Extract + +An extractor takes the item and saves it to the disk, usually to a temporary location. Each extractor represents one file format, and there can be multiple file formats exported for each item. + +### Integrate + +An integrator takes the extracted files, categorizes them and moves them to the correct location on the disk or on the server. +
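+ +All four stages above are implemented as small pyblish plugins. For a feel of what such a plugin looks like, here is a minimal validator sketch using the public `pyblish` API; the family and the check itself are hypothetical examples, not one of OpenPype's actual validators: + +```python
+import pyblish.api
+
+
+class ValidateInstanceName(pyblish.api.InstancePlugin):
+    """Runs once per instance during the Validate stage."""
+
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Instance Name"
+    families = ["model"]  # hypothetical family filter
+
+    def process(self, instance):
+        # Fail validation (and block publishing) on a default name.
+        assert instance.data["name"] != "untitled", (
+            "Instance has a default name")
+```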
diff --git a/website/docs/artist_tools_subset_manager.md b/website/docs/artist_tools_subset_manager.md new file mode 100644 index 0000000000..fd1bc5f477 --- /dev/null +++ b/website/docs/artist_tools_subset_manager.md @@ -0,0 +1,38 @@ +--- +id: artist_tools_subset_manager +title: Subset Manager +sidebar_label: Subset Manager +description: Manage all the publish-able elements. +--- + +# Subset Manager + +Subset Manager lists all items which are meant for publishing and will be published if Publish is triggered. + +## Details + +One or more items (instances) can be published any time the Publish process is started. Each publishable +item must previously have been created with the Creator tool. Subset Manager provides an easy way to check which items, +and how many, will be published. + +It also provides a clean, preferred way to remove unwanted items from publishing. + +## Usage + +Subset Manager's GUI is made of two parts. On the left you will see the list of all the available publishable items in the scene and on the right side, details about these items. + +![subset_manager](assets/tools_subset_manager.png) +
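+ +The publishable items listed here are plain host objects carrying instance metadata. As an illustration, a minimal Maya sketch, assuming the standard Avalon/OpenPype convention that instances are objectSets whose `id` attribute is `pyblish.avalon.instance`: + +```python
+from maya import cmds
+
+for object_set in cmds.ls(type="objectSet"):
+    if not cmds.attributeQuery("id", node=object_set, exists=True):
+        continue
+    if cmds.getAttr(object_set + ".id") != "pyblish.avalon.instance":
+        continue
+    # "family" says what kind of output this instance publishes.
+    print(object_set, cmds.getAttr(object_set + ".family"))
+```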
+ +Any time a new item is created, it will show up here. + +Currently there is only a single action, 'Remove instance', which cleans the publishable item's metadata from the workfile. +This might not remove the underlying host item; it depends on the host and implementation! + +It might also happen that a user deletes the underlying host item (for example, a layer in Photoshop) directly in the host, but the metadata will stay. +This could result in phantom issues during publishing. Use Subset Manager to purge the workfile of abandoned items. + +Please check the behaviour in the host of your choice. + diff --git a/website/docs/artist_tools_sync_queu.md b/website/docs/artist_tools_sync_queu.md new file mode 100644 index 0000000000..770c2f77ad --- /dev/null +++ b/website/docs/artist_tools_sync_queu.md @@ -0,0 +1,46 @@ +--- +id: artist_tools_sync_queue +title: Sync Queue +sidebar_label: Sync Queue +description: Track site synchronization progress. +--- + +# Sync Queue + +## Details + +If **Site Sync** is configured for a project, each asset is marked to be synchronized to a remote site during publishing. +Each artist's OpenPype Tray application handles synchronization in the background; it looks for all representations which +are marked with the site of the user (a unique site name per artist) and the remote site. + +Artists can then see the progress of synchronization via the **Sync Queue** link in the Tray application. + +Artists can see all synced representations in this dialog, with helpful information such as when a representation was created, when it was synced, +the status of synchronization (OK or Fail), etc. + +## Usage + +With this app artists can modify synchronized representations, for example mark a failed representation for re-sync, etc. + +![Sync Queue](assets/site_sync_sync_queue.png) + +Actions accessible via the context menu on a single representation (or multiple representations): +- *Open in Explorer* - if the site is locally accessible, opens its folder in the OS file explorer +- *Re-sync Active Site* - marks the artist's own site for re-download (the representation must be accessible on the remote site) +- *Re-sync Remote Site* - marks the representation for re-upload +- *Completely remove from local* - removes the synchronization tag for the artist's local site and removes the files from disk (available only for personal sites) +- *Change priority* - marks representations with a higher priority for faster synchronization + +Double-clicking any representation opens a Detail dialog with information about all files of that representation. +In this dialog, error details can be accessed via the context menu. + +#### Context menu on project name +Artists can also pause the whole server or a specific project for synchronization. In that state, no download/upload is run. +This might be helpful if the artist is not interested in a particular project for a while or wants to save bandwidth for a bit. + +Another option is `Validate files on active site`. This option triggers a process in which all representations of the selected project are looped through, file paths are resolved for the active site, and, +if the paths point to the local system, the files are physically checked for existence. If a file exists and the representation is not marked as present on the 'active_site' in the DB, the DB is updated +to reflect that. + +This might be useful if the artist has representation files that Site Sync doesn't know about (e.g. a newly attached external drive with representations from the studio). +This process might take a while!
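+ +For pipeline developers: the sync state lives on representation documents in the Avalon database. The sketch below is a rough illustration under the assumption that each representation's `files` entries carry a `sites` list, where a completed transfer is marked with a `created_dt` timestamp; the connection string, project and site name are hypothetical: + +```python
+from pymongo import MongoClient
+
+db = MongoClient("mongodb://localhost:27017")["avalon"]["MyProject"]  # hypothetical
+
+# Representation files not yet confirmed on the "studio" site.
+for repre in db.find({"type": "representation"}):
+    for file_info in repre.get("files", []):
+        for site in file_info.get("sites", []):
+            if site.get("name") == "studio" and "created_dt" not in site:
+                print(repre["name"], file_info.get("path"))
+```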
diff --git a/website/docs/artist_tools_workfiles.md b/website/docs/artist_tools_workfiles.md new file mode 100644 index 0000000000..2e1d939c97 --- /dev/null +++ b/website/docs/artist_tools_workfiles.md @@ -0,0 +1,49 @@ +--- +id: artist_tools_workfiles +title: Workfiles +sidebar_label: Workfiles +description: Save versioned progress files. +--- + +# Workfiles + +Save new working scenes or scripts, or open the ones you previously worked on. + +## Details + +Instead of digging through your software's native file browser, you can simply open the Workfiles app and see all the files for the asset or shot you're currently working with. The app takes care of all the naming and the location of your work files. + +When saving a scene you can also add a comment. It is completely up to you how you use this; however, we recommend using it for subversions within your current working version. + +Let's say that the last version of the comp you published was v003 and you're now working on the file prj_sh010_compositing_v004.nk. If you want to keep snapshots of your work, but not iterate on the main version because the supervisor is expecting the next publish to be v004, you can use the comment to do this: save the file under the name prj_sh010_compositing_v004_001, then prj_sh010_compositing_v004_002. The main version is automatically iterated every time you publish something. +
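+ +As a rough illustration of that naming, here is a minimal sketch with hypothetical values; in practice the Workfiles app derives the name from the project anatomy settings: + +```python
+# Hypothetical values; the real template comes from project anatomy settings.
+project, shot, task = "prj", "sh010", "compositing"
+version, comment = 4, "001"
+
+name = "{}_{}_{}_v{:03d}".format(project, shot, task, version)
+if comment:
+    name += "_" + comment
+print(name + ".nk")  # -> prj_sh010_compositing_v004_001.nk
+```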
+## Usage +
+ +### To open an existing file + +1. Open the Workfiles tool from the OpenPype menu +2. Select a file from the list - the latest version is the highest *(descending order)* +3. Press the `Open` button +
+
+ +![workfiles_1](assets/workfiles_1.png) + +
+
+ + +### To save a new workfile +1. Open the Workfiles tool from the OpenPype menu +2. Press the `Save As` button +3. You can add an optional comment to the filename; it will be appended at the end +4. Press `OK` + +:::note +You can manually override the workfile version by unticking the next available version and using the version menu to choose your own. +::: + diff --git a/website/docs/assets/3dsmax_SavingFirstFile2_OP.png b/website/docs/assets/3dsmax_SavingFirstFile2_OP.png new file mode 100644 index 0000000000..4066ee0f1a Binary files /dev/null and b/website/docs/assets/3dsmax_SavingFirstFile2_OP.png differ diff --git a/website/docs/assets/3dsmax_SavingFirstFile_OP.png b/website/docs/assets/3dsmax_SavingFirstFile_OP.png new file mode 100644 index 0000000000..c4832ca6bb Binary files /dev/null and b/website/docs/assets/3dsmax_SavingFirstFile_OP.png differ diff --git a/website/docs/assets/3dsmax_context.png b/website/docs/assets/3dsmax_context.png new file mode 100644 index 0000000000..9b84cb2587 Binary files /dev/null and b/website/docs/assets/3dsmax_context.png differ diff --git a/website/docs/assets/3dsmax_menu_OP.png b/website/docs/assets/3dsmax_menu_OP.png new file mode 100644 index 0000000000..bce2f9aac0 Binary files /dev/null and b/website/docs/assets/3dsmax_menu_OP.png differ diff --git a/website/docs/assets/3dsmax_menu_first_OP.png b/website/docs/assets/3dsmax_menu_first_OP.png new file mode 100644 index 0000000000..c3a7b00cbb Binary files /dev/null and b/website/docs/assets/3dsmax_menu_first_OP.png differ diff --git a/website/docs/assets/3dsmax_model_OP.png b/website/docs/assets/3dsmax_model_OP.png new file mode 100644 index 0000000000..293c06642c Binary files /dev/null and b/website/docs/assets/3dsmax_model_OP.png differ diff --git a/website/docs/assets/3dsmax_tray_OP.png b/website/docs/assets/3dsmax_tray_OP.png new file mode 100644 index 0000000000..cfd0b07ef6 Binary files /dev/null and b/website/docs/assets/3dsmax_tray_OP.png differ diff --git a/website/docs/assets/aftereffects_publish_failed.png b/website/docs/assets/aftereffects_publish_failed.png new file mode 100644 index 0000000000..5821fcbb31 Binary files /dev/null and b/website/docs/assets/aftereffects_publish_failed.png differ diff --git a/website/docs/assets/aftereffects_publish_instance.png b/website/docs/assets/aftereffects_publish_instance.png new file mode 100644 index 0000000000..7ce7f194b9 Binary files /dev/null and b/website/docs/assets/aftereffects_publish_instance.png differ diff --git a/website/docs/assets/aftereffects_render_instance.png b/website/docs/assets/aftereffects_render_instance.png new file mode 100644 index 0000000000..b9be8b3f5d Binary files /dev/null and b/website/docs/assets/aftereffects_render_instance.png differ diff --git a/website/docs/assets/deadline_job_version.png b/website/docs/assets/deadline_job_version.png new file mode 100644 index 0000000000..0b78d6a35c Binary files /dev/null and b/website/docs/assets/deadline_job_version.png differ diff --git a/website/docs/assets/experimental_tools_menu.png b/website/docs/assets/experimental_tools_menu.png new file mode 100644 index 0000000000..79fa8d3655 Binary files /dev/null and b/website/docs/assets/experimental_tools_menu.png differ diff --git a/website/docs/assets/experimental_tools_settings.png b/website/docs/assets/experimental_tools_settings.png new file mode 100644 index 0000000000..4d514e8a8f Binary files /dev/null and b/website/docs/assets/experimental_tools_settings.png differ diff --git a/website/docs/assets/photoshop_publish.PNG
b/website/docs/assets/harmony_publish.png similarity index 100% rename from website/docs/assets/photoshop_publish.PNG rename to website/docs/assets/harmony_publish.png diff --git a/website/docs/assets/photoshop_publish_actions.PNG b/website/docs/assets/harmony_publish_actions.png similarity index 100% rename from website/docs/assets/photoshop_publish_actions.PNG rename to website/docs/assets/harmony_publish_actions.png diff --git a/website/docs/assets/photoshop_publish_expand.PNG b/website/docs/assets/harmony_publish_expand.png similarity index 100% rename from website/docs/assets/photoshop_publish_expand.PNG rename to website/docs/assets/harmony_publish_expand.png diff --git a/website/docs/assets/photoshop_publish_inspect.PNG b/website/docs/assets/harmony_publish_inspect.png similarity index 100% rename from website/docs/assets/photoshop_publish_inspect.PNG rename to website/docs/assets/harmony_publish_inspect.png diff --git a/website/docs/assets/photoshop_publish_repair.gif b/website/docs/assets/harmony_publish_repair.gif similarity index 100% rename from website/docs/assets/photoshop_publish_repair.gif rename to website/docs/assets/harmony_publish_repair.gif diff --git a/website/docs/assets/hiero-admin_scriptsmenu.png b/website/docs/assets/hiero-admin_scriptsmenu.png new file mode 100644 index 0000000000..6de136a434 Binary files /dev/null and b/website/docs/assets/hiero-admin_scriptsmenu.png differ diff --git a/website/docs/assets/houdini-admin_shelvesmanager.png b/website/docs/assets/houdini-admin_shelvesmanager.png new file mode 100644 index 0000000000..ba2f15a6a5 Binary files /dev/null and b/website/docs/assets/houdini-admin_shelvesmanager.png differ diff --git a/website/docs/assets/kitsu/kitsu_credentials.png b/website/docs/assets/kitsu/kitsu_credentials.png new file mode 100644 index 0000000000..25c1ad93c4 Binary files /dev/null and b/website/docs/assets/kitsu/kitsu_credentials.png differ diff --git a/website/docs/assets/maya-arnold_scene_source.png b/website/docs/assets/maya-arnold_scene_source.png new file mode 100644 index 0000000000..4150b78aac Binary files /dev/null and b/website/docs/assets/maya-arnold_scene_source.png differ diff --git a/website/docs/assets/maya-arnold_standin.png b/website/docs/assets/maya-arnold_standin.png new file mode 100644 index 0000000000..74571a86fa Binary files /dev/null and b/website/docs/assets/maya-arnold_standin.png differ diff --git a/website/docs/assets/maya-build_workfile_from_template.png b/website/docs/assets/maya-build_workfile_from_template.png new file mode 100644 index 0000000000..7ef87861fe Binary files /dev/null and b/website/docs/assets/maya-build_workfile_from_template.png differ diff --git a/website/docs/assets/maya-create_placeholder.png b/website/docs/assets/maya-create_placeholder.png new file mode 100644 index 0000000000..3f49fe2e2b Binary files /dev/null and b/website/docs/assets/maya-create_placeholder.png differ diff --git a/website/docs/assets/maya-multiverse_openpype_asset_creator.png b/website/docs/assets/maya-multiverse_openpype_asset_creator.png new file mode 100644 index 0000000000..0426e9f823 Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_asset_creator.png differ diff --git a/website/docs/assets/maya-multiverse_openpype_composition_creator.png b/website/docs/assets/maya-multiverse_openpype_composition_creator.png new file mode 100644 index 0000000000..eb0e5e3348 Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_composition_creator.png differ diff --git 
a/website/docs/assets/maya-multiverse_openpype_loader.png b/website/docs/assets/maya-multiverse_openpype_loader.png new file mode 100644 index 0000000000..0579e0dcde Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_loader.png differ diff --git a/website/docs/assets/maya-multiverse_openpype_look_creator.png b/website/docs/assets/maya-multiverse_openpype_look_creator.png new file mode 100644 index 0000000000..fd27d5fd1a Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_look_creator.png differ diff --git a/website/docs/assets/maya-multiverse_openpype_override_creator.png b/website/docs/assets/maya-multiverse_openpype_override_creator.png new file mode 100644 index 0000000000..d7b1299ba6 Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_override_creator.png differ diff --git a/website/docs/assets/maya-multiverse_openpype_publishers.png b/website/docs/assets/maya-multiverse_openpype_publishers.png new file mode 100644 index 0000000000..bee6c79fe9 Binary files /dev/null and b/website/docs/assets/maya-multiverse_openpype_publishers.png differ diff --git a/website/docs/assets/maya-multiverse_setup.png b/website/docs/assets/maya-multiverse_setup.png new file mode 100644 index 0000000000..72bdb0d379 Binary files /dev/null and b/website/docs/assets/maya-multiverse_setup.png differ diff --git a/website/docs/assets/maya-placeholder_new.png b/website/docs/assets/maya-placeholder_new.png new file mode 100644 index 0000000000..106a5275cd Binary files /dev/null and b/website/docs/assets/maya-placeholder_new.png differ diff --git a/website/docs/assets/maya-pointcache_setup.png b/website/docs/assets/maya-pointcache_setup.png index 8904baa239..b2dc126901 100644 Binary files a/website/docs/assets/maya-pointcache_setup.png and b/website/docs/assets/maya-pointcache_setup.png differ diff --git a/website/docs/assets/maya-workfile-outliner.png b/website/docs/assets/maya-workfile-outliner.png new file mode 100644 index 0000000000..fbd1bbd03b Binary files /dev/null and b/website/docs/assets/maya-workfile-outliner.png differ diff --git a/website/docs/assets/nuke-admin_gizmomenu.png b/website/docs/assets/nuke-admin_gizmomenu.png new file mode 100644 index 0000000000..81e63b2041 Binary files /dev/null and b/website/docs/assets/nuke-admin_gizmomenu.png differ diff --git a/website/docs/assets/nuke-admin_scriptsmenu.png b/website/docs/assets/nuke-admin_scriptsmenu.png new file mode 100644 index 0000000000..cad2a4411d Binary files /dev/null and b/website/docs/assets/nuke-admin_scriptsmenu.png differ diff --git a/website/docs/assets/nuke_addProfile.png b/website/docs/assets/nuke_addProfile.png new file mode 100644 index 0000000000..37578df7f5 Binary files /dev/null and b/website/docs/assets/nuke_addProfile.png differ diff --git a/website/docs/assets/nuke_buildWorfileFromTemplate.png b/website/docs/assets/nuke_buildWorfileFromTemplate.png new file mode 100644 index 0000000000..77d2de2ff8 Binary files /dev/null and b/website/docs/assets/nuke_buildWorfileFromTemplate.png differ diff --git a/website/docs/assets/nuke_buildworkfile.png b/website/docs/assets/nuke_buildworkfile.png new file mode 100644 index 0000000000..e3d8d07f7c Binary files /dev/null and b/website/docs/assets/nuke_buildworkfile.png differ diff --git a/website/docs/assets/nuke_createPlaceHolder.png b/website/docs/assets/nuke_createPlaceHolder.png new file mode 100644 index 0000000000..93fb4de9d0 Binary files /dev/null and b/website/docs/assets/nuke_createPlaceHolder.png differ diff --git 
a/website/docs/assets/nuke_fillingExtraAttributes.png b/website/docs/assets/nuke_fillingExtraAttributes.png new file mode 100644 index 0000000000..146c4d9db6 Binary files /dev/null and b/website/docs/assets/nuke_fillingExtraAttributes.png differ diff --git a/website/docs/assets/nuke_placeHolderNode.png b/website/docs/assets/nuke_placeHolderNode.png new file mode 100644 index 0000000000..ac9e83b9d6 Binary files /dev/null and b/website/docs/assets/nuke_placeHolderNode.png differ diff --git a/website/docs/assets/nuke_placeholder.png b/website/docs/assets/nuke_placeholder.png new file mode 100644 index 0000000000..d899ff742c Binary files /dev/null and b/website/docs/assets/nuke_placeholder.png differ diff --git a/website/docs/assets/nuke_publishedinstance.png b/website/docs/assets/nuke_publishedinstance.png new file mode 100644 index 0000000000..494b9e40a4 Binary files /dev/null and b/website/docs/assets/nuke_publishedinstance.png differ diff --git a/website/docs/assets/nuke_updatePlaceHolder.png b/website/docs/assets/nuke_updatePlaceHolder.png new file mode 100644 index 0000000000..58fac2a7e4 Binary files /dev/null and b/website/docs/assets/nuke_updatePlaceHolder.png differ diff --git a/website/docs/assets/nuke_updateWorkfile.png b/website/docs/assets/nuke_updateWorkfile.png new file mode 100644 index 0000000000..aeeebb123f Binary files /dev/null and b/website/docs/assets/nuke_updateWorkfile.png differ diff --git a/website/docs/assets/photoshop_creator.PNG b/website/docs/assets/photoshop_creator.png similarity index 100% rename from website/docs/assets/photoshop_creator.PNG rename to website/docs/assets/photoshop_creator.png diff --git a/website/docs/assets/photoshop_extension.PNG b/website/docs/assets/photoshop_extension.PNG deleted file mode 100644 index ef7081443d..0000000000 Binary files a/website/docs/assets/photoshop_extension.PNG and /dev/null differ diff --git a/website/docs/assets/photoshop_extension.png b/website/docs/assets/photoshop_extension.png new file mode 100644 index 0000000000..1f5c1792e1 Binary files /dev/null and b/website/docs/assets/photoshop_extension.png differ diff --git a/website/docs/assets/photoshop_loader.PNG b/website/docs/assets/photoshop_loader.png similarity index 100% rename from website/docs/assets/photoshop_loader.PNG rename to website/docs/assets/photoshop_loader.png diff --git a/website/docs/assets/photoshop_manage.PNG b/website/docs/assets/photoshop_manage.png similarity index 100% rename from website/docs/assets/photoshop_manage.PNG rename to website/docs/assets/photoshop_manage.png diff --git a/website/docs/assets/photoshop_publish.png b/website/docs/assets/photoshop_publish.png new file mode 100644 index 0000000000..23b61cc609 Binary files /dev/null and b/website/docs/assets/photoshop_publish.png differ diff --git a/website/docs/assets/photoshop_publish_failed.png b/website/docs/assets/photoshop_publish_failed.png new file mode 100644 index 0000000000..5aaafcbda2 Binary files /dev/null and b/website/docs/assets/photoshop_publish_failed.png differ diff --git a/website/docs/assets/photoshop_publish_images.png b/website/docs/assets/photoshop_publish_images.png new file mode 100644 index 0000000000..34f0b4755c Binary files /dev/null and b/website/docs/assets/photoshop_publish_images.png differ diff --git a/website/docs/assets/photoshop_publish_validations.png b/website/docs/assets/photoshop_publish_validations.png new file mode 100644 index 0000000000..2260c8d50b Binary files /dev/null and b/website/docs/assets/photoshop_publish_validations.png differ 
diff --git a/website/docs/assets/publisher_card_view.png b/website/docs/assets/publisher_card_view.png new file mode 100644 index 0000000000..57b012cb6d Binary files /dev/null and b/website/docs/assets/publisher_card_view.png differ diff --git a/website/docs/assets/publisher_create_dialog.png b/website/docs/assets/publisher_create_dialog.png new file mode 100644 index 0000000000..6e9275062d Binary files /dev/null and b/website/docs/assets/publisher_create_dialog.png differ diff --git a/website/docs/assets/publisher_list_view.png b/website/docs/assets/publisher_list_view.png new file mode 100644 index 0000000000..e9dc8a607a Binary files /dev/null and b/website/docs/assets/publisher_list_view.png differ diff --git a/website/docs/assets/settings/anatomy_attributes.png b/website/docs/assets/settings/anatomy_attributes.png new file mode 100644 index 0000000000..777b1c36ac Binary files /dev/null and b/website/docs/assets/settings/anatomy_attributes.png differ diff --git a/website/docs/assets/settings/anatomy_tasks.png b/website/docs/assets/settings/anatomy_tasks.png new file mode 100644 index 0000000000..16265cf8eb Binary files /dev/null and b/website/docs/assets/settings/anatomy_tasks.png differ diff --git a/website/docs/assets/settings/template_build_workfile.png b/website/docs/assets/settings/template_build_workfile.png new file mode 100644 index 0000000000..1bea5b01f5 Binary files /dev/null and b/website/docs/assets/settings/template_build_workfile.png differ diff --git a/website/docs/assets/settings_dev.png b/website/docs/assets/settings_dev.png new file mode 100644 index 0000000000..4d0359461e Binary files /dev/null and b/website/docs/assets/settings_dev.png differ diff --git a/website/docs/assets/unreal-avalon_tools.jpg b/website/docs/assets/unreal-avalon_tools.jpg deleted file mode 100644 index 531fbe516a..0000000000 Binary files a/website/docs/assets/unreal-avalon_tools.jpg and /dev/null differ diff --git a/website/docs/assets/unreal-container.jpg b/website/docs/assets/unreal-container.jpg deleted file mode 100644 index f0c0a61e95..0000000000 Binary files a/website/docs/assets/unreal-container.jpg and /dev/null differ diff --git a/website/docs/assets/unreal_add_level.png b/website/docs/assets/unreal_add_level.png new file mode 100644 index 0000000000..caeef03d10 Binary files /dev/null and b/website/docs/assets/unreal_add_level.png differ diff --git a/website/docs/assets/unreal_container.jpg b/website/docs/assets/unreal_container.jpg new file mode 100644 index 0000000000..0fda640b00 Binary files /dev/null and b/website/docs/assets/unreal_container.jpg differ diff --git a/website/docs/assets/unreal_create_render.png b/website/docs/assets/unreal_create_render.png new file mode 100644 index 0000000000..2e3ef20b35 Binary files /dev/null and b/website/docs/assets/unreal_create_render.png differ diff --git a/website/docs/assets/unreal_layout_loading_no_sequence.png b/website/docs/assets/unreal_layout_loading_no_sequence.png new file mode 100644 index 0000000000..ed05d77f53 Binary files /dev/null and b/website/docs/assets/unreal_layout_loading_no_sequence.png differ diff --git a/website/docs/assets/unreal_layout_loading_result.png b/website/docs/assets/unreal_layout_loading_result.png new file mode 100644 index 0000000000..55b329110b Binary files /dev/null and b/website/docs/assets/unreal_layout_loading_result.png differ diff --git a/website/docs/assets/unreal_level_list.png b/website/docs/assets/unreal_level_list.png new file mode 100644 index 0000000000..2fc0c1bfc7 Binary files /dev/null and 
b/website/docs/assets/unreal_level_list.png differ diff --git a/website/docs/assets/unreal_level_list_no_sequences.png b/website/docs/assets/unreal_level_list_no_sequences.png new file mode 100644 index 0000000000..7ed912b68b Binary files /dev/null and b/website/docs/assets/unreal_level_list_no_sequences.png differ diff --git a/website/docs/assets/unreal_level_streaming_method.png b/website/docs/assets/unreal_level_streaming_method.png new file mode 100644 index 0000000000..8f817abd2e Binary files /dev/null and b/website/docs/assets/unreal_level_streaming_method.png differ diff --git a/website/docs/assets/unreal_level_streaming_method_no_sequences.png b/website/docs/assets/unreal_level_streaming_method_no_sequences.png new file mode 100644 index 0000000000..77a2754ded Binary files /dev/null and b/website/docs/assets/unreal_level_streaming_method_no_sequences.png differ diff --git a/website/docs/assets/unreal_load_layout.png b/website/docs/assets/unreal_load_layout.png new file mode 100644 index 0000000000..ffad60ae9b Binary files /dev/null and b/website/docs/assets/unreal_load_layout.png differ diff --git a/website/docs/assets/unreal_load_layout_batch.png b/website/docs/assets/unreal_load_layout_batch.png new file mode 100644 index 0000000000..dd2f2f3e8f Binary files /dev/null and b/website/docs/assets/unreal_load_layout_batch.png differ diff --git a/website/docs/assets/unreal_openpype_tools.png b/website/docs/assets/unreal_openpype_tools.png new file mode 100644 index 0000000000..bf7d850ab2 Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools.png differ diff --git a/website/docs/assets/unreal_openpype_tools_create.png b/website/docs/assets/unreal_openpype_tools_create.png new file mode 100644 index 0000000000..9cfb95f2a1 Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_create.png differ diff --git a/website/docs/assets/unreal_openpype_tools_load.png b/website/docs/assets/unreal_openpype_tools_load.png new file mode 100644 index 0000000000..4909feac3b Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_load.png differ diff --git a/website/docs/assets/unreal_openpype_tools_manage.png b/website/docs/assets/unreal_openpype_tools_manage.png new file mode 100644 index 0000000000..af7b182842 Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_manage.png differ diff --git a/website/docs/assets/unreal_openpype_tools_publish.png b/website/docs/assets/unreal_openpype_tools_publish.png new file mode 100644 index 0000000000..ab4c10c4ca Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_publish.png differ diff --git a/website/docs/assets/unreal_openpype_tools_render.png b/website/docs/assets/unreal_openpype_tools_render.png new file mode 100644 index 0000000000..377dc2951e Binary files /dev/null and b/website/docs/assets/unreal_openpype_tools_render.png differ diff --git a/website/docs/assets/unreal_publish_render.png b/website/docs/assets/unreal_publish_render.png new file mode 100644 index 0000000000..674b0ac30e Binary files /dev/null and b/website/docs/assets/unreal_publish_render.png differ diff --git a/website/docs/assets/unreal_setting_level_sequence.png b/website/docs/assets/unreal_setting_level_sequence.png new file mode 100644 index 0000000000..5a8adc6257 Binary files /dev/null and b/website/docs/assets/unreal_setting_level_sequence.png differ diff --git a/website/docs/changelog.md b/website/docs/changelog.md deleted file mode 100644 index 448592b930..0000000000 --- a/website/docs/changelog.md +++ 
/dev/null @@ -1,1138 +0,0 @@ ---- -id: changelog -title: Changelog -sidebar_label: Changelog ---- - -## [2.18.0](https://github.com/pypeclub/openpype/tree/2.18.0) -_**release date:** (2021-05-18)_ - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.3...2.18.0) - -**Enhancements:** - -- Use SubsetLoader and multiple contexts for delete_old_versions [\#1484](https://github.com/pypeclub/OpenPype/pull/1484) -- TVPaint: Increment workfile version on successful publish. [\#1489](https://github.com/pypeclub/OpenPype/pull/1489) -- Maya: Use of multiple deadline servers [\#1483](https://github.com/pypeclub/OpenPype/pull/1483) - -**Fixed bugs:** - -- Use instance frame start instead of timeline. [\#1486](https://github.com/pypeclub/OpenPype/pull/1486) -- Maya: Redshift - set proper start frame on proxy [\#1480](https://github.com/pypeclub/OpenPype/pull/1480) -- Maya: wrong collection of playblasted frames [\#1517](https://github.com/pypeclub/OpenPype/pull/1517) -- Existing subsets hints in creator [\#1502](https://github.com/pypeclub/OpenPype/pull/1502) - - -### [2.17.3](https://github.com/pypeclub/openpype/tree/2.17.3) -_**release date:** (2021-05-06)_ - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.3...2.17.3) - -**Fixed bugs:** - -- Nuke: workfile version synced to db version always [\#1479](https://github.com/pypeclub/OpenPype/pull/1479) - -### [2.17.2](https://github.com/pypeclub/openpype/tree/2.17.2) -_**release date:** (2021-05-04)_ - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-rc.1...2.17.2) - -**Enhancements:** - -- Forward/Backward compatible apps and tools with OpenPype 3 [\#1463](https://github.com/pypeclub/OpenPype/pull/1463) - -### [2.17.1](https://github.com/pypeclub/openpype/tree/2.17.1) -_**release date:** (2021-04-30)_ - -[Full Changelog](https://github.com/pypeclub/openpype/compare/2.17.0...2.17.1) - -**Enhancements:** - -- Faster settings UI loading [\#1442](https://github.com/pypeclub/OpenPype/pull/1442) -- Nuke: deadline submission with gpu [\#1414](https://github.com/pypeclub/OpenPype/pull/1414) -- TVPaint frame range definition [\#1424](https://github.com/pypeclub/OpenPype/pull/1424) -- PS - group all published instances [\#1415](https://github.com/pypeclub/OpenPype/pull/1415) -- Add task name to context pop up. [\#1383](https://github.com/pypeclub/OpenPype/pull/1383) -- Enhance review letterbox feature.
[\#1371](https://github.com/pypeclub/OpenPype/pull/1371) -- AE add duration validation [\#1363](https://github.com/pypeclub/OpenPype/pull/1363) - -**Fixed bugs:** - -- Houdini menu filename [\#1417](https://github.com/pypeclub/OpenPype/pull/1417) -- Nuke: fixing undo for loaded mov and sequence [\#1433](https://github.com/pypeclub/OpenPype/pull/1433) -- AE - validation for duration was 1 frame shorter [\#1426](https://github.com/pypeclub/OpenPype/pull/1426) - -**Merged pull requests:** - -- Maya: Vray - problem getting all file nodes for look publishing [\#1399](https://github.com/pypeclub/OpenPype/pull/1399) -- Maya: Support for Redshift proxies [\#1360](https://github.com/pypeclub/OpenPype/pull/1360) - -## [2.17.0](https://github.com/pypeclub/openpype/tree/2.17.0) -_**release date:** (2021-04-20)_ - -[Full Changelog](https://github.com/pypeclub/openpype/compare/CI/3.0.0-beta.2...2.17.0) - -**Enhancements:** - -- Forward compatible ftrack group [\#1243](https://github.com/pypeclub/OpenPype/pull/1243) -- Maya: Make tx option configurable with presets [\#1328](https://github.com/pypeclub/OpenPype/pull/1328) -- TVPaint asset name validation [\#1302](https://github.com/pypeclub/OpenPype/pull/1302) -- TV Paint: Set initial project settings. [\#1299](https://github.com/pypeclub/OpenPype/pull/1299) -- TV Paint: Validate mark in and out. [\#1298](https://github.com/pypeclub/OpenPype/pull/1298) -- Validate project settings [\#1297](https://github.com/pypeclub/OpenPype/pull/1297) -- After Effects: added SubsetManager [\#1234](https://github.com/pypeclub/OpenPype/pull/1234) -- Show error message in pyblish UI [\#1206](https://github.com/pypeclub/OpenPype/pull/1206) - -**Fixed bugs:** - -- Hiero: fixing source frame from correct object [\#1362](https://github.com/pypeclub/OpenPype/pull/1362) -- Nuke: fix colourspace, prerenders and nuke panes opening [\#1308](https://github.com/pypeclub/OpenPype/pull/1308) -- AE remove orphaned instance from workfile - fix self.stub [\#1282](https://github.com/pypeclub/OpenPype/pull/1282) -- Nuke: deadline submission with search replaced env values from preset [\#1194](https://github.com/pypeclub/OpenPype/pull/1194) -- Ftrack custom attributes in bulks [\#1312](https://github.com/pypeclub/OpenPype/pull/1312) -- Ftrack optional pypeclub role [\#1303](https://github.com/pypeclub/OpenPype/pull/1303) -- After Effects: remove orphaned instances [\#1275](https://github.com/pypeclub/OpenPype/pull/1275) -- Avalon schema names [\#1242](https://github.com/pypeclub/OpenPype/pull/1242) -- Handle duplication of Task name [\#1226](https://github.com/pypeclub/OpenPype/pull/1226) -- Modified path of plugin loads for Harmony and TVPaint [\#1217](https://github.com/pypeclub/OpenPype/pull/1217) -- Regex checks in profiles filtering [\#1214](https://github.com/pypeclub/OpenPype/pull/1214) -- Update custom ftrack session attributes [\#1202](https://github.com/pypeclub/OpenPype/pull/1202) -- Nuke: write node colorspace ignore `default\(\)` label [\#1199](https://github.com/pypeclub/OpenPype/pull/1199) - -## [2.16.0](https://github.com/pypeclub/pype/tree/2.16.0) - - _**release date:** 2021-03-22_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.3...2.16.0) - -**Enhancements:** - -- Nuke: deadline submit limit group filter [\#1167](https://github.com/pypeclub/pype/pull/1167) -- Maya: support for Deadline Group and Limit Groups - backport 2.x [\#1156](https://github.com/pypeclub/pype/pull/1156) -- Maya: fixes for Redshift support
[\#1152](https://github.com/pypeclub/pype/pull/1152) -- Nuke: adding preset for a Read node name to all img and mov Loaders [\#1146](https://github.com/pypeclub/pype/pull/1146) -- nuke deadline submit with environ var from presets overrides [\#1142](https://github.com/pypeclub/pype/pull/1142) -- Change timers after task change [\#1138](https://github.com/pypeclub/pype/pull/1138) -- Nuke: shortcuts for Pype menu [\#1127](https://github.com/pypeclub/pype/pull/1127) -- Nuke: workfile template [\#1124](https://github.com/pypeclub/pype/pull/1124) -- Sites local settings by site name [\#1117](https://github.com/pypeclub/pype/pull/1117) -- Reset loader's asset selection on context change [\#1106](https://github.com/pypeclub/pype/pull/1106) -- Bulk mov render publishing [\#1101](https://github.com/pypeclub/pype/pull/1101) -- Photoshop: mark publishable instances [\#1093](https://github.com/pypeclub/pype/pull/1093) -- Added ability to define BG color for extract review [\#1088](https://github.com/pypeclub/pype/pull/1088) -- TVPaint extractor enhancement [\#1080](https://github.com/pypeclub/pype/pull/1080) -- Photoshop: added support for .psb in workfiles [\#1078](https://github.com/pypeclub/pype/pull/1078) -- Optionally add task to subset name [\#1072](https://github.com/pypeclub/pype/pull/1072) -- Only extend clip range when collecting. [\#1008](https://github.com/pypeclub/pype/pull/1008) -- Collect audio for farm reviews. [\#1073](https://github.com/pypeclub/pype/pull/1073) - - -**Fixed bugs:** - -- Fix path spaces in jpeg extractor [\#1174](https://github.com/pypeclub/pype/pull/1174) -- Maya: Bugfix: superclass for CreateCameraRig [\#1166](https://github.com/pypeclub/pype/pull/1166) -- Maya: Submit to Deadline - fix typo in condition [\#1163](https://github.com/pypeclub/pype/pull/1163) -- Avoid dot in repre extension [\#1125](https://github.com/pypeclub/pype/pull/1125) -- Fix versions variable usage in standalone publisher [\#1090](https://github.com/pypeclub/pype/pull/1090) -- Collect instance data fix subset query [\#1082](https://github.com/pypeclub/pype/pull/1082) -- Fix getting the camera name. [\#1067](https://github.com/pypeclub/pype/pull/1067) -- Nuke: Ensure "NUKE\_TEMP\_DIR" is not part of the Deadline job environment. 
[\#1064](https://github.com/pypeclub/pype/pull/1064) - -### [2.15.3](https://github.com/pypeclub/pype/tree/2.15.3) - - _**release date:** 2021-02-26_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.2...2.15.3) - -**Enhancements:** - -- Maya: speedup renderable camera collection [\#1053](https://github.com/pypeclub/pype/pull/1053) -- Harmony - add regex search to filter allowed task names for collectin… [\#1047](https://github.com/pypeclub/pype/pull/1047) - -**Fixed bugs:** - -- Ftrack integrate hierarchy fix [\#1085](https://github.com/pypeclub/pype/pull/1085) -- Explicit subset filter in anatomy instance data [\#1059](https://github.com/pypeclub/pype/pull/1059) -- TVPaint frame offset [\#1057](https://github.com/pypeclub/pype/pull/1057) -- Auto fix unicode strings [\#1046](https://github.com/pypeclub/pype/pull/1046) - -### [2.15.2](https://github.com/pypeclub/pype/tree/2.15.2) - - _**release date:** 2021-02-19_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.1...2.15.2) - -**Enhancements:** - -- Maya: Vray scene publishing [\#1013](https://github.com/pypeclub/pype/pull/1013) - -**Fixed bugs:** - -- Fix entity move under project [\#1040](https://github.com/pypeclub/pype/pull/1040) -- smaller nuke fixes from production [\#1036](https://github.com/pypeclub/pype/pull/1036) -- TVPaint thumbnail extract fix [\#1031](https://github.com/pypeclub/pype/pull/1031) - -### [2.15.1](https://github.com/pypeclub/pype/tree/2.15.1) - - _**release date:** 2021-02-12_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.15.0...2.15.1) - -**Enhancements:** - -- Delete version as loader action [\#1011](https://github.com/pypeclub/pype/pull/1011) -- Delete old versions [\#445](https://github.com/pypeclub/pype/pull/445) - -**Fixed bugs:** - -- PS - remove obsolete functions from pywin32 [\#1006](https://github.com/pypeclub/pype/pull/1006) -- Clone description of review session objects. [\#922](https://github.com/pypeclub/pype/pull/922) - -## [2.15.0](https://github.com/pypeclub/pype/tree/2.15.0) - - _**release date:** 2021-02-09_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.6...2.15.0) - -**Enhancements:** - -- Resolve - loading and updating clips [\#932](https://github.com/pypeclub/pype/pull/932) -- Release/2.15.0 [\#926](https://github.com/pypeclub/pype/pull/926) -- Photoshop: add option for template.psd and prelaunch hook [\#894](https://github.com/pypeclub/pype/pull/894) -- Nuke: deadline presets [\#993](https://github.com/pypeclub/pype/pull/993) -- Maya: Alembic only set attributes that exists. [\#986](https://github.com/pypeclub/pype/pull/986) -- Harmony: render local and handle fixes [\#981](https://github.com/pypeclub/pype/pull/981) -- PSD Bulk export of ANIM group [\#965](https://github.com/pypeclub/pype/pull/965) -- AE - added prelaunch hook for opening last or workfile from template [\#944](https://github.com/pypeclub/pype/pull/944) -- PS - safer handling of loading of workfile [\#941](https://github.com/pypeclub/pype/pull/941) -- Maya: Handling Arnold referenced AOVs [\#938](https://github.com/pypeclub/pype/pull/938) -- TVPaint: switch layer IDs for layer names during identification [\#903](https://github.com/pypeclub/pype/pull/903) -- TVPaint audio/sound loader [\#893](https://github.com/pypeclub/pype/pull/893) -- Clone review session with children. 
[\#891](https://github.com/pypeclub/pype/pull/891) -- Simple compositing data packager for freelancers [\#884](https://github.com/pypeclub/pype/pull/884) -- Harmony deadline submission [\#881](https://github.com/pypeclub/pype/pull/881) -- Maya: Optionally hide image planes from reviews. [\#840](https://github.com/pypeclub/pype/pull/840) -- Maya: handle referenced AOVs for Vray [\#824](https://github.com/pypeclub/pype/pull/824) -- DWAA/DWAB support on windows [\#795](https://github.com/pypeclub/pype/pull/795) -- Unreal: animation, layout and setdress updates [\#695](https://github.com/pypeclub/pype/pull/695) - -**Fixed bugs:** - -- Maya: Looks - disable hardlinks [\#995](https://github.com/pypeclub/pype/pull/995) -- Fix Ftrack custom attribute update [\#982](https://github.com/pypeclub/pype/pull/982) -- Prores ks in burnin script [\#960](https://github.com/pypeclub/pype/pull/960) -- terminal.py crash on import [\#839](https://github.com/pypeclub/pype/pull/839) -- Extract review handle bizarre pixel aspect ratio [\#990](https://github.com/pypeclub/pype/pull/990) -- Nuke: add nuke related env var to submission [\#988](https://github.com/pypeclub/pype/pull/988) -- Nuke: missing preset's variable [\#984](https://github.com/pypeclub/pype/pull/984) -- Get creator by name fix [\#979](https://github.com/pypeclub/pype/pull/979) -- Fix update of project's tasks on Ftrack sync [\#972](https://github.com/pypeclub/pype/pull/972) -- nuke: wrong frame offset in mov loader [\#971](https://github.com/pypeclub/pype/pull/971) -- Create project structure action fix multiroot [\#967](https://github.com/pypeclub/pype/pull/967) -- PS: remove pywin installation from hook [\#964](https://github.com/pypeclub/pype/pull/964) -- Prores ks in burnin script [\#959](https://github.com/pypeclub/pype/pull/959) -- Subset family is now stored in subset document [\#956](https://github.com/pypeclub/pype/pull/956) -- DJV new version arguments [\#954](https://github.com/pypeclub/pype/pull/954) -- TV Paint: Fix single frame Sequence [\#953](https://github.com/pypeclub/pype/pull/953) -- nuke: missing `file` knob update [\#933](https://github.com/pypeclub/pype/pull/933) -- Photoshop: Create from single layer was failing [\#920](https://github.com/pypeclub/pype/pull/920) -- Nuke: baking mov with correct colorspace inherited from write [\#909](https://github.com/pypeclub/pype/pull/909) -- Launcher fix actions discover [\#896](https://github.com/pypeclub/pype/pull/896) -- Get the correct file path for the updated mov.
[\#889](https://github.com/pypeclub/pype/pull/889) -- Maya: Deadline submitter - shared data access violation [\#831](https://github.com/pypeclub/pype/pull/831) -- Maya: Take into account vray master AOV switch [\#822](https://github.com/pypeclub/pype/pull/822) - -**Merged pull requests:** - -- Refactor blender to 3.0 format [\#934](https://github.com/pypeclub/pype/pull/934) - -### [2.14.6](https://github.com/pypeclub/pype/tree/2.14.6) - - _**release date:** 2021-01-15_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.5...2.14.6) - -**Fixed bugs:** - -- Nuke: improving of hashing path [\#885](https://github.com/pypeclub/pype/pull/885) - -**Merged pull requests:** - -- Hiero: cut videos with correct seconds [\#892](https://github.com/pypeclub/pype/pull/892) -- Faster sync to avalon preparation [\#869](https://github.com/pypeclub/pype/pull/869) - -### [2.14.5](https://github.com/pypeclub/pype/tree/2.14.5) - - _**release date:** 2021-01-06_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.4...2.14.5) - -**Merged pull requests:** - -- Pype logger refactor [\#866](https://github.com/pypeclub/pype/pull/866) - -### [2.14.4](https://github.com/pypeclub/pype/tree/2.14.4) - - _**release date:** 2020-12-18_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.3...2.14.4) - -**Merged pull requests:** - -- Fix - AE - added explicit cast to int [\#837](https://github.com/pypeclub/pype/pull/837) - -### [2.14.3](https://github.com/pypeclub/pype/tree/2.14.3) - - _**release date:** 2020-12-16_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.2...2.14.3) - -**Fixed bugs:** - -- TVPaint repair invalid metadata [\#809](https://github.com/pypeclub/pype/pull/809) -- Feature/push hier value to nonhier action [\#807](https://github.com/pypeclub/pype/pull/807) -- Harmony: fix palette and image sequence loader [\#806](https://github.com/pypeclub/pype/pull/806) - -**Merged pull requests:** - -- respecting space in path [\#823](https://github.com/pypeclub/pype/pull/823) - -### [2.14.2](https://github.com/pypeclub/pype/tree/2.14.2) - - _**release date:** 2020-12-04_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.1...2.14.2) - -**Enhancements:** - -- Collapsible wrapper in settings [\#767](https://github.com/pypeclub/pype/pull/767) - -**Fixed bugs:** - -- Harmony: template extraction and palettes thumbnails on mac [\#768](https://github.com/pypeclub/pype/pull/768) -- TVPaint store context to workfile metadata \(764\) [\#766](https://github.com/pypeclub/pype/pull/766) -- Extract review audio cut fix [\#763](https://github.com/pypeclub/pype/pull/763) - -**Merged pull requests:** - -- AE: fix publish after background load [\#781](https://github.com/pypeclub/pype/pull/781) -- TVPaint store members key [\#769](https://github.com/pypeclub/pype/pull/769) - -### [2.14.1](https://github.com/pypeclub/pype/tree/2.14.1) - - _**release date:** 2020-11-27_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.14.0...2.14.1) - -**Enhancements:** - -- Settings required keys in modifiable dict [\#770](https://github.com/pypeclub/pype/pull/770) -- Extract review may not add audio to output [\#761](https://github.com/pypeclub/pype/pull/761) - -**Fixed bugs:** - -- After Effects: frame range, file format and render source scene fixes [\#760](https://github.com/pypeclub/pype/pull/760) -- Hiero: trimming review with clip event number [\#754](https://github.com/pypeclub/pype/pull/754) -- TVPaint: fix updating of loaded subsets
[\#752](https://github.com/pypeclub/pype/pull/752) -- Maya: Vray handling of default aov [\#748](https://github.com/pypeclub/pype/pull/748) -- Maya: multiple renderable cameras in layer didn't work [\#744](https://github.com/pypeclub/pype/pull/744) -- Ftrack integrate custom attributes fix [\#742](https://github.com/pypeclub/pype/pull/742) - - - -## [2.14.0](https://github.com/pypeclub/pype/tree/2.14.0) - - _**release date:** 2020-11-24_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.7...2.14.0) - -**Enhancements:** - -- Ftrack: Event for syncing shot or asset status with tasks. [\#736](https://github.com/pypeclub/pype/pull/736) -- Maya: add camera rig publishing option [\#721](https://github.com/pypeclub/pype/pull/721) -- Maya: Ask user to select non-default camera from scene or create a new one. [\#678](https://github.com/pypeclub/pype/pull/678) -- Maya: Camera name can be added to burnins. [\#674](https://github.com/pypeclub/pype/pull/674) -- Sort instances by label in pyblish gui [\#719](https://github.com/pypeclub/pype/pull/719) -- Synchronize ftrack hierarchical and shot attributes [\#716](https://github.com/pypeclub/pype/pull/716) -- Standalone Publisher: Publish editorial from separate image sequences [\#699](https://github.com/pypeclub/pype/pull/699) -- Render publish plugins abstraction [\#687](https://github.com/pypeclub/pype/pull/687) -- TV Paint: image loader with options [\#675](https://github.com/pypeclub/pype/pull/675) -- **TV Paint (Beta):** initial implementation of creators and local rendering [\#693](https://github.com/pypeclub/pype/pull/693) -- **After Effects (Beta):** base integration with loaders [\#667](https://github.com/pypeclub/pype/pull/667) -- Harmony: Javascript refactoring and overall stability improvements [\#666](https://github.com/pypeclub/pype/pull/666) - -**Fixed bugs:** - -- TVPaint: extract review fix [\#740](https://github.com/pypeclub/pype/pull/740) -- After Effects: Reviews were not being sent to ftrack [\#738](https://github.com/pypeclub/pype/pull/738) -- Maya: vray proxy was not loading [\#722](https://github.com/pypeclub/pype/pull/722) -- Maya: Vray expected file fixes [\#682](https://github.com/pypeclub/pype/pull/682) - -**Deprecated:** - -- Removed artist view from pyblish gui [\#717](https://github.com/pypeclub/pype/pull/717) -- Maya: disable legacy override check for cameras [\#715](https://github.com/pypeclub/pype/pull/715) - - - - -### [2.13.7](https://github.com/pypeclub/pype/tree/2.13.7) - - _**release date:** 2020-11-19_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.6...2.13.7) - -**Merged pull requests:** - -- fix\(SP\): getting fps from context instead of nonexistent entity [\#729](https://github.com/pypeclub/pype/pull/729) - - - - -### [2.13.6](https://github.com/pypeclub/pype/tree/2.13.6) - - _**release date:** 2020-11-15_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.5...2.13.6) - -**Fixed bugs:** - -- Maya workfile version wasn't syncing with renders properly [\#711](https://github.com/pypeclub/pype/pull/711) -- Maya: Fix for publishing multiple cameras with review from the same scene [\#710](https://github.com/pypeclub/pype/pull/710) - - - - -### [2.13.5](https://github.com/pypeclub/pype/tree/2.13.5) - - _**release date:** 2020-11-12_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.4...2.13.5) - - -**Fixed bugs:** - -- Wrong thumbnail file was picked when publishing sequence in standalone publisher [\#703](https://github.com/pypeclub/pype/pull/703) -- Fix:
Burnin data pass and FFmpeg tool check [\#701](https://github.com/pypeclub/pype/pull/701) - - - - -### [2.13.4](https://github.com/pypeclub/pype/tree/2.13.4) - - _**release date:** 2020-11-09_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.3...2.13.4) - - -**Fixed bugs:** - -- Photoshop unhiding hidden layers [\#688](https://github.com/pypeclub/pype/issues/688) -- Nuke: Favorite directories "shot dir" "project dir" - not working \#684 [\#685](https://github.com/pypeclub/pype/pull/685) - - - - - -### [2.13.3](https://github.com/pypeclub/pype/tree/2.13.3) - - _**release date:** 2020-11-03_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.2...2.13.3) - -**Fixed bugs:** - -- Fix ffmpeg executable path with spaces [\#680](https://github.com/pypeclub/pype/pull/680) -- Hotfix: Added default version number [\#679](https://github.com/pypeclub/pype/pull/679) - - - - -### [2.13.2](https://github.com/pypeclub/pype/tree/2.13.2) - - _**release date:** 2020-10-28_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.1...2.13.2) - -**Fixed bugs:** - -- Nuke: wrong conditions when fixing legacy write nodes [\#665](https://github.com/pypeclub/pype/pull/665) - - - - -### [2.13.1](https://github.com/pypeclub/pype/tree/2.13.1) - - _**release date:** 2020-10-23_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.13.0...2.13.1) - -**Fixed bugs:** - -- Photoshop: Layer name is not propagating to metadata [\#654](https://github.com/pypeclub/pype/issues/654) -- Photoshop: Loader fails with "can't set attribute" [\#650](https://github.com/pypeclub/pype/issues/650) -- Hiero: Review video file adding one frame to the end [\#659](https://github.com/pypeclub/pype/issues/659) - - - -## [2.13.0](https://github.com/pypeclub/pype/tree/2.13.0) - - _**release date:** 2020-10-16_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.5...2.13.0) - -**Enhancements:** - -- Deadline Output Folder [\#636](https://github.com/pypeclub/pype/issues/636) -- Nuke Camera Loader [\#565](https://github.com/pypeclub/pype/issues/565) -- Deadline publish job shows publishing output folder [\#649](https://github.com/pypeclub/pype/pull/649) -- Get latest version in lib [\#642](https://github.com/pypeclub/pype/pull/642) -- Improved publishing of multiple representations from SP [\#638](https://github.com/pypeclub/pype/pull/638) -- TvPaint: launch shot work file from within Ftrack [\#631](https://github.com/pypeclub/pype/pull/631) -- Add mp4 support for RV action.
[\#628](https://github.com/pypeclub/pype/pull/628) -- Maya: allow renders to have version synced with workfile [\#618](https://github.com/pypeclub/pype/pull/618) -- Renaming nukestudio host folder to hiero [\#617](https://github.com/pypeclub/pype/pull/617) -- Harmony: More efficient publishing [\#615](https://github.com/pypeclub/pype/pull/615) -- Ftrack server action improvement [\#608](https://github.com/pypeclub/pype/pull/608) -- Deadline user defaults to pype username if present [\#607](https://github.com/pypeclub/pype/pull/607) -- Standalone publisher now has icon [\#606](https://github.com/pypeclub/pype/pull/606) -- Nuke render write targeting knob improvement [\#603](https://github.com/pypeclub/pype/pull/603) -- Animated pyblish gui [\#602](https://github.com/pypeclub/pype/pull/602) -- Maya: Deadline - make use of asset dependencies optional [\#591](https://github.com/pypeclub/pype/pull/591) -- Nuke: Publishing, loading and updating alembic cameras [\#575](https://github.com/pypeclub/pype/pull/575) -- Maya: add look assigner to pype menu even if scriptsmenu is not available [\#573](https://github.com/pypeclub/pype/pull/573) -- Store task types in the database [\#572](https://github.com/pypeclub/pype/pull/572) -- Maya: Tiled EXRs to scanline EXRs render option [\#512](https://github.com/pypeclub/pype/pull/512) -- Fusion: basic integration refresh [\#452](https://github.com/pypeclub/pype/pull/452) - -**Fixed bugs:** - -- Burnin script did not propagate ffmpeg output [\#640](https://github.com/pypeclub/pype/issues/640) -- Pyblish-pype spacer in terminal wasn't transparent [\#646](https://github.com/pypeclub/pype/pull/646) -- Lib subprocess without logger [\#645](https://github.com/pypeclub/pype/pull/645) -- Nuke: prevent crash if we only have single frame in sequence [\#644](https://github.com/pypeclub/pype/pull/644) -- Burnin script logs better output [\#641](https://github.com/pypeclub/pype/pull/641) -- Missing audio on farm submission. [\#639](https://github.com/pypeclub/pype/pull/639) -- review from imagesequence error [\#633](https://github.com/pypeclub/pype/pull/633) -- Hiero: wrong order of fps clip instance data collecting [\#627](https://github.com/pypeclub/pype/pull/627) -- Add source for review instances. 
[\#625](https://github.com/pypeclub/pype/pull/625) -- Task processing in event sync [\#623](https://github.com/pypeclub/pype/pull/623) -- sync to avalon doesn't remove renamed task [\#619](https://github.com/pypeclub/pype/pull/619) -- Intent publish setting wasn't working with default value [\#562](https://github.com/pypeclub/pype/pull/562) -- Maya: Updating a look where the shader name changed, leaves the geo without a shader [\#514](https://github.com/pypeclub/pype/pull/514) - - -### [2.12.5](https://github.com/pypeclub/pype/tree/2.12.5) - -_**release date:** 2020-10-14_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.4...2.12.5) - -**Fixed Bugs:** - -- Harmony: Disable application launch logic [\#637](https://github.com/pypeclub/pype/pull/637) - -### [2.12.4](https://github.com/pypeclub/pype/tree/2.12.4) - -_**release date:** 2020-10-08_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.3...2.12.4) - -**Fixed bugs:** - -- Sync to avalon doesn't remove renamed task [\#605](https://github.com/pypeclub/pype/issues/605) - - -**Merged pull requests:** - -- NukeStudio: small fixes [\#622](https://github.com/pypeclub/pype/pull/622) -- NukeStudio: broken order of plugins [\#620](https://github.com/pypeclub/pype/pull/620) - -### [2.12.3](https://github.com/pypeclub/pype/tree/2.12.3) - -_**release date:** 2020-10-06_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.2...2.12.3) - -**Fixed bugs:** - -- Harmony: empty scene contamination [\#583](https://github.com/pypeclub/pype/issues/583) -- Edit publishing in SP doesn't respect shot selection for publishing [\#542](https://github.com/pypeclub/pype/issues/542) -- Pathlib breaks compatibility with python2 hosts [\#281](https://github.com/pypeclub/pype/issues/281) -- Maya: fix maya scene type preset exception [\#569](https://github.com/pypeclub/pype/pull/569) -- Standalone publisher editorial plugins interfering [\#580](https://github.com/pypeclub/pype/pull/580) - -### [2.12.2](https://github.com/pypeclub/pype/tree/2.12.2) - -_**release date:** 2020-09-25_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.1...2.12.2) - -**Fixed bugs:** - -- Harmony: Saving heavy scenes will crash [\#507](https://github.com/pypeclub/pype/issues/507) -- Extract review a representation name with `\*\_burnin` [\#388](https://github.com/pypeclub/pype/issues/388) -- Hierarchy data was not considering active instances [\#551](https://github.com/pypeclub/pype/pull/551) - -### [2.12.1](https://github.com/pypeclub/pype/tree/2.12.1) - -_**release date:** 2020-09-15_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.12.0...2.12.1) - -**Fixed bugs:** - -- dependency security alert! [\#484](https://github.com/pypeclub/pype/issues/484) -- Maya: RenderSetup is missing update [\#106](https://github.com/pypeclub/pype/issues/106) -- extract effects creates new instance [\#78](https://github.com/pypeclub/pype/issues/78) - - - - -## [2.12.0](https://github.com/pypeclub/pype/tree/2.12.0) ## - -_**release date:** 09 Sept 2020_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.8...2.12.0) - -**Enhancements:** - -- Pype now uses fewer mongo connections [\#509](https://github.com/pypeclub/pype/pull/509) -- Nuke: adding image loader [\#499](https://github.com/pypeclub/pype/pull/499) -- Completely new application launcher [\#443](https://github.com/pypeclub/pype/pull/443) -- Maya: Optional skip review on renders.
[\#441](https://github.com/pypeclub/pype/pull/441) -- Ftrack: Option to push status from task to latest version [\#440](https://github.com/pypeclub/pype/pull/440) -- Maya: Properly containerize image plane loads. [\#434](https://github.com/pypeclub/pype/pull/434) -- Option to keep the review files. [\#426](https://github.com/pypeclub/pype/pull/426) -- Maya: Isolate models during preview publishing [\#425](https://github.com/pypeclub/pype/pull/425) -- Ftrack attribute group is backwards compatible [\#418](https://github.com/pypeclub/pype/pull/418) -- Maya: Publishing of tile renderings on Deadline [\#398](https://github.com/pypeclub/pype/pull/398) -- Slightly better logging gui [\#383](https://github.com/pypeclub/pype/pull/383) -- Standalonepublisher: editorial family features expansion [\#411](https://github.com/pypeclub/pype/pull/411) - -**Fixed bugs:** - -- Maya: Fix tile order for Draft Tile Assembler [\#511](https://github.com/pypeclub/pype/pull/511) -- Remove extra dash [\#501](https://github.com/pypeclub/pype/pull/501) -- Fix: strip dot from repre names in single frame renders [\#498](https://github.com/pypeclub/pype/pull/498) -- Better handling of destination during integrating [\#485](https://github.com/pypeclub/pype/pull/485) -- Fix: allow thumbnail creation for single frame renders [\#460](https://github.com/pypeclub/pype/pull/460) -- added missing argument to launch\_application in ftrack app handler [\#453](https://github.com/pypeclub/pype/pull/453) -- Burnins: Copy bit rate of input video to match quality. [\#448](https://github.com/pypeclub/pype/pull/448) -- Standalone publisher is now independent from tray [\#442](https://github.com/pypeclub/pype/pull/442) -- Bugfix/empty enumerator attributes [\#436](https://github.com/pypeclub/pype/pull/436) -- Fixed wrong order of "other" category collapsing in publisher [\#435](https://github.com/pypeclub/pype/pull/435) -- Multiple reviews were being overwritten to one.
[\#424](https://github.com/pypeclub/pype/pull/424) -- Cleanup plugin fail on instances without staging dir [\#420](https://github.com/pypeclub/pype/pull/420) -- deprecated -intra parameter in ffmpeg to new `-g` [\#417](https://github.com/pypeclub/pype/pull/417) -- Delivery action can now work with entered path [\#397](https://github.com/pypeclub/pype/pull/397) - - - - - -### [2.11.8](https://github.com/pypeclub/pype/tree/2.11.8) ## - -_**release date:** 27 Aug 2020_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.7...2.11.8) - -**Fixed bugs:** - -- pyblish pype - other group is collapsed before plugins are done [\#431](https://github.com/pypeclub/pype/issues/431) -- Alpha white edges in harmony on PNGs [\#412](https://github.com/pypeclub/pype/issues/412) -- harmony image loader picks wrong representations [\#404](https://github.com/pypeclub/pype/issues/404) -- Clockify crash when response contains a symbol not allowed by UTF-8 [\#81](https://github.com/pypeclub/pype/issues/81) - - - - -### [2.11.7](https://github.com/pypeclub/pype/tree/2.11.7) ## - -_**release date:** 21 Aug 2020_ - - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.6...2.11.7) - -**Fixed bugs:** - -- Clean Up Baked Movie [\#369](https://github.com/pypeclub/pype/issues/369) -- celaction last workfile wasn't picked up correctly [\#459](https://github.com/pypeclub/pype/pull/459) - - - -### [2.11.5](https://github.com/pypeclub/pype/tree/2.11.5) ## - -_**release date:** 13 Aug 2020_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.4...2.11.5) - -**Enhancements:** - -- Standalone publisher now only groups sequence if the extension is known [\#439](https://github.com/pypeclub/pype/pull/439) - -**Fixed bugs:** - -- Logs have been disabled for editorial by default to speed up publishing [\#433](https://github.com/pypeclub/pype/pull/433) -- Various fixes for celaction [\#430](https://github.com/pypeclub/pype/pull/430) -- Harmony: invalid variable scope in validate scene settings [\#428](https://github.com/pypeclub/pype/pull/428) -- Harmony: new representation name for audio was not accepted [\#427](https://github.com/pypeclub/pype/pull/427) - - - - -### [2.11.3](https://github.com/pypeclub/pype/tree/2.11.3) ## - -_**release date:** 4 Aug 2020_ - -[Full Changelog](https://github.com/pypeclub/pype/compare/2.11.2...2.11.3) - -**Fixed bugs:** - -- Harmony: publishing performance issues [\#408](https://github.com/pypeclub/pype/pull/408) - - - - -## 2.11.0 ## - -_**release date:** 27 July 2020_ - -**new:** -- _(blender)_ namespace support [\#341](https://github.com/pypeclub/pype/pull/341) -- _(blender)_ start end frames [\#330](https://github.com/pypeclub/pype/pull/330) -- _(blender)_ camera asset [\#322](https://github.com/pypeclub/pype/pull/322) -- _(pype)_ toggle instances per family in pyblish GUI [\#320](https://github.com/pypeclub/pype/pull/320) -- _(pype)_ current release version is now shown in the tray menu [#379](https://github.com/pypeclub/pype/pull/379) - - -**improved:** -- _(resolve)_ tagging for publish [\#239](https://github.com/pypeclub/pype/issues/239) -- _(pype)_ Support publishing a subset of shots with standalone editorial [\#336](https://github.com/pypeclub/pype/pull/336) -- _(harmony)_ Basic support for palettes [\#324](https://github.com/pypeclub/pype/pull/324) -- _(photoshop)_ Flag outdated containers on startup and publish.
[\#309](https://github.com/pypeclub/pype/pull/309) -- _(harmony)_ Flag Outdated containers [\#302](https://github.com/pypeclub/pype/pull/302) -- _(photoshop)_ Publish review [\#298](https://github.com/pypeclub/pype/pull/298) -- _(pype)_ Optional Last workfile launch [\#365](https://github.com/pypeclub/pype/pull/365) - - -**fixed:** -- _(premiere)_ workflow fixes [\#346](https://github.com/pypeclub/pype/pull/346) -- _(pype)_ pype-setup does not work with space in path [\#327](https://github.com/pypeclub/pype/issues/327) -- _(ftrack)_ Ftrack delete action causes circular error [\#206](https://github.com/pypeclub/pype/issues/206) -- _(nuke)_ Priority was forced to 50 [\#345](https://github.com/pypeclub/pype/pull/345) -- _(nuke)_ Fix ValidateNukeWriteKnobs [\#340](https://github.com/pypeclub/pype/pull/340) -- _(maya)_ If camera attributes are connected, we can ignore them. [\#339](https://github.com/pypeclub/pype/pull/339) -- _(pype)_ stop appending of tools environment to existing env [\#337](https://github.com/pypeclub/pype/pull/337) -- _(ftrack)_ Ftrack timeout needs to look at AVALON\_TIMEOUT [\#325](https://github.com/pypeclub/pype/pull/325) -- _(harmony)_ Only zip files are supported. [\#310](https://github.com/pypeclub/pype/pull/310) -- _(pype)_ hotfix/Fix event server mongo uri [\#305](https://github.com/pypeclub/pype/pull/305) -- _(photoshop)_ Subset was not named or validated correctly. [\#304](https://github.com/pypeclub/pype/pull/304) - - - - - -## 2.10.0 ## - -_**release date:** 17 June 2020_ - -**new:** -- _(harmony)_ **Toon Boom Harmony** has been greatly extended to support rigging, scene build, animation and rendering workflows. [#270](https://github.com/pypeclub/pype/issues/270) [#271](https://github.com/pypeclub/pype/issues/271) [#190](https://github.com/pypeclub/pype/issues/190) [#191](https://github.com/pypeclub/pype/issues/191) [#172](https://github.com/pypeclub/pype/issues/172) [#168](https://github.com/pypeclub/pype/issues/168) -- _(pype)_ Added support for rudimentary **edl publishing** into individual shots. [#265](https://github.com/pypeclub/pype/issues/265) -- _(celaction)_ Simple **Celaction** integration has been added with support for workfiles and rendering. [#255](https://github.com/pypeclub/pype/issues/255) -- _(maya)_ Support for multiple job types when submitting to the farm. We can now render Maya or Standalone render jobs for Vray and Arnold (limited support for Arnold) [#204](https://github.com/pypeclub/pype/issues/204) -- _(photoshop)_ Added initial support for Photoshop [#232](https://github.com/pypeclub/pype/issues/232) - -**improved:** -- _(blender)_ Updated support for rigs and added support for the Layout family [#233](https://github.com/pypeclub/pype/issues/233) [#226](https://github.com/pypeclub/pype/issues/226) -- _(premiere)_ It is now possible to choose different storage root for workfiles of different task types. [#255](https://github.com/pypeclub/pype/issues/255) -- _(maya)_ Support for unmerged AOVs in Redshift multipart EXRs [#197](https://github.com/pypeclub/pype/issues/197) -- _(pype)_ Pype repository has been refactored in preparation for 3.0 release [#169](https://github.com/pypeclub/pype/issues/169) -- _(deadline)_ All file dependencies are now passed to deadline from maya to prevent premature start of rendering if caches or textures haven't been copied over yet. [#195](https://github.com/pypeclub/pype/issues/195) -- _(nuke)_ Script validation can now be made optional.
[#194](https://github.com/pypeclub/pype/issues/194) -- _(pype)_ Publishing can now be stopped at any time. [#194](https://github.com/pypeclub/pype/issues/194) - -**fix:** -- _(pype)_ Pyblish-lite has been integrated into pype repository, plus various publishing GUI fixes. [#274](https://github.com/pypeclub/pype/issues/274) [#275](https://github.com/pypeclub/pype/issues/275) [#268](https://github.com/pypeclub/pype/issues/268) [#227](https://github.com/pypeclub/pype/issues/227) [#238](https://github.com/pypeclub/pype/issues/238) -- _(maya)_ Alembic extractor was getting wrong frame range type in certain scenarios [#254](https://github.com/pypeclub/pype/issues/254) -- _(maya)_ Attaching a render to subset in maya was not passing validation in certain scenarios [#256](https://github.com/pypeclub/pype/issues/256) -- _(ftrack)_ Various small fixes to ftrack sync [#263](https://github.com/pypeclub/pype/issues/263) [#259](https://github.com/pypeclub/pype/issues/259) -- _(maya)_ Look extraction is now able to skip invalid connections in shaders [#207](https://github.com/pypeclub/pype/issues/207) - - - - - -## 2.9.0 ## - -_**release date:** 25 May 2020_ - -**new:** -- _(pype)_ Support for **Multiroot projects**. You can now store project data on multiple physical or virtual storages and target individual publishes to these locations. For instance, renders can be stored on a faster storage than the rest of the project. [#145](https://github.com/pypeclub/pype/issues/145), [#38](https://github.com/pypeclub/pype/issues/38) -- _(harmony)_ Basic implementation of **Toon Boom Harmony** has been added. [#142](https://github.com/pypeclub/pype/issues/142) -- _(pype)_ OSX support is in public beta now. There are issues to be expected, but the main implementation should be functional. [#141](https://github.com/pypeclub/pype/issues/141) - - -**improved:** - -- _(pype)_ **Review extractor** has been completely rebuilt. It now supports granular filtering so you can create **multiple outputs** for different tasks, families or hosts. [#103](https://github.com/pypeclub/pype/issues/103), [#166](https://github.com/pypeclub/pype/issues/166), [#165](https://github.com/pypeclub/pype/issues/165) -- _(pype)_ **Burnin** generation has been extended to **support same multi-output filtering** as review extractor [#103](https://github.com/pypeclub/pype/issues/103) -- _(pype)_ Publishing file templates can now be specified in config for each individual family [#114](https://github.com/pypeclub/pype/issues/114) -- _(pype)_ Studio specific plugins can now be appended to pype standard publishing plugins. [#112](https://github.com/pypeclub/pype/issues/112) -- _(nukestudio)_ Reviewable clips no longer need to be previously cut, exported and re-imported to timeline. **Pype can now dynamically cut reviewable quicktimes** from continuous offline footage during publishing. [#23](https://github.com/pypeclub/pype/issues/23) -- _(deadline)_ Deadline can now correctly differentiate between staging and production pype. [#154](https://github.com/pypeclub/pype/issues/154) -- _(deadline)_ `PYPE_PYTHON_EXE` env variable can now be used to direct publishing to an explicit Python installation. [#120](https://github.com/pypeclub/pype/issues/120) -- _(nuke)_ Nuke now checks for new versions of loaded data on file open. [#140](https://github.com/pypeclub/pype/issues/140) -- _(nuke)_ frame range and limit checkboxes are now exposed on write node.
[#119](https://github.com/pypeclub/pype/issues/119) - - - -**fix:** - -- _(nukestudio)_ Project Location was using backslashes which was breaking nukestudio native exporting in certain configurations [#82](https://github.com/pypeclub/pype/issues/82) -- _(nukestudio)_ Duplicated hierarchy tags were prone to throwing publishing errors [#130](https://github.com/pypeclub/pype/issues/130), [#144](https://github.com/pypeclub/pype/issues/144) -- _(ftrack)_ multiple stability improvements [#157](https://github.com/pypeclub/pype/issues/157), [#159](https://github.com/pypeclub/pype/issues/159), [#128](https://github.com/pypeclub/pype/issues/128), [#118](https://github.com/pypeclub/pype/issues/118), [#127](https://github.com/pypeclub/pype/issues/127) -- _(deadline)_ multipart EXRs were stopping review publishing on the farm. They are still not supported for automatic review generation, but the publish will go through correctly without the quicktime. [#155](https://github.com/pypeclub/pype/issues/155) -- _(deadline)_ If deadline is non-responsive it will no longer freeze the host when publishing [#149](https://github.com/pypeclub/pype/issues/149) -- _(deadline)_ Sometimes deadline was trying to launch render before all the source data was copied over. [#137](https://github.com/pypeclub/pype/issues/137) -- _(nuke)_ Filepath knob wasn't updated properly. [#131](https://github.com/pypeclub/pype/issues/131) -- _(maya)_ When extracting animation, the "Write Color Set" options on the instance were not respected. [#108](https://github.com/pypeclub/pype/issues/108) -- _(maya)_ Attribute overrides for AOV only worked for the legacy render layers. Now it works for new render setup as well [#132](https://github.com/pypeclub/pype/issues/132) -- _(maya)_ Stability and usability improvements in yeti workflow [#104](https://github.com/pypeclub/pype/issues/104) - - - - - -## 2.8.0 ## - -_**release date:** 20 April 2020_ - -**new:** - -- _(pype)_ Option to generate slates from json templates. [PYPE-628] [#26](https://github.com/pypeclub/pype/issues/26) -- _(pype)_ It is now possible to automate loading of published subsets into any scene. Documentation will follow :). [PYPE-611] [#24](https://github.com/pypeclub/pype/issues/24) - -**fix:** - -- _(maya)_ Some Redshift render tokens could break publishing. [PYPE-778] [#33](https://github.com/pypeclub/pype/issues/33) -- _(maya)_ Publish was not preserving maya file extension. [#39](https://github.com/pypeclub/pype/issues/39) -- _(maya)_ Rig output validator was failing on nodes without shapes. [#40](https://github.com/pypeclub/pype/issues/40) -- _(maya)_ Yeti caches can now be properly versioned up in the scene inventory. [#40](https://github.com/pypeclub/pype/issues/40) -- _(nuke)_ Build first workfiles was not accepting jpeg sequences. [#34](https://github.com/pypeclub/pype/issues/34) -- _(deadline)_ Trying to generate ffmpeg review from multipart EXRs no longer crashes publishing. [PYPE-781] -- _(deadline)_ Render publishing is more stable in multiplatform environments. [PYPE-775] - - - - - -## 2.7.0 ## - -_**release date:** 30 March 2020_ - -**new:** - -- _(maya)_ Artist can now choose to load multiple references of the same subset at once [PYPE-646, PYPS-81] -- _(nuke)_ Option to use named OCIO colorspaces for review colour baking. [PYPS-82] -- _(pype)_ Pype can now work with `master` versions for publishing and loading.
These are non-versioned publishes that are overwritten with the latest version during publish. These are now supported in all the GUIs, but their publishing is deactivated by default. [PYPE-653] -- _(blender)_ Added support for basic blender workflow. We currently support `rig`, `model` and `animation` families. [PYPE-768] -- _(pype)_ Source timecode can now be used in burn-ins. [PYPE-777] -- _(pype)_ Review outputs profiles can now specify delivery resolution different from the project setting [PYPE-759] -- _(nuke)_ Bookmark to current context is now added automatically to all nuke browser windows. [PYPE-712] - -**change:** - -- _(maya)_ It is now possible to publish a camera without baking. Keep in mind that unbaked cameras can't be guaranteed to work in other hosts. [PYPE-595] -- _(maya)_ All the renders from maya are now grouped in the loader by their Layer name. [PYPE-482] -- _(nuke/hiero)_ Any publishes from nuke and hiero can now be versioned independently of the workfile. [PYPE-728] - - -**fix:** - -- _(nuke)_ Mixed slashes caused issues in ocio config path. -- _(pype)_ Intent field in pyblish GUI was passing label instead of value to ftrack. [PYPE-733] -- _(nuke)_ Publishing of pre-renders was inconsistent. [PYPE-766] -- _(maya)_ Handles and frame ranges were inconsistent in various places during publishing. -- _(nuke)_ Nuke was crashing if it ran into certain missing knobs. For example, DPX output missing `autocrop` [PYPE-774] -- _(deadline)_ Project overrides were not working properly with farm render publishing. -- _(hiero)_ Problems with single frame plates publishing. -- _(maya)_ Redshift RenderPass tokens were breaking render publishing. [PYPE-778] -- _(nuke)_ Build first workfile was not accepting jpeg sequences. -- _(maya)_ Multipart (Multilayer) EXRs were breaking review publishing due to FFMPEG incompatibility [PYPE-781] - - - - -## 2.6.0 ## - -_**release date:** 9 March 2020_ - -**change:** -- _(maya)_ render publishing has been simplified and made more robust. Render setup layers are now automatically added to publishing subsets and the `render globals` family has been replaced with simple `render` [PYPE-570] -- _(avalon)_ change context and workfiles apps have been merged into one that allows both actions to be performed at the same time. [PYPE-747] -- _(pype)_ thumbnails are now automatically propagated to the asset from the last published subset in the loader -- _(ftrack)_ publishing comment and intent are now being published to ftrack note as well as description. [PYPE-727] -- _(pype)_ when overriding an existing version, the old representations are now overridden, instead of the new ones just being appended. (To allow this behaviour, the version validator needs to be disabled. [PYPE-690]) -- _(pype)_ burnin preset has been significantly simplified. It now doesn't require passing a function to each field, but only needs the actual text template. To use this, all the current burnin PRESETS MUST BE UPDATED for all the projects. -- _(ftrack)_ credentials are now stored on a per-server basis, so it's possible to switch between ftrack servers without having to log in and out. [PYPE-723] - - -**new:** -- _(pype)_ production and development deployments now have different tray icon colours: Orange for Dev and Green for production [PYPE-718] -- _(maya)_ renders can now be attached to a publishable subset rather than creating their own subset.
For example, it is possible to create a reviewable `look` or `model` render and have it correctly attached as a representation of the subset [PYPE-451] -- _(maya)_ after saving the current scene into a new context (as a new shot for instance), all the scene publishing subsets data gets re-generated automatically to match the new context [PYPE-532] -- _(pype)_ we now support project specific publish, load and create plugins [PYPE-740] -- _(ftrack)_ new action that allows archiving/deleting old published versions. The user can choose how many of the latest versions to keep when the action is run. [PYPE-748, PYPE-715] -- _(ftrack)_ it is now possible to monitor and restart ftrack event server using ftrack action. [PYPE-658] -- _(pype)_ validator that prevents accidental overwrites of previously published versions. [PYPE-680] -- _(avalon)_ avalon core updated to version 5.6.0 -- _(maya)_ added validator to make sure that relative paths are used when publishing arnold standins. -- _(nukestudio)_ it is now possible to extract and publish audio family from clip in nuke studio [PYPE-682] - -**fix**: -- _(maya)_ maya set framerange button was ignoring handles [PYPE-719] -- _(ftrack)_ sync to avalon was sometimes crashing when run on an empty project -- _(nukestudio)_ publishing same shots after they've been previously archived/deleted would result in a crash. [PYPE-737] -- _(nuke)_ slate workflow was breaking in certain scenarios. [PYPE-730] -- _(pype)_ rendering publish workflow has been significantly improved to prevent errors resulting from implicit render collection. [PYPE-665, PYPE-746] -- _(pype)_ launching application on a non-synced project resulted in an obscure error [PYPE-528] -- _(pype)_ missing keys in burnins no longer result in an error. [PYPE-706] -- _(ftrack)_ create folder structure action was sometimes failing for project managers due to wrong permissions. -- _(Nukestudio)_ using `source` in the start frame tag could result in wrong frame range calculation -- _(ftrack)_ sync to avalon action and event have been improved by catching more edge cases and processing them properly. - - - - -## 2.5.0 ## - -_**release date:** 11 Feb 2020_ - -**change:** -- _(pype)_ added many logs for easier debugging -- _(pype)_ review presets can now be separated between 2d and 3d renders [PYPE-693] -- _(pype)_ anatomy module has been greatly improved to allow for more dynamic publishing and faster debugging [PYPE-685] -- _(pype)_ avalon schemas have been moved from `pype-config` to `pype` repository, for simplification. [PYPE-670] -- _(ftrack)_ updated to latest ftrack API -- _(ftrack)_ publishing comments now appear in ftrack also as a note on version with customisable category [PYPE-645] -- _(ftrack)_ delete asset/subset action has been improved. It is now able to remove multiple entities and descendants of the selected entities [PYPE-361, PYPS-72] -- _(workfiles)_ added date field to workfiles app [PYPE-603] -- _(maya)_ old deprecated loaders have been removed in favour of a single unified reference loader (old scenes will upgrade automatically to the new loader upon opening) [PYPE-633, PYPE-697] -- _(avalon)_ core updated to 5.5.15 [PYPE-671] -- _(nuke)_ library loader is now available in nuke [PYPE-698] - - -**new:** -- _(pype)_ added pype render wrapper to allow rendering on mixed platform farms. [PYPE-634] -- _(pype)_ added `pype launch` command. It lets admins run applications with dynamically built environment based on the given context.
[PYPE-634] -- _(pype)_ added support for extracting review sequences with burnins [PYPE-657] -- _(publish)_ users can now set intent next to a comment when publishing. This will then be reflected on an attribute in ftrack. [PYPE-632] -- _(burnin)_ timecode can now be added to burnin -- _(burnin)_ datetime keys can now be added to burnin and anatomy [PYPE-651] -- _(burnin)_ anatomy templates can now be used in burnins. [PYPE=626] -- _(nuke)_ new validator for render resolution -- _(nuke)_ support for attach slate to nuke renders [PYPE-630] -- _(nuke)_ png sequences were added to loaders -- _(maya)_ added maya 2020 compatibility [PYPE-677] -- _(maya)_ ability to publish and load .ASS standin sequences [PYPS-54] -- _(pype)_ thumbnails can now be published and are visible in the loader. `AVALON_THUMBNAIL_ROOT` environment variable needs to be set for this to work [PYPE-573, PYPE-132] -- _(blender)_ base implementation of blender was added with publishing and loading of .blend files [PYPE-612] -- _(ftrack)_ new action for preparing deliveries [PYPE-639] - - -**fix**: -- _(burnin)_ more robust way of finding ffmpeg for burnins. -- _(pype)_ improved UNC paths remapping when sending to farm. -- _(pype)_ float frames sometimes made their way to representation context in database, breaking loaders [PYPE-668] -- _(pype)_ `pype install --force` was failing sometimes [PYPE-600] -- _(pype)_ padding in published files got calculated wrongly sometimes. It is now instead being always read from project anatomy. [PYPE-667] -- _(publish)_ comment publishing was failing in certain situations -- _(ftrack)_ multiple edge case scenario fixes in auto sync and sync-to-avalon action -- _(ftrack)_ sync to avalon now works on empty projects -- _(ftrack)_ thumbnail update event was failing when deleting entities [PYPE-561] -- _(nuke)_ loader applies proper colorspaces from Presets -- _(nuke)_ publishing handles didn't always work correctly [PYPE-686] -- _(maya)_ assembly publishing and loading wasn't working correctly - - - - - - -## 2.4.0 ## - -_**release date:** 9 Dec 2019_ - -**change:** -- _(ftrack)_ version to status ftrack event can now be configured from Presets - - based on preset `presets/ftracc/ftrack_config.json["status_version_to_task"]` -- _(ftrack)_ sync to avalon event has been completely re-written. It now supports most of the project management situations on ftrack including moving, renaming and deleting entities, updating attributes and working with tasks. -- _(ftrack)_ sync to avalon action has been also re-writen. It is now much faster (up to 100 times depending on a project structure), has much better logging and reporting on encountered problems, and is able to handle much more complex situations. -- _(ftrack)_ sync to avalon trigger by checking `auto-sync` toggle on ftrack [PYPE-504] -- _(pype)_ various new features in the REST api -- _(pype)_ new visual identity used across pype -- _(pype)_ started moving all requirements to pip installation rather than vendorising them in pype repository. Due to a few yet unreleased packages, this means that pype can temporarily be only installed in the offline mode. - -**new:** -- _(nuke)_ support for publishing gizmos and loading them as viewer processes -- _(nuke)_ support for publishing nuke nodes from backdrops and loading them back -- _(pype)_ burnins can now work with start and end frames as keys - - use keys `{frame_start}`, `{frame_end}` and `{current_frame}` in burnin preset to use them. 
[PYPS-44,PYPS-73, PYPE-602] -- _(pype)_ option to filter logs by user and level in loggin GUI -- _(pype)_ image family added to standalone publisher [PYPE-574] -- _(pype)_ matchmove family added to standalone publisher [PYPE-574] -- _(nuke)_ validator for comparing arbitrary knobs with values from presets -- _(maya)_ option to force maya to copy textures in the new look publish rather than hardlinking them -- _(pype)_ comments from pyblish GUI are now being added to ftrack version -- _(maya)_ validator for checking outdated containers in the scene -- _(maya)_ option to publish and load arnold standin sequence [PYPE-579, PYPS-54] - -**fix**: -- _(pype)_ burnins were not respecting codec of the input video -- _(nuke)_ lot's of various nuke and nuke studio fixes across the board [PYPS-45] -- _(pype)_ workfiles app is not launching with the start of the app by default [PYPE-569] -- _(ftrack)_ ftrack integration during publishing was failing under certain situations [PYPS-66] -- _(pype)_ minor fixes in REST api -- _(ftrack)_ status change event was crashing when the target status was missing [PYPS-68] -- _(ftrack)_ actions will try to reconnect if they fail for some reason -- _(maya)_ problems with fps mapping when using float FPS values -- _(deadline)_ overall improvements to deadline publishing -- _(setup)_ environment variables are now remapped on the fly based on the platform pype is running on. This fixes many issues in mixed platform environments. - - - - -## 2.3.6 # - -_**release date:** 27 Nov 2019_ - -**hotfix**: -- _(ftrack)_ was hiding important debug logo -- _(nuke)_ crashes during workfile publishing -- _(ftrack)_ event server crashes because of signal problems -- _(muster)_ problems with muster render submissions -- _(ftrack)_ thumbnail update event syntax errors - - - - -## 2.3.0 ## - -_release date: 6 Oct 2019_ - -**new**: -- _(maya)_ support for yeti rigs and yeti caches -- _(maya)_ validator for comparing arbitrary attributes against ftrack -- _(pype)_ burnins can now show current date and time -- _(muster)_ pools can now be set in render globals in maya -- _(pype)_ Rest API has been implemented in beta stage -- _(nuke)_ LUT loader has been added -- _(pype)_ rudimentary user module has been added as preparation for user management -- _(pype)_ a simple logging GUI has been added to pype tray -- _(nuke)_ nuke can now bake input process into mov -- _(maya)_ imported models now have selection handle displayed by defaulting -- _(avalon)_ it's is now possible to load multiple assets at once using loader -- _(maya)_ added ability to automatically connect yeti rig to a mesh upon loading - -**changed**: -- _(ftrack)_ event server now runs two parallel processes and is able to keep queue of events to process. 
-- _(nuke)_ task name is now added to all rendered subsets -- _(pype)_ adding more families to standalone publisher -- _(pype)_ standalone publisher now uses pyblish-lite -- _(pype)_ standalone publisher can now create review quicktimes -- _(ftrack)_ queries to ftrack were sped up -- _(ftrack)_ multiple ftrack action have been deprecated -- _(avalon)_ avalon upstream has been updated to 5.5.0 -- _(nukestudio)_ published transforms can now be animated -- - -**fix**: -- _(maya)_ fps popup button didn't work in some cases -- _(maya)_ geometry instances and references in maya were losing shader assignments -- _(muster)_ muster rendering templates were not working correctly -- _(maya)_ arnold tx texture conversion wasn't respecting colorspace set by the artist -- _(pype)_ problems with avalon db sync -- _(maya)_ ftrack was rounding FPS making it inconsistent -- _(pype)_ wrong icon names in Creator -- _(maya)_ scene inventory wasn't showing anything if representation was removed from database after it's been loaded to the scene -- _(nukestudio)_ multiple bugs squashed -- _(loader)_ loader was taking long time to show all the loading action when first launcher in maya - -## 2.2.0 ## -_**release date:** 8 Sept 2019_ - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(nuke)_ option to choose deadline chunk size on write nodes -- _(nukestudio)_ added option to publish soft effects (subTrackItems) from NukeStudio as subsets including LUT files. these can then be loaded in nuke or NukeStudio -- _(nuke)_ option to build nuke script from previously published latest versions of plate and render subsets. -- _(nuke)_ nuke writes now have deadline tab. -- _(ftrack)_ Prepare Project action can now be used for creating the base folder structure on disk and in ftrack, setting up all the initial project attributes and it automatically prepares `pype_project_config` folder for the given project. -- _(clockify)_ Added support for time tracking in clockify. This currently in addition to ftrack time logs, but does not completely replace them. -- _(pype)_ any attributes in Creator and Loader plugins can now be customised using pype preset system - -**changed**: -- nukestudio now uses workio API for workfiles -- _(maya)_ "FIX FPS" prompt in maya now appears in the middle of the screen -- _(muster)_ can now be configured with custom templates -- _(pype)_ global publishing plugins can now be configured using presets as well as host specific ones - - -**fix**: -- wrong version retrieval from path in certain scenarios -- nuke reset resolution wasn't working in certain scenarios - -## 2.1.0 ## -_release date: 6 Aug 2019_ - -A large cleanup release. Most of the change are under the hood. - -**new**: -- _(pype)_ add customisable workflow for creating quicktimes from renders or playblasts -- _(pype)_ Added configurable option to add burnins to any generated quicktimes -- _(ftrack)_ Action that identifies what machines pype is running on. -- _(system)_ unify subprocess calls -- _(maya)_ add audio to review quicktimes -- _(nuke)_ add crop before write node to prevent overscan problems in ffmpeg -- **Nuke Studio** publishing and workfiles support -- **Muster** render manager support -- _(nuke)_ Framerange, FPS and Resolution are set automatically at startup -- _(maya)_ Ability to load published sequences as image planes -- _(system)_ Ftrack event that sets asset folder permissions based on task assignees in ftrack. 
-- _(maya)_ Pyblish plugin that allow validation of maya attributes -- _(system)_ added better startup logging to tray debug, including basic connection information -- _(avalon)_ option to group published subsets to groups in the loader -- _(avalon)_ loader family filters are working now - -**changed**: -- change multiple key attributes to unify their behaviour across the pipeline - - `frameRate` to `fps` - - `startFrame` to `frameStart` - - `endFrame` to `frameEnd` - - `fstart` to `frameStart` - - `fend` to `frameEnd` - - `handle_start` to `handleStart` - - `handle_end` to `handleEnd` - - `resolution_width` to `resolutionWidth` - - `resolution_height` to `resolutionHeight` - - `pixel_aspect` to `pixelAspect` - -- _(nuke)_ write nodes are now created inside group with only some attributes editable by the artist -- rendered frames are now deleted from temporary location after their publishing is finished. -- _(ftrack)_ RV action can now be launched from any entity -- after publishing only refresh button is now available in pyblish UI -- added context instance pyblish-lite so that artist knows if context plugin fails -- _(avalon)_ allow opening selected files using enter key -- _(avalon)_ core updated to v5.2.9 with our forked changes on top - -**fix**: -- faster hierarchy retrieval from db -- _(nuke)_ A lot of stability enhancements -- _(nuke studio)_ A lot of stability enhancements -- _(nuke)_ now only renders a single write node on farm -- _(ftrack)_ pype would crash when launcher project level task -- work directory was sometimes not being created correctly -- major pype.lib cleanup. Removing of unused functions, merging those that were doing the same and general house cleaning. -- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner diff --git a/website/docs/dev_build.md b/website/docs/dev_build.md index c797326ce6..a1af8a86f7 100644 --- a/website/docs/dev_build.md +++ b/website/docs/dev_build.md @@ -11,7 +11,7 @@ import TabItem from '@theme/TabItem'; To build Pype you currently need (on all platforms): -- **[Python 3.7](https://www.python.org/downloads/)** as we are following [vfx platform](https://vfxplatform.com). +- **[Python 3.9](https://www.python.org/downloads/)** as we are following [vfx platform CY2022](https://vfxplatform.com). - **[git](https://git-scm.com/downloads)** We use [CX_Freeze](https://cx-freeze.readthedocs.io/en/latest) to freeze the code and all dependencies and @@ -51,7 +51,9 @@ development tools like [CMake](https://cmake.org/) and [Visual Studio](https://v #### Run from source -For development purposes it is possible to run OpenPype directly from the source. We provide a simple launcher script for this. +For development purposes it is possible to run OpenPype directly from the source. We provide a simple launcher script for this. To run the powershell scripts you may have to enable unrestricted execution as administrator: + +`Set-ExecutionPolicy -ExecutionPolicy unrestricted` To start OpenPype from source you need to @@ -114,8 +116,8 @@ To build OpenPype on Linux you will need: - **[curl](https://curl.se)** on systems that doesn't have one preinstalled. - **bzip2**, **readline**, **sqlite3** and other libraries. -Because some Linux distros come with newer Python version pre-installed, you might -need to install **3.7** version and make use of it explicitly. +Because some Linux distros come with older Python version pre-installed, you might +need to install **3.9** version and make use of it explicitly. 
Your best bet is probably using [pyenv](https://github.com/pyenv/pyenv).

You can use your package manager to install **git** and other packages to your build
@@ -136,16 +138,16 @@ $ eval "$(pyenv virtualenv-init -)"
# reload shell
$ exec $SHELL

-# install Python 3.7.10
+# install Python 3.9.6
# python will be downloaded and build so please make sure
# you have all necessary requirements installed (see below).
-$ pyenv install -v 3.7.10
+$ pyenv install -v 3.9.6

# change path to pype 3
$ cd /path/to/pype-3

# set local python version
-$ pyenv local 3.7.10
+$ pyenv local 3.9.6
```

:::note Install build requirements for **Ubuntu**
@@ -214,25 +216,25 @@ $ brew install cmake
3) Install [pyenv](https://github.com/pyenv/pyenv):
```shell
$ brew install pyenv
-$ echo 'eval "$(pypenv init -)"' >> ~/.zshrc
+$ echo 'eval "$(pyenv init -)"' >> ~/.zshrc
$ pyenv init
$ exec "$SHELL"
$ PATH=$(pyenv root)/shims:$PATH
```

-4) Pull in required Python version 3.7.x
+4) Pull in required Python version 3.9.x
```shell
# install Python build dependences
$ brew install openssl readline sqlite3 xz zlib

-# replace with up-to-date 3.7.x version
-$ pyenv install 3.7.9
+# replace with up-to-date 3.9.x version
+$ pyenv install 3.9.6
```

5) Set local Python version
```shell
# switch to Pype source directory
-$ pyenv local 3.7.9
+$ pyenv local 3.9.6
```

6) Install `create-dmg`
@@ -256,7 +258,7 @@ to `pyproject.toml` to `[tool.poetry.dependencies]` section.
```toml title="/pyproject.toml"
[tool.poetry.dependencies]
-python = "3.7.*"
+python = "3.9.*"
aiohttp = "^3.7"
aiohttp_json_rpc = "*" # TVPaint server
acre = { git = "https://github.com/pypeclub/acre.git" }
diff --git a/website/docs/dev_colorspace.md b/website/docs/dev_colorspace.md
new file mode 100644
index 0000000000..fe9a0ec1e3
--- /dev/null
+++ b/website/docs/dev_colorspace.md
@@ -0,0 +1,120 @@
+---
+id: dev_colorspace
+title: Colorspace Management and Distribution
+sidebar_label: Colorspace
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Introduction
+Defines how color data and the OCIO config are distributed during publishing. Once colorspace data is captured and integrated into a representation, loaders can use it to load image and video data in the correct colorspace.
+
+:::warning Color Management (ImageIO)
+Adding the `imageio` settings schema is required for any host or module which is processing pixel data.
+:::
+
+## Data model
+Published representations that are extracted with color managed data store a **colorspaceData** entry in their data: `representation_doc["data"]["colorspaceData"]`.
+
+It's up to the Host implementation to pre-configure the application or workfile to have the correct OCIO config applied.
+It's up to the Extractors to set these values for the representation during publishing.
+It's up to the Loaders to read these values and apply the expected color space.
+
+### Keys
+- **colorspace** - string value used in other publish plugins and loaders
+- **config** - stores two versions of the config path:
+    - **path** - formatted path with the platform root baked in. It records where the color config was sourced from during publishing.
+    - **template** - unformatted template resolved from settings. It is used by plugins targeting remote publishes, which may be processed on a different platform.
+
+### Example
+    {
+        "colorspace": "linear",
+        "config": {
+            "path": "/abs/path/to/config.ocio",
+            "template": "{project[root]}/path/to/config.ocio"
+        }
+    }
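+
+For illustration, a sketch of consuming this entry from code; `repre_doc` stands for a representation document queried from the database, and the defensive `.get` access is an assumption rather than a guaranteed API:
+
+```python
+colorspace_data = repre_doc["data"].get("colorspaceData")
+if colorspace_data:
+    # e.g. "linear"
+    colorspace = colorspace_data["colorspace"]
+    # the template is preferred for remote publishes on other platforms
+    config_template = colorspace_data["config"]["template"]
+```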
+
+## How to integrate it into a host
+1. The settings for a host should add the `imageio` schema, ideally near the top of all categories in its `/settings/entities/schemas/system_schema/host_settings/schema_{host}.json`, so it matches the settings layout of other hosts.
+```json
+{
+    "key": "imageio",
+    "type": "dict",
+    "label": "Color Management (ImageIO)",
+    "is_group": true,
+    "children": [
+        {
+            "type": "schema",
+            "name": "schema_imageio_config"
+        },
+        {
+            "type": "schema",
+            "name": "schema_imageio_file_rules"
+        }
+    ]
+}
+```
+
+2. Set the OCIO config path for the host to the path returned from `openpype.pipeline.colorspace.get_imageio_config`, for example:
+   - set the `OCIO` environment variable before launching the host via a prelaunch hook (see the sketch at the end of this page)
+   - or (if the host allows it) set the workfile OCIO config path using the host's API
+
+3. Each Extractor exporting pixel data (e.g. image or video) has to use the parent class `openpype.pipeline.publish.publish_plugins.ExtractorColormanaged` and use `self.set_representation_colorspace` on the representations to be integrated.
+
+The **set_representation_colorspace** method adds `colorspaceData` to the representation. If the `colorspace` passed is not `None`, it is added directly to the representation with the resolved config path; otherwise a color space is assumed using the configured file rules. If no file rule matches, the `colorspaceData` is **not** added to the representation.
+
+An example implementation can be found here: `openpype/hosts/nuke/plugins/publish/extract_render_local.py`
+
+
+4. The Loader plug-ins should take into account the `colorspaceData` in the published representation's data to allow the DCC to read in the expected color space.
+```python
+from openpype.pipeline.colorspace import (
+    get_imageio_colorspace_from_filepath,
+    get_imageio_config,
+    get_imageio_file_rules
+)
+
+
+class YourLoader(api.Loader):
+    def load(self, context, name=None, namespace=None, options=None):
+        path = self.fname
+        colorspace_data = context["representation"]["data"].get("colorspaceData", {})
+        colorspace = (
+            colorspace_data.get("colorspace")
+            # try to match colorspace from file rules
+            or self.get_colorspace_from_file_rules(path, context)
+        )
+
+        # pseudocode
+        load_file(path, colorspace=colorspace)
+
+    def get_colorspace_from_file_rules(self, path, context):
+        project_name = context.data["projectName"]
+        host_name = context.data["hostName"]
+        anatomy_data = context.data["anatomyData"]
+        project_settings_ = context.data["project_settings"]
+
+        config_data = get_imageio_config(
+            project_name, host_name,
+            project_settings=project_settings_,
+            anatomy_data=anatomy_data
+        )
+        file_rules = get_imageio_file_rules(
+            project_name, host_name,
+            project_settings=project_settings_
+        )
+        # get matching colorspace from rules
+        return get_imageio_colorspace_from_filepath(
+            path, host_name, project_name,
+            config_data=config_data,
+            file_rules=file_rules,
+            project_settings=project_settings_
+        )
+```
+
+:::warning Loading
+A custom OCIO config can be set per asset/shot, so the current session you are loading into may use a different config than the one the original context's **colorspaceData** was published with. It's up to the loader's implementation to take that into account and decide what to do if the colorspace differs or might not exist.
+:::
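+
+The prelaunch hook approach from step 2 could look roughly like the following sketch. The hook base class lives in `openpype/lib/applications.py`; the exact attributes available on the hook (`self.data`, `self.host_name`, `self.launch_context.env`) should be verified there, and the two-argument `get_imageio_config` call is an assumption based on the examples above.
+
+```python
+from openpype.lib.applications import PreLaunchHook
+from openpype.pipeline.colorspace import get_imageio_config
+
+
+class SetOCIOConfigPath(PreLaunchHook):
+    """Sketch: point the 'OCIO' env var to the resolved config."""
+
+    def execute(self):
+        config_data = get_imageio_config(
+            self.data["project_name"], self.host_name
+        )
+        if config_data:
+            # Hosts with OCIO support read this variable on startup
+            self.launch_context.env["OCIO"] = config_data["path"]
+```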
\ No newline at end of file
diff --git a/website/docs/dev_deadline.md b/website/docs/dev_deadline.md
new file mode 100644
index 0000000000..310b2e0983
--- /dev/null
+++ b/website/docs/dev_deadline.md
@@ -0,0 +1,38 @@
+---
+id: dev_deadline
+title: Deadline integration
+sidebar_label: Deadline integration
+toc_max_heading_level: 4
+---
+
+Deadline is not a usual host: it is missing most of the host features, but it does have
+its own set of publishing plugins.
+
+## How to test OpenPype on Deadline
+
+### Versions
+
+Since 3.14, a job submitted from OpenPype is bound to the OpenPype version used to submit it. So
+if you submit a job with 3.14.8, Deadline will try to find that particular version and use it
+for rendering. This is handled by the `OPENPYPE_VERSION` variable on the job - you can delete it from
+there and the version set in the studio Settings will be used instead.
+
+![Deadline Job Version](assets/deadline_job_version.png)
+
+Deadline needs to bootstrap this version, so it will try to look up the closest compatible
+build. To use version 3.14.8 on Deadline it is enough to have build 3.14.0 or similar - what matters
+are the first two version numbers, major and minor. If they match, the versions
+are considered compatible.
+
+### Testing
+
+To test various changes you don't need to build OpenPype again and again and put
+the build into the directory where Deadline looks for versions - that is needed only on a
+minor version change. The build will then be used to bootstrap whatever version is set on the
+job or in the studio Settings.
+
+You can either use the zip version if it suits you, or better, expose your source directory
+so it is found as a version - for example with a symlink.
+
+That way you only need to modify the `OPENPYPE_VERSION` variable on the job to point it to the version
+you would like to test.
\ No newline at end of file
diff --git a/website/docs/dev_host_implementation.md b/website/docs/dev_host_implementation.md
new file mode 100644
index 0000000000..3702483ad1
--- /dev/null
+++ b/website/docs/dev_host_implementation.md
@@ -0,0 +1,89 @@
+---
+id: dev_host_implementation
+title: Host implementation
+sidebar_label: Host implementation
+toc_max_heading_level: 4
+---
+
+A host is an integration of a DCC, but in most cases it also has logic that needs to be handled before the DCC is launched. Based on the abilities (or purpose) of the DCC, the integration can then support different pipeline workflows.
+
+## Pipeline workflows
+The workflows available in OpenPype are Workfiles, Load and Create-Publish. Each of them may require some functionality from the integration (e.g. calling the host API to achieve certain functionality). We'll go through them later.
+
+## How to implement and manage host
+At this moment there is no fully unified way a host should be implemented, but we're working on it. A host should have "public face" code that can be used outside of the DCC, and in-DCC integration code. The main reason is that in-DCC code can have specific dependencies on python modules not available outside of its process. Hosts are located in the `openpype/hosts/{host name}` folder. The current code (in many places) expects that the host name has an equivalent folder there, so each subfolder should be named with the name of the host it represents.
+
+### Recommended folder structure
+```python
+openpype/hosts/{host name}
+│
+│ # Content of DCC integration - with in-DCC imports
+├─ api
+│ ├─ __init__.py
+│ └─ [DCC integration files]
+│
+│ # Plugins related to host - dynamically imported (can contain in-DCC imports)
+├─ plugins
+│ ├─ create
+│ │ └─ [create plugin files]
+│ ├─ load
+│ │ └─ [load plugin files]
+│ └─ publish
+│ └─ [publish plugin files]
+│
+│ # Launch hooks - used to modify how application is launched
+├─ hooks
+│ └─ [some pre/post launch hooks]
+|
+│ # Code initializing host integration in-DCC (DCC specific - example from Maya)
+├─ startup
+│ └─ userSetup.py
+│
+│ # Public interface
+├─ __init__.py
+└─ [other public code]
+```
+
+### Launch Hooks
+Launch hooks are not directly connected to the host implementation, but they can be used to modify the launch of the process, which may be crucial for the integration. Launch hooks are plugins called when a DCC is launched. They are processed in sequence before and after launch. Prelaunch hooks can change how the DCC process is launched, e.g. change subprocess flags, modify environments or modify launch arguments. If a prelaunch hook crashes, the application is not launched at all. Postlaunch hooks are triggered after the subprocess is launched. They can be used to change statuses in your project tracker, start a timer, etc. A crashed postlaunch hook has no effect on the rest of the postlaunch hooks or on the launched process. Hooks can be filtered by platform, host and application, and their order is defined by an integer value. Hooks inside a host are loaded automatically (one reason why the folder name should match the host name) or can be defined in modules. Hook execution shares the same launch context where data used across multiple hooks can be stored (please be very specific about stored keys, e.g. 'project' vs. 'project_name'). For more detailed information look into `openpype/lib/applications.py`.
+
+### Public interface
+The public face is at this moment related to launching of the DCC. Currently the only option is to modify environment variables before launch by implementing the function `add_implementation_envs` (must be available in `openpype/hosts/{host name}/__init__.py`). The function is called after prelaunch hooks, as the last step before the subprocess launch, to be able to set environment variables crucial for proper integration. It is also a good place for functions that are used both in prelaunch hooks and in the in-DCC integration. A future plan is to be able to get workfile extensions from here. Right now workfile extensions are hardcoded in `openpype/pipeline/constants.py` under `HOST_WORKFILE_EXTENSIONS`; we would like to handle hosts as addons similar to OpenPype modules, and improve more things which are now hardcoded.
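+
+A minimal sketch of such a function follows. The host name `myhost` and the environment variable it sets are made up for illustration; real hosts set whatever their in-DCC startup logic needs (e.g. script paths, plugin paths).
+
+```python
+# openpype/hosts/myhost/__init__.py  (hypothetical host)
+import os
+
+HOST_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def add_implementation_envs(env, _app):
+    """Modify environments before the DCC subprocess is launched.
+
+    Called after prelaunch hooks; 'env' is modified in place.
+    """
+    # Hypothetical variable the in-DCC startup code would read
+    env["MYHOST_STARTUP_PATH"] = os.path.join(HOST_DIR, "startup")
+```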
+
+### Integration
+We've prepared the base class `HostBase` in `openpype/host/host.py` to define minimum requirements and provide some default method implementations. The minimum requirement for a host is the `name` attribute; such a host would not be able to do much, but it is valid. To extend functionality we've prepared interfaces that help to identify what a host is capable of and whether certain tools can be used with it. For those cases we defined interfaces for each workflow. The `IWorkfileHost` interface adds the requirement to implement workfile related methods, which makes the host usable in combination with the Workfiles tool. The `ILoadHost` interface adds the requirements to load, update, switch or remove referenced representations, which adds support for the Loader and Scene Inventory tools. The `INewPublisher` interface is required to be able to use a host with the new OpenPype publish workflow. This is what must or can be implemented to allow certain functionality. `HostBase` will get more responsibility, which will be taken over from global variables in the future. This process won't happen at once, but will be gradual to keep backwards compatibility for some time.
+
+#### Example
+```python
+from openpype.host import HostBase, IWorkfileHost, ILoadHost
+
+
+class MayaHost(HostBase, IWorkfileHost, ILoadHost):
+    def open_workfile(self, filepath):
+        ...
+
+    def save_current_workfile(self, filepath=None):
+        ...
+
+    def get_current_workfile(self):
+        ...
+    ...
+```
+
+### Install integration
+We have prepared a host class; now where and how do we initialize its object? This part is DCC specific. In DCCs like Maya, with embedded Python and Qt, we take advantage of being able to initialize an object of the class directly in the DCC process on start; the same happens in Nuke, Hiero and Houdini. For DCCs like Photoshop or Harmony, an OpenPype (Python) process is launched next to them, which handles host initialization and communication with the DCC process (e.g. using sockets). The created host object must be installed and registered to the global scope of OpenPype, which means that at this moment one process can handle only one host at a time.
+
+#### Install example (Maya startup file)
+```python
+from openpype.pipeline import install_host
+from openpype.hosts.maya.api import MayaHost
+
+
+host = MayaHost()
+install_host(host)
+```
+
+The function `install_host` takes care of installing global plugins and callbacks, and registers the host. Host registration means that the object is kept in memory and is accessible using `get_registered_host()`.
+
+### Using UI tools
+Most functionality in DCCs is provided to artists via UI tools. We're trying to keep the UIs consistent, so we use the same set of tools in each host; all or most of them are Qt based. There is a `HostToolsHelper` in `openpype/tools/utils/host_tools.py` which unifies showing of the default tools; they can be shown at almost any point. Some of them validate whether the host is capable of using them (Workfiles, Loader and Scene Inventory), which is related to [pipeline workflows](#pipeline-workflows). `HostToolsHelper` provides an API to show the tools, but the host integration must take care of giving artists the ability to show them. Most DCCs have some extendable menu bar where it is possible to add custom actions, which is the preferred approach for exposing the tools.
diff --git a/website/docs/dev_publishing.md b/website/docs/dev_publishing.md
new file mode 100644
index 0000000000..135f6cd985
--- /dev/null
+++ b/website/docs/dev_publishing.md
@@ -0,0 +1,599 @@
+---
+id: dev_publishing
+title: Publishing
+sidebar_label: Publishing
+toc_max_heading_level: 4
+---
+
+The publishing workflow consists of 2 parts:
+- Creating - Mark what will be published and how.
+- Publishing - Use data from Creating to go through the pyblish process.
+
+OpenPype uses [pyblish](https://pyblish.com/) for the publishing process. OpenPype extends and modifies a few of its functions a bit, mainly for reports and UI purposes. The main differences are that OpenPype's publish UI allows enabling/disabling instances or plugins during the Creating part instead of in the publishing part, and supports plugin actions only for failed validation plugins.
+
+## **Creating**
+
+The concept of Creating does not have to "create" anything yet; it prepares and stores metadata about an "instance" (which becomes a subset after the publish process).
A created instance always has a `family`, which defines what kind of data will be published; the best example is the `workfile` family. Storing of the metadata is host specific and may even be Creator plugin specific. Most hosts store the metadata in a workfile (Maya scene, Nuke script, etc.) on an item or a node the same way as regular Pyblish instances, so consistency of the host implementation is kept, but some features may require a different approach, which is the reason why it is the creator plugin's responsibility. Storing the metadata in the workfile persists the values, so the artist does not have to create and set what should be published and how over and over.
+
+### Created instance
+
+An object representation of created instance metadata, defined by the class **CreatedInstance**. It has access to the **CreateContext** and the **BaseCreator** that initialized the object. It is a dictionary-like object with a few immutable keys (marked with a `*` in the table). The immutable keys are set by the creator plugin or the create context on initialization and their values can't change. An instance can hold more arbitrary data, for example ids of nodes in the scene, but keep in mind that some keys are reserved.
+
+| Key | Type | Description |
+|---|---|---|
+| *id | str | Identifier of the metadata type. At this moment the constant **"pyblish.avalon.instance"** |
+| *instance_id | str | Unique ID of the instance. Set automatically on instance creation using `str(uuid.uuid4())` |
+| *family | str | Instance's family representing the type, defined by the creator plugin. |
+| *creator_identifier | str | Identifier of the creator that collected/created the instance. |
+| *creator_attributes | dict | Dictionary of attributes that are defined by the creator plugin (`get_instance_attr_defs`). |
+| *publish_attributes | dict | Dictionary of attributes that are defined by publish plugins. |
+| variant | str | Variant is entered by the artist on creation and may affect **subset**. |
+| subset | str | Name of the instance. This name will be used as the subset name during publishing. Can change on context change or variant change. |
+| active | bool | Whether the instance is active and will be published or not. |
+| asset | str | Name of the asset in whose context the instance was created. |
+| task | str | Name of the task in whose context the instance was created. Can be set to `None`. |
+
+:::note
+Task should not be required until the subset name template expects it.
+:::
+
+An object of **CreatedInstance** has the method **data_to_store**, which returns a dictionary that can be parsed to a JSON string. This method returns all data related to the instance, so it can be re-created using `CreatedInstance.from_existing(data)`.
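+
+A minimal sketch of that round trip (the import path and the constructor argument order follow the examples later on this page; the `creator` object is assumed to be an existing creator plugin instance):
+
+```python
+import json
+
+from openpype.pipeline.create import CreatedInstance
+
+instance = CreatedInstance(
+    "render",
+    "renderMain",
+    {"asset": "sh0010", "task": "comp", "variant": "Main"},
+    creator
+)
+
+# 'data_to_store' returns a plain dictionary that survives a json round trip
+raw_data = json.dumps(instance.data_to_store())
+
+# Re-create the same instance later, e.g. when collecting from a workfile
+same_instance = CreatedInstance.from_existing(json.loads(raw_data), creator)
+```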
+
+#### *Create context*
+
+The controller and wrapper around Creating is `CreateContext`, which takes care of loading the plugins needed for Creating and validates the required functions in the host implementation.
+
+The context discovers creator and publish plugins, triggers collection of existing instances on creators and triggers Creating itself. It also keeps track of instance objects by their ids.
+
+Creator plugins can call **creator_adds_instance** or **creator_removed_instance** to add/remove instances, but these methods are not meant to be called directly from outside of the creator. The reason is that it is the creator's responsibility to remove the metadata, or to decide whether it should remove the instance.
+
+During a reset, Creator plugins are re-cached, instances are re-collected, the host context is refreshed and more. The `CreateContext` object supplies shared data during the reset. They can be used by creators to share the same data needed during the collection phase, or during creation for autocreators.
+
+#### Required functions in host implementation
+It is recommended to use the `HostBase` class (`from openpype.host import HostBase`) as a base for the host implementation, in combination with the `IPublishHost` interface (`from openpype.host import IPublishHost`). These abstract classes should guide you to fill in the missing attributes and methods.
+
+To sum up: in case the host implementation inherits from `HostBase`, the implementation **must** implement **get_context_data** and **update_context_data**. These two functions are needed to store metadata that is not related to any instance but is needed for the Creating and publishing process. Right now only data about enabled/disabled optional publish plugins is stored there. When the data is not stored and loaded properly, a reset of publishing will cause it to be set back to default values. Context data is also parsed to a JSON string, similarly to instance data.
+
+There are also a few optional functions. For UI purposes it is possible to implement **get_context_title**, which can return a string shown in the UI as a title. The output string may contain html tags. It is recommended to return the context path (a helper function for this purpose will be created) in this order: `"{project name}/{asset hierarchy}/{asset name}/{task name}"` (this is the default implementation in `HostBase`).
+
+Another optional function is **get_current_context**. This function is handy in hosts where it is possible to open multiple workfiles in one process, where using global context variables is not reliable because artists can switch between opened workfiles without the pipeline being notified. When the function is not implemented or doesn't return the right keys, the global context is used. Expected keys in the output:
+```json
+{
+    "project_name": "MyProject",
+    "asset_name": "sq01_sh0010",
+    "task_name": "Modeling"
+}
+```
+
+### Create plugin
+The main responsibility of a create plugin is to create, update, collect and remove instance metadata and to propagate changes to the create context. It has access to the **CreateContext** (`self.create_context`) that discovered the plugin, so it also has access to other creators and instances. Create plugins have a lot of responsibility, so it is recommended to implement common code per host.
+
+#### *BaseCreator*
+Base implementation of a creator plugin. It is not recommended to use this class as a base for production plugins; rather use one of the **HiddenCreator**, **AutoCreator** and **Creator** variants.
+
+**Access to shared data**
+Functions to work with "collection shared data" can be used during the reset phase of `CreateContext`. Creators can cache data there that is common for them, for example a list of nodes in the scene. The methods are implemented on `CreateContext`, but their usage is primarily for Create plugins, as nothing else should use them. Each creator can access the `collection_shared_data` attribute, which is a dictionary where shared data can be stored.
+
+**Abstractions**
+- **`family`** (class attr) - Tells what kind of instance will be created.
+```python
+class WorkfileCreator(Creator):
+    family = "workfile"
+```
+
+- **`collect_instances`** (method) - Collect already existing instances from the workfile and add them to the create context. This method is called on initialization or reset of **CreateContext**. Each creator is responsible for finding its instance metadata, converting it to a **CreatedInstance** object and adding it to the create context (`self._add_instance_to_context(instance_obj)`).
+```python
+def collect_instances(self):
+    # Using 'pipeline.list_instances' is just an example of how to get
+    #   existing instances from the scene
+    # - getting existing instances differs per host implementation
+    for instance_data in pipeline.list_instances():
+        # Process only instances that were created by this creator
+        creator_id = instance_data.get("creator_identifier")
+        if creator_id == self.identifier:
+            # Create instance object from existing data
+            instance = CreatedInstance.from_existing(
+                instance_data, self
+            )
+            # Add instance to create context
+            self._add_instance_to_context(instance)
+```
+
+- **`create`** (method) - Create a new object of **CreatedInstance**, store its metadata in the workfile and add the instance into the create context. Failed Creating should raise a **CreatorError** when an error happens that artists can fix, or to give them some useful information. Triggers and implementation differ for **Creator**, **HiddenCreator** and **AutoCreator**.
+
+- **`update_instances`** (method) - Update instance data. Receives a list of tuples with **instance** and **changes**.
+```python
+def update_instances(self, update_list):
+    # Loop over changed instances
+    for instance, changes in update_list:
+        # Example possible usage of 'changes' to use different node on change
+        # of node id in instance data (MADE UP)
+        node = None
+        if "node_id" in changes:
+            old_value, new_value = changes["node_id"]
+            if new_value is not None:
+                node = pipeline.get_node_by_id(new_value)
+
+        if node is None:
+            # Fall back to the node in the scene that represents the instance
+            node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data to the node
+        pipeline.imprint(node, instance.data_to_store())
+
+
+# Most implementations will probably ignore 'changes' completely
+def update_instances(self, update_list):
+    for instance, _ in update_list:
+        # Get node from scene
+        node = pipeline.get_node_by_instance_id(instance.id)
+        # Imprint data to node
+        pipeline.imprint(node, instance.data_to_store())
+```
+
+- **`remove_instances`** (method) - Remove instance metadata from the workfile and from the create context.
+```python
+# Possible way how to remove instance
+def remove_instances(self, instances):
+    for instance in instances:
+        # Remove instance metadata from workfile
+        pipeline.remove_instance(instance.id)
+        # Remove instance from create context
+        self._remove_instance_from_context(instance)
+
+
+# Default implementation of `AutoCreator`
+def remove_instances(self, instances):
+    pass
+```
+
+:::note
+When the host implementation uses a universal way to store and load instances, you should implement a host specific creator plugin base class with **collect_instances**, **update_instances** and **remove_instances** implemented.
+:::
+
+**Optional implementations**
+
+- **`enabled`** (attr) - Boolean saying whether the creator plugin is enabled and used.
+- **`identifier`** (class attr) - A consistent, unique string identifier of the creator plugin. It is used to identify the source plugin of existing instances. There can't be 2 creator plugins with the same identifier. The default implementation returns the `family` attribute.
+```python
+class RenderLayerCreator(Creator):
+    family = "render"
+    identifier = "render_layer"
+
+
+class RenderPassCreator(Creator):
+    family = "render"
+    identifier = "render_pass"
+```
+
+- **`label`** (attr) - String label of the creator plugin which will show up in the UI; `identifier` is used when not set. It should be possible to use html tags.
+```python
+class RenderLayerCreator(Creator):
+    label = "Render Layer"
+```
+
+- **`get_icon`** (method) - Icon of the creator and its instances. The value can be a path to an image file, a full name of a qtawesome icon, `QPixmap` or `QIcon`. For complex cases, or cases when `Qt` objects are returned, it is recommended to override the `get_icon` method and handle the logic there, or import `Qt` inside the method, to not break headless usage of the creator plugin. For the list of qtawesome icons check the qtawesome github repository (look for the used version in pyproject.toml). The default implementation returns the **icon** attribute.
+- **`icon`** (attr) - Attribute for the default implementation of **get_icon**.
+```python
+class RenderLayerCreator(Creator):
+    # Use font awesome 5 icon
+    icon = "fa5.building"
+```
+
+- **`get_instance_attr_defs`** (method) - Attribute definitions of the instance. A creator can define attributes with default values for each instance. These attributes may affect how instances are processed during publishing. Attribute definitions can be used from `openpype.lib.attribute_definitions`. Attribute definitions define basic value types for different cases, e.g. boolean, number, string, enumerator, etc. The default implementation returns **instance_attr_defs**.
+- **`instance_attr_defs`** (attr) - Attribute for the default implementation of **get_instance_attr_defs**.
+
+```python
+from openpype.lib import attribute_definitions
+
+
+class RenderLayerCreator(Creator):
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to change if should be rendered on farm or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+```
+
+- **`get_subset_name`** (method) - Calculate the subset name based on the passed data. The data can be extended using the `get_dynamic_data` method. The default implementation uses `get_subset_name` from `openpype.lib`, which is recommended.
+
+- **`get_dynamic_data`** (method) - Can be used to extend the data for subset name templates, which may be required in some cases.
+
+Both methods are used before instance creation and on instance subset name updates. An update may require access to the existing instance, because the dynamic data should be filled from there. Because of that, the instance is passed to `get_subset_name` and `get_dynamic_data`, so the creator can handle those cases.
+
+This is one example where the subset name template may contain `"{layer}"`, which is filled during creation because the value is taken from the selection. In that case `get_dynamic_data` returns the value `"{layer}"` for the `"layer"` key, so it can be filled in on creation. But when the subset name of an already existing instance is updated, it should return the already existing value. Note: the creator must make sure the value is available on the instance.
+
+```python
+from openpype.lib import prepare_template_data
+from openpype.pipeline.create import Creator
+from my_host import get_selected_layer
+
+
+class SomeCreator(Creator):
+    def get_dynamic_data(
+        self, variant, task_name, asset_doc, project_name, host_name, instance
+    ):
+        # Before instance is created return unfilled key
+        # - the key will be filled during creation
+        if instance is None:
+            return {"layer": "{layer}"}
+        # Take value from existing instance
+        # - creator must know where to look for the value
+        return {"layer": instance.data["layer"]}
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # Fill the layer name in
+        layer = get_selected_layer()
+        layer_name = layer["name"]
+        layer_fill_data = prepare_template_data({"layer": layer_name})
+        subset_name = subset_name.format(**layer_fill_data)
+        instance_data["layer"] = layer_name
+        ...
+```
+
+
+#### *HiddenCreator*
+A creator which is not shown in the UI, so an artist can't trigger it directly, but it is available to other creators. This creator is primarily meant for cases when creation should produce different types of instances. An example is editorial publishing, where the input is a single edl file but 2 or more kinds of instances should be created, each with a different family, attributes and abilities. The arguments for creation are limited to `instance_data` and `source_data`. The content of `instance_data` should follow what is sent to other creators, and `source_data` can be used to send custom data defined by the main creator. It is expected that a `HiddenCreator` has a specific main or "parent" creator.
+
+```python
+def create(self, instance_data, source_data):
+    variant = instance_data["variant"]
+    task_name = instance_data["task"]
+    asset_name = instance_data["asset"]
+    asset_doc = get_asset_by_name(self.project_name, asset_name)
+    subset_name = self.get_subset_name(
+        variant, task_name, asset_doc, self.project_name, self.host_name)
+```
+
+
+#### *AutoCreator*
+A creator that is triggered on reset of the create context. It can be used for families that are expected to be created automatically, without artist interaction (e.g. **workfile**). The `create` method is triggered after all creators are collected.
+
+:::important
+**AutoCreator** implements **remove_instances** to do nothing, as removing auto-created instances would lead to creating a new instance immediately or on the next refresh.
+::: + +```python +def __init__( + self, create_context, system_settings, project_settings, *args, **kwargs +): + super(MyCreator, self).__init__( + create_context, system_settings, project_settings, *args, **kwargs + ) + # Get variant value from settings + variant_name = ( + project_settings["my_host"][self.identifier]["variant"] + ).strip() + if not variant_name: + variant_name = "Main" + self._variant_name = variant_name + +# Create does not expect any arguments +def create(self): + # Look for existing instance in create context + existing_instance = None + for instance in self.create_context.instances: + if instance.creator_identifier == self.identifier: + existing_instance = instance + break + + # Collect current context information + # - variant can be filled from settings + variant = self._variant_name + # Only place where we can look for current context + project_name = self.project_name + asset_name = legacy_io.Session["AVALON_ASSET"] + task_name = legacy_io.Session["AVALON_TASK"] + host_name = legacy_io.Session["AVALON_APP"] + + # Create new instance if does not exist yet + if existing_instance is None: + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + data = { + "asset": asset_name, + "task": task_name, + "variant": variant + } + data.update(self.get_dynamic_data( + variant, task_name, asset_doc, project_name, host_name + )) + + new_instance = CreatedInstance( + self.family, subset_name, data, self + ) + self._add_instance_to_context(new_instance) + + # Update instance context if is not the same + elif ( + existing_instance["asset"] != asset_name + or existing_instance["task"] != task_name + ): + asset_doc = get_asset_by_name(project_name, asset_name) + subset_name = self.get_subset_name( + variant, task_name, asset_doc, project_name, host_name + ) + existing_instance["asset"] = asset_name + existing_instance["task"] = task_name +``` + +#### *Creator* +Implementation of creator plugin that is triggered manually by the artist in UI (or by code). Has extended options for UI purposes than **AutoCreator** and **create** method expect more arguments. + +**Optional implementations** +- **`create_allow_context_change`** (class attr) - Allow to set context in UI before Creating. Some creators may not allow it or their logic would not use the context selection (e.g. bulk creators). Is set to `True` but default. +```python +class BulkRenderCreator(Creator): + create_allow_context_change = False +``` +- **`get_default_variants`** (method) - Returns list of default variants that are listed in create dialog for user. Returns **default_variants** attribute by default. +- **`default_variants`** (attr) - Attribute for default implementation of **get_default_variants**. + +- **`get_default_variant`** (method) - Returns default variant that is prefilled in UI (value does not have to be in default variants). By default returns **default_variant** attribute. If returns `None` then UI logic will take first item from **get_default_variants** if there is any otherwise **"Main"** is used. +- **`default_variant`** (attr) - Attribute for default implementation of **get_default_variant**. + +- **`get_description`** (method) - Returns a short string description of the creator. Returns **description** attribute by default. +- **`description`** (attr) - Attribute for default implementation of **get_description**. 
+
+- **`get_detailed_description`** (method) - Returns a detailed string description of the creator. Can contain markdown. Returns the **detailed_description** attribute by default.
+- **`detailed_description`** (attr) - Attribute for the default implementation of **get_detailed_description**.
+
+- **`get_pre_create_attr_defs`** (method) - Similar to **get_instance_attr_defs**, returns attribute definitions, but these are filled before creation. When creation is triggered from the UI, the values are passed to the **create** method. Returns the **pre_create_attr_defs** attribute by default.
+- **`pre_create_attr_defs`** (attr) - Attribute for the default implementation of **get_pre_create_attr_defs**.
+
+```python
+from openpype.lib import attribute_definitions
+from openpype.pipeline.create import CreatedInstance, Creator, CreatorError
+
+
+class CreateRender(Creator):
+    family = "render"
+    label = "Render"
+    icon = "fa.eye"
+    description = "Render scene viewport"
+
+    def __init__(
+        self, context, system_settings, project_settings, *args, **kwargs
+    ):
+        super(CreateRender, self).__init__(
+            context, system_settings, project_settings, *args, **kwargs
+        )
+        plugin_settings = (
+            project_settings["my_host"]["create"][self.__class__.__name__]
+        )
+        # Get information if studio has enabled farm publishing
+        self._allow_farm_render = plugin_settings["allow_farm_render"]
+        # Get default variants from settings
+        self.default_variants = plugin_settings["variants"]
+
+    def get_instance_attr_defs(self):
+        # Return empty list if '_allow_farm_render' is not enabled (can be set during initialization)
+        if not self._allow_farm_render:
+            return []
+        # Give artist option to change if should be rendered on farm or locally
+        return [
+            attribute_definitions.BoolDef(
+                "render_farm",
+                default=False,
+                label="Render on Farm"
+            )
+        ]
+
+    def get_pre_create_attr_defs(self):
+        # Give user option to use selection or not
+        attrs = [
+            attribute_definitions.BoolDef(
+                "use_selection",
+                default=False,
+                label="Use selection"
+            )
+        ]
+        if self._allow_farm_render:
+            # Set to render on farm in creator dialog
+            # - this value is not automatically passed to instance attributes
+            #   creator must do that during creation
+            attrs.append(
+                attribute_definitions.BoolDef(
+                    "render_farm",
+                    default=False,
+                    label="Render on Farm"
+                )
+            )
+        return attrs
+
+    def create(self, subset_name, instance_data, pre_create_data):
+        # ARGS:
+        # - 'subset_name' - precalculated subset name
+        # - 'instance_data' - context data
+        #     - 'asset' - asset name
+        #     - 'task' - task name
+        #     - 'variant' - variant
+        #     - 'family' - instance family
+
+        # Check if should use selection or not
+        if pre_create_data.get("use_selection"):
+            items = pipeline.get_selection()
+        else:
+            items = [pipeline.create_write()]
+
+        # Validations related to selection
+        if len(items) > 1:
+            raise CreatorError("Please select only a single item at a time.")
+
+        elif not items:
+            raise CreatorError("Nothing to create. Select at least one item.")
+
+        # Create instance object
+        new_instance = CreatedInstance(
+            self.family, subset_name, instance_data, self
+        )
+        # Pass value from pre-create attributes to the instance
+        # - use it only when pre-create data contains the key
+        if "render_farm" in pre_create_data:
+            use_farm = pre_create_data["render_farm"]
+            new_instance.creator_attributes["render_farm"] = use_farm
+
+        # Store metadata to workfile
+        pipeline.imprint(new_instance.id, new_instance.data_to_store())
+
+        # Add instance to context
+        self._add_instance_to_context(new_instance)
+```
+
+## **Publish**
+### Exceptions
+OpenPype defines a few specific exceptions that should be used in publish plugins.
+
+#### *Validation exception*
+Validation plugins should raise `PublishValidationError` to show an artist what's wrong and give them actions to fix it. The exception says that the errors found by the plugin can be fixed by the artist themselves (with or without an action on the plugin). Any other error will stop publishing immediately. A `PublishValidationError` raised after the validation order has the same effect as any other exception.
+
+The `PublishValidationError` exception expects 4 arguments:
+- **message** - Not used in the UI, but needed for headless publishing.
+- **title** - Short description of the error (2-5 words). The title is used for grouping exceptions per plugin.
+- **description** - Detailed description of the issue, where markdown and html can be used.
+- **detail** - Optional, even more detailed information for advanced users. At this moment the detail is shown directly under the description, but the plan is to show it in a collapsible widget.
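+
+For illustration, a hedged sketch of raising it from a validator; the import path `openpype.pipeline.publish` and the keyword names follow the argument list above, and the plugin itself is made up:
+
+```python
+import pyblish.api
+from openpype.pipeline.publish import PublishValidationError
+
+
+class ValidateFrameRange(pyblish.api.InstancePlugin):
+    """Sketch of a validator raising an artist-facing error."""
+    order = pyblish.api.ValidatorOrder
+    label = "Validate Frame Range"
+
+    def process(self, instance):
+        if instance.data.get("frameStart") is None:
+            raise PublishValidationError(
+                "Instance has no frame range set",
+                title="Missing frame range",
+                description=(
+                    "## Missing frame range\n"
+                    "Set the frame range on the instance and reset publishing."
+                )
+            )
+```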
+
+An extended version is `PublishXmlValidationError`, which uses xml files with stored descriptions. This helps to avoid having huge markdown texts inside the code. The exception has 4 arguments:
+- **plugin** - The plugin object which raises the exception, used to find its related xml file.
+- **message** - Exception message for publishing without UI or with a different pyblish UI.
+- **key** - Optional argument saying which error from the xml is used, as a validation plugin may raise errors with different messages based on the current situation. Default is **"main"**.
+- **formatting_data** - Optional dictionary used for formatting the error texts. This is used to fill the detailed description with data from the publishing, so the artist can get more precise information.
+
+**Where and how to create the xml file**
+
+Xml files for `PublishXmlValidationError` must be located in the **./help** subfolder next to the plugin, and the filename must match the filename of the plugin.
+```
+# File location related to plugin file
+└ publish
+  ├ help
+  │ ├ validate_scene.xml
+  │ └ ...
+  ├ validate_scene.py
+  └ ...
+```
+
+The xml file content has a `<root>` node, which may contain any number of `<error>` nodes, but each of them must have an **id** attribute with a unique value. That value is then used for the **key**. Each error must have a `<title>`, a `<description>` and a `<detail>`. The text content may contain python formatting keys that can be filled when an exception is raised.
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<root>
+    <error id="main">
+        <title>Subset context</title>
+        <description>## Invalid subset context
+
+Context of the given subset doesn't match your current scene.
+
+### How to repair?
+
+You can fix this with the "Repair" button on the right. This will use '{expected_asset}' asset name and overwrite '{found_asset}' asset name in scene metadata.
+
+After that restart publishing with Reload button.
+</description>
+        <detail>
+### How could this happen?
+
+The subset was created in different scene with different context
+or the scene file was copy pasted from different context.
+</detail>
+    </error>
+</root>
+```
+
+#### *Known errors*
+When a known error happens that can't be fixed by the user (e.g. it is not possible to connect to the deadline service, etc.), `KnownPublishError` should be raised. The only difference is that its message is shown in the UI to the artist; otherwise a neutral message without context is shown.
+
+### Plugin extension
+Publish plugins can be extended with additional logic by inheriting from `OpenPypePyblishPluginMixin`, which can be used as a mixin (additional inheritance of the class). Publish plugins that inherit from this mixin can define attributes that will be shown in the **CreatedInstance**. One of the most important usages is the ability to turn optional plugins on/off.
+
+Attributes are defined by the return value of the `get_attribute_defs` method. Attribute definitions apply to the families defined in the plugin's `families` attribute if it is an instance plugin, or to the whole context if it is a context plugin. To convert existing values (or to remove legacy values), `convert_attribute_values` can be re-implemented. The default implementation just converts the values to the right types.
+
+:::important
+Values of publish attributes from a created instance are never removed automatically, so implementing this method is the best way to remove legacy data or convert it to a new data structure.
+:::
+
+Possible attribute definitions can be found in `openpype/lib/attribute_definitions.py`.
+
+<details>
+**Example plugin**

+
+```python
+import pyblish.api
+from openpype.lib import attribute_definitions
+from openpype.pipeline import OpenPypePyblishPluginMixin
+
+
+# Example context plugin
+class MyExtendedPlugin(
+    pyblish.api.ContextPlugin, OpenPypePyblishPluginMixin
+):
+    label = "My Extended Plugin"
+    optional = True
+    active = True
+
+    @classmethod
+    def get_attribute_defs(cls):
+        return [
+            attribute_definitions.BoolDef(
+                # Key under which it will be stored
+                "process",
+                # Use 'active' as default value
+                default=cls.active,
+                # Use plugin label as label for attribute
+                label=cls.label
+            )
+        ]
+
+    def process_plugin(self, context):
+        # First check if plugin is optional
+        if not self.optional:
+            return True
+
+        # Attribute values are stored by class names
+        # - 'get_attr_values_from_data' was implemented
+        #   to help with accessing them
+        attribute_values = self.get_attr_values_from_data(context.data)
+        # Get 'process' key
+        process_value = attribute_values.get("process")
+        if process_value is None or process_value:
+            return True
+        return False
+
+    def process(self, context):
+        if not self.process_plugin(context):
+            return
+        # Do plugin logic
+        ...
+```

+
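+To tie the exception section together, a minimal sketch of a validator raising `PublishXmlValidationError` with the `./help/validate_scene.xml` file shown earlier (the asset comparison logic, plugin name and context keys are illustrative assumptions, not an actual OpenPype plugin):
+
+```python
+import pyblish.api
+from openpype.pipeline.publish import PublishXmlValidationError
+
+
+class ValidateSceneContext(pyblish.api.InstancePlugin):
+    """Illustrative validator using the xml based validation error."""
+
+    label = "Validate Scene Context"
+    order = pyblish.api.ValidatorOrder
+
+    def process(self, instance):
+        # Hypothetical way to get expected and found asset names
+        expected_asset = instance.context.data["assetEntity"]["name"]
+        found_asset = instance.data["asset"]
+        if found_asset == expected_asset:
+            return
+
+        raise PublishXmlValidationError(
+            self,
+            "Subset context does not match the current scene.",
+            # 'main' is the default key, shown here for clarity
+            key="main",
+            # Fills '{expected_asset}' and '{found_asset}' in the
+            # xml description and detail
+            formatting_data={
+                "expected_asset": expected_asset,
+                "found_asset": found_asset
+            }
+        )
+```
+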
+
+## **UI examples**
+### Main publish window
+The main window of the publisher shows instances and their values, collected by creators.
+
+**Card view**
+![Publisher UI - Card view](assets/publisher_card_view.png)
+**List view**
+![Publisher UI - List view](assets/publisher_list_view.png)
+
+#### *Instances views*
+The list of instances always contains an `Options` item which is used to show attributes of context plugins. Values from the item are saved and loaded using the [host implementation](#required-functions-in-host-implementation) functions **get_context_data** and **update_context_data**. Instances are grouped by family and can be shown in a card view (single selection) or a list view (multi selection).
+
+The instance view has 3 buttons at the bottom: the plus sign opens the [create dialog](#create-dialog), the bin removes selected instances and the stripes swap between card and list view.
+
+#### *Context options*
+It is possible to change the variant or the asset and task context of instances in the top part, but all changes there must be confirmed. Confirmation triggers recalculation of subset names and all new data are stored to the instances.
+
+#### *Create attributes*
+Create attributes display the creator attributes of all selected instances. All attributes that have the same definition are grouped into one input, which is visually indicated when the values are not the same for all selected instances; in most cases such inputs show a **< Multiselection >** placeholder.
+
+#### *Publish attributes*
+Publish attributes work the same way as create attributes but the source of the attribute definitions is pyblish plugins. Attributes are filtered based on the families of selected instances and the families defined in the pyblish plugin.
+
+### Create dialog
+![Publisher UI - Create dialog](assets/publisher_create_dialog.png)
+The create dialog is used by artists to create new instances in a context. The context selection can be enabled/disabled by changing `create_allow_context_change` on the [creator plugin](#creator). In the middle part the artist selects what will be created and what variant it is. On the right side is information about the selected creator and its pre-create attributes. There is also a question mark button which extends the window and displays more detailed information about the creator.
\ No newline at end of file
diff --git a/website/docs/dev_requirements.md b/website/docs/dev_requirements.md
index 6c87054ba0..f8b796d997 100644
--- a/website/docs/dev_requirements.md
+++ b/website/docs/dev_requirements.md
@@ -14,7 +14,7 @@ The main things you will need to run and build pype are: - **Terminal** in your OS - PowerShell 5.0+ (Windows) - Bash (Linux)
-- [**Python 3.7.8**](#python) or higher
+- [**Python 3.9.x**](#python)
- [**MongoDB**](#database)
@@ -39,13 +39,13 @@ Pype needs site-wide installation of **MongoDB**. It should be installed on reliable server, that all workstations (and possibly render nodes) can connect. This server holds **Avalon** database that is at the core of everything
-Depending on project size and number of artists working connection speed and
+Depending on project size and number of artists working, connection speed and
latency influence performance experienced by artists. If remote working is required, this mongodb server must be accessible from Internet or cloud solution can be used. Reasonable backup plan or high availability options are recommended. *Replication* feature of MongoDB should be considered. This is beyond the scope of this documentation, please refer to [MongoDB Documentation](https://docs.mongodb.com/manual/replication/).
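+As a quick sanity check, you can verify that a workstation reaches the server (a hedged sketch: `your-mongo-server` is a placeholder, and with MongoDB 5.0+ the bundled shell is `mongosh` instead of `mongo`):
+```commandline
+mongo "mongodb://your-mongo-server:27017" --eval "db.version()"
+```
+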
-Pype can run it's own instance of mongodb, mostly for testing and development purposes.
+Pype can run its own instance of mongodb, mostly for testing and development purposes.
For that it uses locally installed MongoDB. Download it from [MongoDB website](https://www.mongodb.com/download-center/community), install it and
@@ -55,13 +55,14 @@ To run mongoDB on server, use your server distribution tools to set it up (on Li
## Python
-**Python 3.7.8** is the recommended version to use (as per [VFX platform CY2021](https://vfxplatform.com/)).
+**Python 3.9.x** is the recommended version to use (as per [VFX platform CY2022](https://vfxplatform.com/)).
+**Note**: We do not support 3.9.0 because of [this bug](https://github.com/python/cpython/pull/22670). Please use a higher version of 3.9.x.
-If you're planning to run openPYPE on workstations from built executables (highly recommended), you will only need python for building and development, however, if you'd like to run from source centrally, every user will need python installed.
+If you're planning to run openPYPE on workstations from built executables (highly recommended), you will only need python for building and development. However, if you'd like to run from source centrally, every user will need python installed.
## Hardware
-openPYPE should be installed on all workstations that need to use it, the same as any other application.
+openPYPE should be installed on all workstations that need to use it, the same as any other application.
There are no specific requirements for the hardware. If the workstation can run the major DCCs, it most probably can run openPYPE. @@ -69,7 +70,7 @@ the major DCCs, it most probably can run openPYPE. Installed, it takes around 400MB of space, depending on the platform
-For well functioning ftrack event server, we recommend a linux virtual server with Ubuntu or CentOS. CPU and RAM allocation needs differ based on the studio size, but a 2GB of ram, with a dual core CPU and around 4GB of storage should suffice
+For a well functioning ftrack event server, we recommend a linux virtual server with Ubuntu or CentOS. CPU and RAM allocation needs differ based on the studio size, but 2GB of RAM, a dual core CPU and around 4GB of storage should suffice.
## Deployment @@ -87,9 +88,11 @@ This can also be hosted on the cloud in fully distributed deployments. - [**Avalon**](https://github.com/getavalon) - [**Pyblish**](https://github.com/pyblish) - [**OpenTimelineIO**](https://github.com/PixarAnimationStudios/OpenTimelineIO)
-- [**OpenImageIO**](https://github.com/OpenImageIO/oiio)
+- [**OpenImageIO**](https://github.com/OpenImageIO/oiio) [^centos7]
- [**FFmpeg**](https://github.com/FFmpeg/FFmpeg)
+[^centos7]: On CentOS 7 you need to install additional libraries to support OIIO - mainly boost
+and libraw (`sudo yum install boost-1.53.0` and `sudo yum install LibRaw`)
### Python modules we use and their licenses
diff --git a/website/docs/dev_settings.md b/website/docs/dev_settings.md
new file mode 100644
index 0000000000..94590345e8
--- /dev/null
+++ b/website/docs/dev_settings.md
@@ -0,0 +1,899 @@
+---
+id: dev_settings
+title: Settings
+sidebar_label: Settings
+---
+
+Settings give the ability to change how OpenPype behaves in certain situations. Settings are split into 3 categories: **system settings**, **project anatomy** and **project settings**. Project anatomy and project settings are grouped into a single category but there is a technical difference (explained later).
The only difference between system and project settings is that system settings can't technically be handled on a project level: their values must be available no matter in which project they are queried. Settings can be accessed through headless entities or through the settings UI.
+
+There is one more category, **local settings**, but they can't be changed or defined easily. Local settings can change how settings work per machine and can affect both system and project settings, but at this moment they are hardcoded to predefined values.
+
+## Settings schemas
+System and project settings are defined by settings schemas. A schema defines the structure of the output value, which value types the output will contain, how the settings are stored and how their UI inputs will look.
+
+## Settings values
+The output of settings is a json serializable value. There are 3 possible types of values: **default values**, **studio overrides** and **project overrides**. Default values must always be available for all settings schemas; their values are stored in code. Default values are what everyone who has just installed OpenPype will use. It is good practice to set example values, but they should be actually relevant.
+
+Settings overrides are what makes settings a powerful tool. Overrides contain only a part of the settings, with additional metadata that describe which parts of the settings values should be replaced by the override values. Using overrides gives the ability to save only specific values and use default values for the rest. This is especially useful in project settings, which have up to 2 levels of overrides: **default values** are used as a base on which **studio overrides** and then **project overrides** are applied. In practice it is possible to save only studio overrides, which affect all projects. Changes in studio overrides are then propagated to all projects without project overrides. But values can be locked on the project level so that studio overrides are not used.
+
+## Settings storage
+As was mentioned, default values are stored in repository files. Overrides are stored in the Mongo database. The value in mongo contains only overrides with metadata, so its content on its own is useless and must be used in combination with the default values. System settings and project settings are stored in a special collection. A single document represents one set of overrides together with the OpenPype version for which it is stored. Settings are versioned and are loaded in a specific order: overrides for the current OpenPype version, or for the first lower version available. If there are no overrides with the same or a lower version then the first higher version is used, and if there are no overrides at all then no overrides are applied.
+
+Project anatomy is stored in the project document, thus it is not versioned and its values are always overridden. Any changes in the anatomy schema may have a drastic effect on production and OpenPype updates.
+
+## Settings schema items
+As was mentioned, schema items define the output type of values, how they are stored and how they look in the UI.
+- schemas are (by default) defined by json files
+- OpenPype core system settings schemas are stored in `~/openpype/settings/entities/schemas/system_schema/` and project settings in `~/openpype/settings/entities/schemas/projects_schema/`
+  - both contain `schema_main.json` which are the entry points
+- OpenPype modules/addons can define their settings schemas using `BaseModuleSettingsDef`; in that case some functionality may be slightly modified
+- a single schema item is represented by a dictionary (object) in json which has a `"type"` key
+  - **type** is the only common key which is required for all schema items
+- each item may have "input modifiers" (other keys in the dictionary) and they may be required or optional based on the type
+- there are special keys shared across all items
+  - `"is_file"` - this key is used when default values are stored in the file. Its value matches the filename where the values are stored
+    - the key is validated and must be unique in the hierarchy, otherwise it won't be possible to store default values
+    - it makes sense to fill it only if its value is `true`
+
+  - `"is_group"` - defines that all values under a key in the settings hierarchy will be overridden if any value is modified
+    - this key is not allowed for all inputs as they may not have the technical ability to handle it
+    - the key is validated, must be unique in the hierarchy and is automatically filled on the last possible item if it is not defined in schemas
+    - it makes sense to fill it only if its value is `true`
+- all entities can have a `"tooltip"` key set with a description which will be shown in the UI on hover
+
+### Inner schema
+Settings schemas are big json files which would become unmanageable if they were kept in a single file. To be able to split them into multiple files and help organize them, the special types `schema` and `template` were added. Both types refer to a different file by filename. If the json file contains a dictionary it is considered a `schema`; if it contains a list it is considered a `template`.
+
+#### schema
+A schema item is replaced by the content of the referenced schema name. It is recommended that a schema file is used only once in the settings hierarchy; templates are meant for reusing.
+- schema must have a `"name"` key which is the name of the schema that should be used
+
+```javascript
+{
+    "type": "schema",
+    "name": "my_schema_name"
+}
+```
+
+#### template
+Templates are almost the same as schema items but can contain one or more items which can be formatted with additional data, or some keys can be skipped if needed. Templates are meant for reusing the same schemas with the ability to modify their content.
+ +- legacy name is `schema_template` (still usable) +- template must have `"name"` key which is name of template file that should be used +- to fill formatting keys use `"template_data"` +- all items in template, except `__default_values__`, will replace `template` item in original schema +- template may contain other templates + +```javascript +// Example template json file content +[ + { + // Define default values for formatting values + // - gives ability to set the value but have default value + "__default_values__": { + "multipath_executables": true + } + }, { + "type": "raw-json", + "label": "{host_label} Environments", + "key": "{host_name}_environments" + }, { + "type": "path", + "key": "{host_name}_executables", + "label": "{host_label} - Full paths to executables", + "multiplatform": "{multipath_executables}", + "multipath": true + } +] +``` +```javascript +// Example usage of the template in schema +{ + "type": "dict", + "key": "template_examples", + "label": "Schema template examples", + "children": [ + { + "type": "template", + "name": "example_template", + "template_data": [ + { + "host_label": "Maya 2019", + "host_name": "maya_2019", + "multipath_executables": false + }, + { + "host_label": "Maya 2020", + "host_name": "maya_2020" + }, + { + "host_label": "Maya 2021", + "host_name": "maya_2021" + } + ] + } + ] +} +``` +```javascript +// The same schema defined without templates +{ + "type": "dict", + "key": "template_examples", + "label": "Schema template examples", + "children": [ + { + "type": "raw-json", + "label": "Maya 2019 Environments", + "key": "maya_2019_environments" + }, { + "type": "path", + "key": "maya_2019_executables", + "label": "Maya 2019 - Full paths to executables", + "multiplatform": false, + "multipath": true + }, { + "type": "raw-json", + "label": "Maya 2020 Environments", + "key": "maya_2020_environments" + }, { + "type": "path", + "key": "maya_2020_executables", + "label": "Maya 2020 - Full paths to executables", + "multiplatform": true, + "multipath": true + }, { + "type": "raw-json", + "label": "Maya 2021 Environments", + "key": "maya_2021_environments" + }, { + "type": "path", + "key": "maya_2021_executables", + "label": "Maya 2021 - Full paths to executables", + "multiplatform": true, + "multipath": true + } + ] +} +``` + +Template data can be used only to fill templates in values but not in keys. It is also possible to define default values for unfilled fields to do so one of the items in the list must be a dictionary with key "__default_values__"` and value as dictionary with default key: values (as in example above). +```javascript +{ + ... + // Allowed + "key": "{to_fill}" + ... + // Not allowed + "{to_fill}": "value" + ... +} +``` + +Because formatting values can be only string it is possible to use formatting values which are replaced with different types. +```javascript +// Template data +{ + "template_data": { + "executable_multiplatform": { + "type": "schema", + "name": "my_multiplatform_schema" + } + } +} +// Template content +{ + ... + // Allowed - value is replaced with dictionary + "multiplatform": "{executable_multiplatform}" + ... + // Not allowed - there is no way how it could be replaced + "multiplatform": "{executable_multiplatform}_enhanced_string" + ... +} +``` + +#### dynamic_schema +Dynamic schema item marks a place in settings schema where schemas defined by `BaseModuleSettingsDef` can be placed. 
+- example:
+```javascript
+{
+    "type": "dynamic_schema",
+    "name": "project_settings/global"
+}
+```
+- `BaseModuleSettingsDef` with implemented `get_settings_schemas` can return a dictionary where the key defines a dynamic schema name and the value contains the schemas that will be put there
+- dynamic schemas work almost the same way as templates
+  - one item can be replaced by multiple items (or by 0 items)
+- the goal is to dynamically load settings of OpenPype modules without having their schemas or default values in the core repository
+  - values of these schemas are saved using the `BaseModuleSettingsDef` methods
+- we recommend using `JsonFilesSettingsDef` which has a full implementation of storing default values to json files
+  - it only requires implementing the method `get_settings_root_path` which should return the path to the root directory where the settings schema can be found and default values will be saved
+
+### Basic Dictionary inputs
+These inputs wrap other inputs into a {key: value} relation.
+
+#### dict
+- this is a dictionary type wrapping more inputs with keys defined in the schema
+- may be used as dynamic children (e.g. in [list](#list) or [dict-modifiable](#dict-modifiable))
+  - in that case the only key modifier is `children` which is a list of its keys
+  - USAGE: e.g. a list of dictionaries where each dictionary has the same structure.
+- if it is not used as dynamic children then it must have a `"key"` defined under which its values are stored
+- may be with or without `"label"` (only for GUI)
+  - `"label"` must be set to be able to mark the item as a group with the `"is_group"` key set to True
+- an item with a label can visually wrap its children
+  - this option is enabled by default; to turn it off set `"use_label_wrap"` to `False`
+  - label wrap is collapsible by default
+    - that can be set with key `"collapsible"` to `True`/`False`
+    - with key `"collapsed"` as `True`/`False` it can be set whether it is collapsed when the GUI is opened (Default: `False`)
+  - it is possible to add a lighter background with `"highlight_content"` (Default: `False`)
+    - the lighter background has its limits; after 3-4 nested highlighted items there is not much difference in the color
+- output is dictionary `{the "key": children values}`
+```javascript
+// Example
+{
+    "key": "applications",
+    "type": "dict",
+    "label": "Applications",
+    "collapsible": true,
+    "highlight_content": true,
+    "is_group": true,
+    "is_file": true,
+    "children": [
+        ...ITEMS...
+    ]
+}
+
+// Without label
+{
+    "type": "dict",
+    "key": "global",
+    "children": [
+        ...ITEMS...
+    ]
+}
+
+// When used as widget
+{
+    "type": "list",
+    "key": "profiles",
+    "label": "Profiles",
+    "object_type": {
+        "type": "dict",
+        "children": [
+            {
+                "key": "families",
+                "label": "Families",
+                "type": "list",
+                "object_type": "text"
+            }, {
+                "key": "hosts",
+                "label": "Hosts",
+                "type": "list",
+                "object_type": "text"
+            }
+            ...
+        ]
+    }
+}
+```
+
+#### dict-roots
+- entity can be used only in Project settings
+- keys of the dictionary are based on current project roots
+- they are not updated "live"; it is required to save root changes and then modify values on this entity
+  # TODO do live updates
+```javascript
+{
+    "type": "dict-roots",
+    "key": "roots",
+    "label": "Roots",
+    "object_type": {
+        "type": "path",
+        "multiplatform": true,
+        "multipath": false
+    }
+}
+```
+
+#### dict-conditional
+- similar to `dict` but always has one enum entity available
+  - the enum entity has single selection and its value defines the other children entities
+- each value of the enumerator has defined children that will be used
+  - there is no way to share entities across multiple enum items
+- the value from the enumerator is also stored next to the other values
+  - to define the key under which the enum value will be stored use `enum_key`
+  - `enum_key` must match the key regex and no enum item can have a child with the same key
+  - `enum_label` is the label of the entity for UI purposes
+- enum items are defined with `enum_children`
+  - it's a list where each item represents a single item for the enum
+  - all items in `enum_children` must have at least a `key` key which represents the value stored under `enum_key`
+  - enum items can define `label` for UI purposes
+  - the most important part is that an item can define a `children` key holding the definitions of its children (the `children` value works the same way as in `dict`)
+- to set the default value for `enum_key` use `enum_default`
+- entity must have `"label"` defined if it is not used as a widget
+- it is set as a group if no parent is already a group (its children can't be groups)
+- may be with or without `"label"` (only for GUI)
+  - `"label"` must be set to be able to mark the item as a group with the `"is_group"` key set to True
+- an item with a label can visually wrap its children
+  - this option is enabled by default; to turn it off set `"use_label_wrap"` to `False`
+  - label wrap is collapsible by default
+    - that can be set with key `"collapsible"` to `True`/`False`
+    - with key `"collapsed"` as `True`/`False` it can be set whether it is collapsed when the GUI is opened (Default: `False`)
+  - it is possible to add a lighter background with `"highlight_content"` (Default: `False`)
+    - the lighter background has its limits; after 3-4 nested highlighted items there is not much difference in the color
+- for UI purposes `enum_is_horizontal` was added which will make the combobox appear next to the children inputs instead of on top of them (Default: `False`)
+  - this has the extended ability of `enum_on_right` which will move the combobox to the right side next to the children widgets (Default: `False`)
+- output is dictionary `{the "key": children values}`
+- using this type as the template item for a list type can be used to create infinite hierarchies
+
+```javascript
+// Example
+{
+    "type": "dict-conditional",
+    "key": "my_key",
+    "label": "My Key",
+    "enum_key": "type",
+    "enum_label": "label",
+    "enum_children": [
+        // Each item must be a dictionary with 'key'
+        {
+            "key": "action",
+            "label": "Action",
+            "children": [
+                {
+                    "type": "text",
+                    "key": "key",
+                    "label": "Key"
+                },
+                {
+                    "type": "text",
+                    "key": "label",
+                    "label": "Label"
+                },
+                {
+                    "type": "text",
+                    "key": "command",
+                    "label": "Command"
+                }
+            ]
+        },
+        {
+            "key": "menu",
+            "label": "Menu",
+            "children": [
+                {
+                    "key": "children",
+                    "label": "Children",
+                    "type": "list",
+                    "object_type": "text"
+                }
+            ]
+        },
+        {
+            // Separator does not have children as "separator" value is enough
+            "key": "separator",
+ "label": "Separator" + } + ] +} +``` + +How output of the schema could look like on save: +```javascript +{ + "type": "separator" +} + +{ + "type": "action", + "key": "action_1", + "label": "Action 1", + "command": "run command -arg" +} + +{ + "type": "menu", + "children": [ + "child_1", + "child_2" + ] +} +``` + +### Inputs for setting any kind of value (`Pure` inputs) +- all inputs must have defined `"key"` if are not used as dynamic item + - they can also have defined `"label"` + +#### boolean +- simple checkbox, nothing more to set +```javascript +{ + "type": "boolean", + "key": "my_boolean_key", + "label": "Do you want to use Pype?" +} +``` + +#### number +- number input, can be used for both integer and float + - key `"decimal"` defines how many decimal places will be used, 0 is for integer input (Default: `0`) + - key `"minimum"` as minimum allowed number to enter (Default: `-99999`) + - key `"maxium"` as maximum allowed number to enter (Default: `99999`) +- key `"steps"` will change single step value of UI inputs (using arrows and wheel scroll) +- for UI it is possible to show slider to enable this option set `show_slider` to `true` +```javascript +{ + "type": "number", + "key": "fps", + "label": "Frame rate (FPS)" + "decimal": 2, + "minimum": 1, + "maximum": 300000 +} +``` + +```javascript +{ + "type": "number", + "key": "ratio", + "label": "Ratio" + "decimal": 3, + "minimum": 0, + "maximum": 1, + "show_slider": true +} +``` + +#### text +- simple text input + - key `"multiline"` allows to enter multiple lines of text (Default: `False`) + - key `"placeholder"` allows to show text inside input when is empty (Default: `None`) + +```javascript +{ + "type": "text", + "key": "deadline_pool", + "label": "Deadline pool" +} +``` + +#### path-input +- Do not use this input in schema please (use `path` instead) +- this input is implemented to add additional features to text input +- this is meant to be used in proxy input `path` + +#### raw-json +- a little bit enhanced text input for raw json +- can store dictionary (`{}`) or list (`[]`) but not both + - by default stores dictionary to change it to list set `is_list` to `True` +- has validations of json format +- output can be stored as string + - this is to allow any keys in dictionary + - set key `store_as_string` to `true` + - code using that setting must expected that value is string and use json module to convert it to python types + +```javascript +{ + "type": "raw-json", + "key": "profiles", + "label": "Extract Review profiles", + "is_list": true +} +``` + +#### enum +- enumeration of values that are predefined in schema +- multiselection can be allowed with setting key `"multiselection"` to `True` (Default: `False`) +- values are defined under value of key `"enum_items"` as list + - each item in list is simple dictionary where value is label and key is value which will be stored + - should be possible to enter single dictionary if order of items doesn't matter +- it is possible to set default selected value/s with `default` attribute + - it is recommended to use this option only in single selection mode + - at the end this option is used only when defying default settings value or in dynamic items + +```javascript +{ + "key": "tags", + "label": "Tags", + "type": "enum", + "multiselection": true, + "enum_items": [ + {"burnin": "Add burnins"}, + {"ftrackreview": "Add to Ftrack"}, + {"delete": "Delete output"}, + {"slate-frame": "Add slate frame"}, + {"no-handles": "Skip handle frames"} + ] +} +``` + +#### anatomy-templates-enum +- 
enumeration of all available anatomy template keys +- have only single selection mode +- it is possible to define default value `default` + - `"work"` is used if default value is not specified +- enum values are not updated on the fly it is required to save templates and + reset settings to recache values +```javascript +{ + "key": "host", + "label": "Host name", + "type": "anatomy-templates-enum", + "default": "publish" +} +``` + +#### hosts-enum +- enumeration of available hosts +- multiselection can be allowed with setting key `"multiselection"` to `True` (Default: `False`) +- it is possible to add empty value (represented with empty string) with setting `"use_empty_value"` to `True` (Default: `False`) +- it is possible to set `"custom_labels"` for host names where key `""` is empty value (Default: `{}`) +- to filter host names it is required to define `"hosts_filter"` which is list of host names that will be available + - do not pass empty string if `use_empty_value` is enabled + - ignoring host names would be more dangerous in some cases +```javascript +{ + "key": "host", + "label": "Host name", + "type": "hosts-enum", + "multiselection": false, + "use_empty_value": true, + "custom_labels": { + "": "N/A", + "nuke": "Nuke" + }, + "hosts_filter": [ + "nuke" + ] +} +``` + +#### apps-enum +- enumeration of available application and their variants from system settings + - applications without host name are excluded +- can be used only in project settings +- has only `multiselection` +- used only in project anatomy +```javascript +{ + "type": "apps-enum", + "key": "applications", + "label": "Applications" +} +``` + +#### tools-enum +- enumeration of available tools and their variants from system settings +- can be used only in project settings +- has only `multiselection` +- used only in project anatomy +```javascript +{ + "type": "tools-enum", + "key": "tools_env", + "label": "Tools" +} +``` + +#### task-types-enum +- enumeration of task types from current project +- enum values are not updated on the fly and modifications of task types on project require save and reset to be propagated to this enum +- has set `multiselection` to `True` but can be changed to `False` in schema + +#### deadline_url-enum +- deadline module specific enumerator using deadline system settings to fill it's values +- TODO: move this type to deadline module + +### Inputs for setting value using Pure inputs +- these inputs also have required `"key"` +- attribute `"label"` is required in few conditions + - when item is marked `as_group` or when `use_label_wrap` +- they use Pure inputs "as widgets" + +#### list +- output is list +- items can be added and removed +- items in list must be the same type +- to wrap item in collapsible widget with label on top set `use_label_wrap` to `True` + - when this is used `collapsible` and `collapsed` can be set (same as `dict` item does) +- type of items is defined with key `"object_type"` +- there are 2 possible ways how to set the type: + 1.) dictionary with item modifiers (`number` input has `minimum`, `maximum` and `decimals`) in that case item type must be set as value of `"type"` (example below) + 2.) item type name as string without modifiers (e.g. [text](#text)) + 3.) enhancement of 1.) there is also support of `template` type but be carefull about endless loop of templates + - goal of using `template` is to easily change same item definitions in multiple lists + +1.) 
with item modifiers +```javascript +{ + "type": "list", + "key": "exclude_ports", + "label": "Exclude ports", + "object_type": { + "type": "number", # number item type + "minimum": 1, # minimum modifier + "maximum": 65535 # maximum modifier + } +} +``` + +2.) without modifiers +```javascript +{ + "type": "list", + "key": "exclude_ports", + "label": "Exclude ports", + "object_type": "text" +} +``` + +3.) with template definition +```javascript +// Schema of list item where template is used +{ + "type": "list", + "key": "menu_items", + "label": "Menu Items", + "object_type": { + "type": "template", + "name": "template_object_example" + } +} + +// WARNING: +// In this example the template use itself inside which will work in `list` +// but may cause an issue in other entity types (e.g. `dict`). + +'template_object_example.json' : +[ + { + "type": "dict-conditional", + "use_label_wrap": true, + "collapsible": true, + "key": "menu_items", + "label": "Menu items", + "enum_key": "type", + "enum_label": "Type", + "enum_children": [ + { + "key": "action", + "label": "Action", + "children": [ + { + "type": "text", + "key": "key", + "label": "Key" + } + ] + }, { + "key": "menu", + "label": "Menu", + "children": [ + { + "key": "children", + "label": "Children", + "type": "list", + "object_type": { + "type": "template", + "name": "template_object_example" + } + } + ] + } + ] + } +] +``` + +#### dict-modifiable +- one of dictionary inputs, this is only used as value input +- items in this input can be removed and added same way as in `list` input +- value items in dictionary must be the same type +- required keys may be defined under `"required_keys"` + - required keys must be defined as a list (e.g. `["key_1"]`) and are moved to the top + - these keys can't be removed or edited (it is possible to edit label if item is collapsible) +- type of items is defined with key `"object_type"` + - there are 2 possible ways how to set the object type (Examples below): + 1. just a type name as string without modifiers (e.g. `"text"`) + 2. full types with modifiers as dictionary(`number` input has `minimum`, `maximum` and `decimals`) in that case item type must be set as value of `"type"` +- this input can be collapsible + - `"use_label_wrap"` must be set to `True` (Default behavior) + - that can be set with key `"collapsible"` as `True`/`False` (Default: `True`) + - with key `"collapsed"` as `True`/`False` can be set that is collapsed when GUI is opened (Default: `False`) + +1. **Object type** without modifiers +```javascript +{ + "type": "dict-modifiable", + "object_type": "text", + "is_group": true, + "key": "templates_mapping", + "label": "Muster - Templates mapping", + "is_file": true +} +``` + +2. 
**Object type** with item modifiers
+```javascript
+{
+    "type": "dict-modifiable",
+    "object_type": {
+        "type": "number",
+        "minimum": 0,
+        "maximum": 300
+    },
+    "is_group": true,
+    "key": "templates_mapping",
+    "label": "Muster - Templates mapping",
+    "is_file": true
+}
+```
+
+#### path
+- input for paths, uses `path-input` internally
+- has 2 input modifiers: `"multiplatform"` and `"multipath"`
+  - `"multiplatform"` - adds `"windows"`, `"linux"` and `"darwin"` path inputs (result is a dictionary)
+  - `"multipath"` - makes it possible to enter multiple paths
+  - if both are enabled the result is a dictionary with lists
+
+```javascript
+{
+    "type": "path",
+    "key": "ffmpeg_path",
+    "label": "FFmpeg path",
+    "multiplatform": true,
+    "multipath": true
+}
+```
+
+#### list-strict
+- input for a strict number of items in a list
+- each child item can be a different type with different possible modifiers
+- it is possible to display them in a horizontal or vertical layout
+  - key `"horizontal"` as `True`/`False` (Default: `True`)
+- each child may have a `"label"` defined which is shown next to the input
+  - the label does not reflect modifications or overrides (TODO)
+- children items are defined under key `"object_types"` which is a list of dictionaries
+  - the key `"children"` is not used because it is used for hierarchy validations in the schema
+- USAGE: for colors, transformations, etc. Custom numbers and different modifiers give the ability to define whether a color is HUE or RGB, 0-255, 0-1, 0-100 etc.
+
+```javascript
+{
+    "type": "list-strict",
+    "key": "color",
+    "label": "Color",
+    "object_types": [
+        {
+            "label": "Red",
+            "type": "number",
+            "minimum": 0,
+            "maximum": 255,
+            "decimal": 0
+        }, {
+            "label": "Green",
+            "type": "number",
+            "minimum": 0,
+            "maximum": 255,
+            "decimal": 0
+        }, {
+            "label": "Blue",
+            "type": "number",
+            "minimum": 0,
+            "maximum": 255,
+            "decimal": 0
+        }, {
+            "label": "Alpha",
+            "type": "number",
+            "minimum": 0,
+            "maximum": 1,
+            "decimal": 6
+        }
+    ]
+}
+```
+
+#### color
+- pre-implemented entity to store and load color values
+- the entity stores and expects a list of 4 integers in range 0-255
+  - the integers represent rgba [Red, Green, Blue, Alpha]
+- has a modifier `"use_alpha"` which can be `True`/`False`
+  - alpha is always `255` if it is set to `False`, and the alpha slider is not visible in the UI
+
+```javascript
+{
+    "type": "color",
+    "key": "bg_color",
+    "label": "Background Color"
+}
+```
+
+### Anatomy
+Anatomy represents data stored on the project document. The item takes care of the **Project Anatomy**.
+
+#### anatomy
+- the entity is just an enhanced [dict](#dict) item
+- anatomy always has all keys overridden with overrides
+
+### Noninteractive items
+Items used only for UI purposes.
+
+#### label
+- adds a label with a note or explanations
+- it is possible to use html tags inside the label
+- set `work_wrap` to `true`/`false` if you want to enable word wrapping in the UI (default: `false`)
+
+```javascript
+{
+    "type": "label",
+    "label": "RED LABEL: Normal label"
+}
+```
+
+#### separator
+- legacy name is `splitter` (still usable)
+- visual separator of items (more a divider than a separator)
+
+```javascript
+{
+    "type": "separator"
+}
+```
+
+### Proxy wrappers
+- should wrap multiple inputs only visually
+- these do not have a `"key"` key and do not allow the `"is_file"` or `"is_group"` modifiers to be enabled
+- can't be used as a widget (the first item in e.g. `list`, `dict-modifiable`, etc.)
+
+#### form
+- wraps inputs into a form-like layout
+- should be used only for Pure inputs
+
+```javascript
+{
+    "type": "dict-form",
+    "children": [
+        {
+            "type": "text",
+            "key": "deadline_department",
+            "label": "Deadline department"
+        }, {
+            "type": "number",
+            "key": "deadline_priority",
+            "label": "Deadline priority"
+        }, {
+            ...
+        }
+    ]
+}
+```
+
+
+#### collapsible-wrap
+- wraps inputs into a collapsible widget
+  - looks like `dict` but does not hold a `"key"`
+- should be used only for Pure inputs
+
+```javascript
+{
+    "type": "collapsible-wrap",
+    "label": "Collapsible example",
+    "children": [
+        {
+            "type": "text",
+            "key": "_example_input_collapsible",
+            "label": "Example input in collapsible wrapper"
+        }, {
+            ...
+        }
+    ]
+}
+```
+
+
+## How to add new settings
+Always start by modifying or adding a new schema and don't worry about values. When you think the schema is ready to use, launch OpenPype settings in development mode using `poetry run python ./start.py settings --dev` or the prepared script in `~/openpype/tools/run_settings(.sh|.ps1)`. Settings opened in development mode have the checkbox `Modify defaults` available in the bottom left corner. When it is checked, default values are modified and saved on `Save`. This is the recommended approach for creating default settings instead of direct modification of files.
+
+![Modify default settings](assets/settings_dev.png)
diff --git a/website/docs/dev_testing.md b/website/docs/dev_testing.md
index cab298ae37..7136ceb479 100644
--- a/website/docs/dev_testing.md
+++ b/website/docs/dev_testing.md
@@ -14,6 +14,11 @@ But many tests should yet be created! - installed DCC you want to test - `mongorestore` on a PATH
+You can check that `mongorestore` is available by running the following in a console (or cmd); it shouldn't fail and you should see the version of the utility:
+```commandline
+mongorestore --version
+```
+
If you would like just to experiment with provided integration tests, and have particular DCC installed on your machine, you could run test for this host by: - From source: ``` ``` - From build: ```
-- ${OPENPYPE_BUILD}/openpype_console run {ABSOLUTE_PATH_OPENPYPE_ROOT}/tests/integration/hosts/nuke`
+- ${OPENPYPE_BUILD}/openpype_console runtests {ABSOLUTE_PATH_OPENPYPE_ROOT}/tests/integration/hosts/nuke`
``` Modify tests path argument to limit which tests should be run (`../tests/integration` will run all implemented integration tests).
diff --git a/website/docs/hosts-maya.md b/website/docs/hosts-maya.md
deleted file mode 100644
index 0ee0c2d86b..0000000000
--- a/website/docs/hosts-maya.md
+++ /dev/null
@@ -1,33 +0,0 @@
-### Tools
-Creator
-Publisher
-Loader
-Scene Inventory
-Look assigner
-Workfiles
-
-### Plugins
-Deadline
-Muster
-Yeti
-Arnold
-Vray
-Redshift
-
-### Families
-Model
-Look
-Rig
-Animation
-Cache
-Camera
-Assembly
-MayaAscii (generic scene)
-Setdress
-RenderSetup
-Review
-arnoldStandin
-vrayProxy
-vrayScene
-yetiCache
-yetiRig
diff --git a/website/docs/manager_ftrack.md b/website/docs/manager_ftrack.md
index defbb4b48f..b5ca167838 100644
--- a/website/docs/manager_ftrack.md
+++ b/website/docs/manager_ftrack.md
@@ -4,7 +4,7 @@ title: Ftrack sidebar_label: Project Manager ---
-Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and it's basic principles.
If you're new to Ftrack, we recommend having a thorough look at [Ftrack Official Documentation](http://ftrack.rtd.ftrack.com/en/stable/). +Ftrack is currently the main project management option for OpenPype. This documentation assumes that you are familiar with Ftrack and it's basic principles. If you're new to Ftrack, we recommend having a thorough look at [Ftrack Official Documentation](https://help.ftrack.com/en/). ## Project management Setting project attributes is the key to properly working pipeline. @@ -31,7 +31,7 @@ This process describes how data from Ftrack will get into Avalon database. ### How to synchronize You can trigger synchronization manually using [Sync To Avalon](manager_ftrack_actions.md#sync-to-avalon) action. -Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](#prepare-ftrack-for-openpype), the project should have custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with event server. +Synchronization can also be automated with OpenPype's [event server](#event-server) and synchronization events. If your Ftrack is [prepared for OpenPype](module_ftrack.md#prepare-ftrack-for-openpype), the project should have custom attribute `Avalon auto-sync`. Check the custom attribute to allow auto-updates with event server. :::tip Always use `Sync To Avalon` action before you enable `Avalon auto-sync`! diff --git a/website/docs/manager_naming.md b/website/docs/manager_naming.md deleted file mode 100644 index bf822fbeb4..0000000000 --- a/website/docs/manager_naming.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -id: manager_naming -title: Naming Conventions -sidebar_label: Naming Conventions ---- - -:::note -This naming convention holds true for most of our pipeline. Please match it as close as possible even for projects and files that might be outside of pipeline scope at this point. Small errors count! The reason for given formatting is to allow people to understand the file at glance and that a script or a program can easily get meaningful information from your files without errors. -::: - -## General rules - -For more detailed rules and different file types, have a look at naming conventions for scenes and assets - -- Every file starts with file code based on a project it belongs to e.g. ‘tst_’, ‘drm_’ -- Optional subversion and comment always comes after the major version. v##.subversion_comment. -- File names can only be composed of letters, numbers, underscores `_` and dots “.” -- You can use snakeCase or CamelCase if you need more words in a section.  thisIsLongerSentenceInComment -- No spaces in filenames. Ever! -- Frame numbers are always separated by a period ”.” -- If you're not sure use this template: - -## Work files - -**`{code}_{shot}_{task}_v001.ext`** - -**`{code}_{asset}_{task}_v001.ext`** - -**Examples:** - - prj_sh010_enviro_v001.ma - prj_sh010_animation_v001.ma - prj_sh010_comp_v001.nk - - prj_bob_modelling_v001.ma - prj_bob_rigging_v001.ma - prj_bob_lookdev_v001.ma - -:::info -In all of the examples anything enclosed in curly brackets  { } is compulsory in the name. -Anything in square brackets [ ] is optional. 
-::: - -## Published Assets - -**`{code}_{asset}_{family}_{subset}_{version}_[comment].ext`** - -**Examples:** - - prj_bob_model_main_v01.ma - prj_bob_model_hires_v01.ma - prj_bob_model_main_v01_clothes.ma - prj_bob_model_main_v01_body.ma - prj_bob_rig_main_v01.ma - Prj_bob_look_main_v01.ma - Prj_bob_look_wet_v01.ma diff --git a/website/docs/module_ftrack.md b/website/docs/module_ftrack.md index fd9687ed9d..6d5529b512 100644 --- a/website/docs/module_ftrack.md +++ b/website/docs/module_ftrack.md @@ -13,7 +13,7 @@ Ftrack is currently the main project management option for OpenPype. This docume ## Prepare Ftrack for OpenPype ### Server URL -If you want to connect Ftrack to OpenPype you might need to make few changes in Ftrack settings. These changes would take a long time to do manually, so we prepared a few Ftrack actions to help you out. First, you'll need to launch OpenPype settings, enable [Ftrack module](admin_settings_system.md#Ftrack), and enter the address to your Ftrack server. +If you want to connect Ftrack to OpenPype you might need to make few changes in Ftrack settings. These changes would take a long time to do manually, so we prepared a few Ftrack actions to help you out. First, you'll need to launch OpenPype settings, enable [Ftrack module](admin_settings_system.md#Ftrack), and enter the address to your Ftrack server. ### Login Once your server is configured, restart OpenPype and you should be prompted to enter your [Ftrack credentials](artist_ftrack.md#How-to-use-Ftrack-in-OpenPype) to be able to run our Ftrack actions. If you are already logged in to Ftrack in your browser, it is enough to press `Ftrack login` and it will connect automatically. @@ -26,7 +26,7 @@ You can only use our Ftrack Actions and publish to Ftrack if each artist is logg ### Custom Attributes After successfully connecting OpenPype with you Ftrack, you can right click on any project in Ftrack and you should see a bunch of actions available. The most important one is called `OpenPype Admin` and contains multiple options inside. -To prepare Ftrack for working with OpenPype you'll need to run [OpenPype Admin - Create/Update Avalon Attributes](manager_ftrack_actions.md#create-update-avalon-attributes), which creates and sets the Custom Attributes necessary for OpenPype to function. +To prepare Ftrack for working with OpenPype you'll need to run [OpenPype Admin - Create/Update Custom Attributes](manager_ftrack_actions.md#create-update-avalon-attributes), which creates and sets the Custom Attributes necessary for OpenPype to function. @@ -34,7 +34,7 @@ To prepare Ftrack for working with OpenPype you'll need to run [OpenPype Admin - Ftrack Event Server is the key to automation of many tasks like _status change_, _thumbnail update_, _automatic synchronization to Avalon database_ and many more. Event server should run at all times to perform the required processing as it is not possible to catch some of them retrospectively with enough certainty. ### Running event server -There are specific launch arguments for event server. With `openpype_console eventserver` you can launch event server but without prior preparation it will terminate immediately. The reason is that event server requires 3 pieces of information: _Ftrack server url_, _paths to events_ and _credentials (Username and API key)_. Ftrack server URL and Event path are set from OpenPype's environments by default, but the credentials must be done separatelly for security reasons. +There are specific launch arguments for event server. 
With `openpype_console module ftrack eventserver` you can launch event server but without prior preparation it will terminate immediately. The reason is that event server requires 3 pieces of information: _Ftrack server url_, _paths to events_ and _credentials (Username and API key)_. Ftrack server URL and Event path are set from OpenPype's environments by default, but the credentials must be done separatelly for security reasons. @@ -53,7 +53,7 @@ There are specific launch arguments for event server. With `openpype_console eve - **`--ftrack-api-key "00000aaa-11bb-22cc-33dd-444444eeeee"`** : User's API key - `--ftrack-url "https://yourdomain.ftrackapp.com/"` : Ftrack server URL _(it is not needed to enter if you have set `FTRACK_SERVER` in OpenPype' environments)_ -So if you want to use OpenPype's environments then you can launch event server for first time with these arguments `openpype_console.exe eventserver --ftrack-user "my.username" --ftrack-api-key "00000aaa-11bb-22cc-33dd-444444eeeee" --store-credentials`. Since that time, if everything was entered correctly, you can launch event server with `openpype_console.exe eventserver`. +So if you want to use OpenPype's environments then you can launch event server for first time with these arguments `openpype_console.exe module ftrack eventserver --ftrack-user "my.username" --ftrack-api-key "00000aaa-11bb-22cc-33dd-444444eeeee" --store-credentials`. Since that time, if everything was entered correctly, you can launch event server with `openpype_console.exe module ftrack eventserver`. @@ -72,7 +72,7 @@ We do not recommend setting your Ftrack user and api key environments in a persi ### Where to run event server -We recommend you to run event server on stable server machine with ability to connect to Avalon database and Ftrack web server. Best practice we recommend is to run event server as service. It can be Windows or Linux. +We recommend you to run event server on stable server machine with ability to connect to OpenPype database and Ftrack web server. Best practice we recommend is to run event server as service. It can be Windows or Linux. :::important Event server should **not** run more than once! It may cause major issues. @@ -99,11 +99,10 @@ Event server should **not** run more than once! It may cause major issues. - add content to the file: ```sh #!/usr/bin/env bash -export OPENPYPE_DEBUG=1 export OPENPYPE_MONGO= pushd /mnt/path/to/openpype -./openpype_console eventserver --ftrack-user --ftrack-api-key +./openpype_console module ftrack eventserver --ftrack-user --ftrack-api-key --debug ``` - change file permission: `sudo chmod 0755 /opt/openpype/run_event_server.sh` @@ -140,14 +139,13 @@ WantedBy=multi-user.target - create service file: `openpype-ftrack-eventserver.bat` -- add content to the service file: +- add content to the service file: ```sh @echo off -set OPENPYPE_DEBUG=1 set OPENPYPE_MONGO= pushd \\path\to\openpype -openpype_console.exe eventserver --ftrack-user --ftrack-api-key +openpype_console.exe module ftrack eventserver --ftrack-user --ftrack-api-key --debug ``` - download and install `nssm.cc` - create Windows service according to nssm.cc manual @@ -174,7 +172,7 @@ This event updates entities on their changes Ftrack. When new entity is created Deleting an entity by Ftrack's default is not processed for security reasons _(to delete entity use [Delete Asset/Subset action](manager_ftrack_actions.md#delete-asset-subset))_. 
::: -### Synchronize Hierarchical and Entity Attributes +### Synchronize Hierarchical and Entity Attributes Auto-synchronization of hierarchical attributes from Ftrack entities. @@ -190,7 +188,7 @@ Change status of next task from `Not started` to `Ready` when previous task is a Multiple detailed rules for next task update can be configured in the settings. -### Delete Avalon ID from new entity +### Delete Avalon ID from new entity Is used to remove value from `Avalon/Mongo Id` Custom Attribute when entity is created. @@ -215,7 +213,7 @@ This event handler allows setting of different status to a first created Asset V This is useful for example if first version publish doesn't contain any actual reviewable work, but is only used for roundtrip conform check, in which case this version could receive status `pending conform` instead of standard `pending review` ### Update status on next task -Change status on next task by task types order when task status state changed to "Done". All tasks with the same Task mapping of next task status changes From → To. Some status can be ignored. +Change status on next task by task types order when task status state changed to "Done". All tasks with the same Task mapping of next task status changes From → To. Some status can be ignored. ## Publish plugins @@ -238,7 +236,7 @@ Add Ftrack Family: enabled #### Advanced adding if additional families present -In special cases adding 'ftrack' based on main family ('Families' set higher) is not enough. +In special cases adding 'ftrack' based on main family ('Families' set higher) is not enough. (For example upload to Ftrack for 'plate' main family should only happen if 'review' is contained in instance 'families', not added in other cases. ) -![Collect Ftrack Family](assets/ftrack/ftrack-collect-advanced.png) \ No newline at end of file +![Collect Ftrack Family](assets/ftrack/ftrack-collect-advanced.png) diff --git a/website/docs/module_kitsu.md b/website/docs/module_kitsu.md new file mode 100644 index 0000000000..7738ee1ce2 --- /dev/null +++ b/website/docs/module_kitsu.md @@ -0,0 +1,44 @@ +--- +id: module_kitsu +title: Kitsu Administration +sidebar_label: Kitsu +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Kitsu is a great open source production tracker and can be used for project management instead of Ftrack. This documentation assumes that you are familiar with Kitsu and it's basic principles. If you're new to Kitsu, we recommend having a thorough look at [Kitsu Official Documentation](https://kitsu.cg-wire.com/). + +## Prepare Kitsu for OpenPype + +### Server URL +If you want to connect Kitsu to OpenPype you have to set the `Server` url in Kitsu settings. And that's all! +This setting is available for all the users of the OpenPype instance. + +## Synchronize +Updating OP with Kitsu data is executed running the `sync-service`, which requires to provide your Kitsu credentials with `-l, --login` and `-p, --password` or by setting the environment variables `KITSU_LOGIN` and `KITSU_PWD`. This process will request data from Kitsu and create/delete/update OP assets. +Once this sync is done, the thread will automatically start a loop to listen to Kitsu events. + +```bash +openpype_console module kitsu sync-service -l me@domain.ext -p my_password +``` + +### Events listening +Listening to Kitsu events is the key to automation of many tasks like _project/episode/sequence/shot/asset/task create/update/delete_ and some more. 
Events listening should run at all times to perform the required processing, as it is not possible to catch some of the events retrospectively with strong reliability. If the listener has timed out, you must relaunch the `sync-service` command to run the synchronization step again.
+
+The connection token is refreshed every week.
+
+### Push to Kitsu
+A utility function is provided to help update Kitsu data (a.k.a the Zou database) with OpenPype data if publishing to the production tracker hasn't been possible for some time. Running `push-to-zou` will create the data on behalf of the user.
+:::caution
+This functionality cannot deal with all cases and is not error proof, some intervention by a human being might be required.
+:::
+
+```bash
+openpype_console module kitsu push-to-zou -l me@domain.ext -p my_password
+```
+
+## Q&A
+### Is it safe to rename an entity from Kitsu?
+Absolutely! Entities are linked by their unique IDs between the two databases.
+But renaming from the OP's Project Manager won't apply the change to Kitsu; it'll be overridden during the next synchronization.
diff --git a/website/docs/module_site_sync.md b/website/docs/module_site_sync.md
index 78f482352e..2e9cf01102 100644
--- a/website/docs/module_site_sync.md
+++ b/website/docs/module_site_sync.md
@@ -123,6 +123,10 @@ To get working connection to Google Drive there are some necessary steps: - add new site back in OpenPype Settings, name as you want, provider needs to be 'gdrive' - distribute credentials file via shared mounted disk location
+:::note
+If you are using a regular personal GDrive for testing, don't forget to add `/My Drive` as the prefix in the root configuration. Business accounts and shared drives don't need this.
+:::
+
### SFTP SFTP provider is used to connect to SFTP server. Currently authentication with `user:password` or `user:ssh key` is implemented.
diff --git a/website/docs/module_slack.md b/website/docs/module_slack.md
index 3a2842da63..1999912fdc 100644
--- a/website/docs/module_slack.md
+++ b/website/docs/module_slack.md
@@ -94,6 +94,16 @@ Few keys also have Capitalized and UPPERCASE format. Values will be modified acc Here you can find review {review_filepath} ```
+##### Dynamic message for artists
+If artists use a host with the new Publisher implemented (the new publishing UI, implemented in Tray Publisher, Adobe products etc.), it is possible for
+them to add an additional message (for example a notification for specific users; artists must provide a proper user id prefixed with '@').
+The additional message will be sent only if at least one profile, i.e. at least one target channel, is configured.
+All available template keys (see above) can be used here as placeholders too.
+
+#### User or group notifications
+A message template or dynamic data may contain a user or group notification. It must be in the format @artist.name, or '@John Doe' / "@admin group" for a display name containing spaces.
+If a value prefixed with @ is not resolved to a Slack user, the message will contain the same raw value (not translated by Slack into a link and a proper mention).
+
#### Message retention Currently no purging of old messages is implemented in Openpype. Admins of Slack should set their own retention of messages and files per channel.
+
#### Message retention
Currently no purging of old messages is implemented in OpenPype. Admins of Slack should set their own retention of messages and files per channel
(see https://slack.com/help/articles/203457187-Customize-message-and-file-retention-policies)
diff --git a/website/docs/project_settings/settings_project_global.md b/website/docs/project_settings/settings_project_global.md
index 24ea09b6fb..37fed93e69 100644
--- a/website/docs/project_settings/settings_project_global.md
+++ b/website/docs/project_settings/settings_project_global.md
@@ -13,6 +13,39 @@ Project settings can have project specific values. Each new project is using stu
Projects always use default project values unless they have a [project override](../admin_settings#project-overrides) (orange colour). Any changes in the default project may affect all existing projects.
:::

+## Color Management (ImageIO)
+
+:::info Default OCIO config
+OpenPype distributes its own OCIO configs. Those can be found in `{openpype install dir}/{version}/vendor/bin/ocioconfig/OpenColorIOConfigs`. Windows example: `C:\Program Files (x86)\OpenPype\3.14.0\vendor\bin\ocioconfig\OpenColorIOConfigs`
+:::
+
+### Using OCIO config
+The global config path is set by default to the OpenPype distributed configs. At the moment there are only two - **aces_1.2** and **nuke-default**. Since this path input is not platform specific, it is required to use at least an environment variable to point to a platform-specific config root directory. The order of paths matters: the first path which is found and exists is used.
+
+Each OCIO config path input supports formatting using environment variables and [anatomy template keys](../admin_settings_project_anatomy#available-template-keys). The default global OCIO config path is `{OPENPYPE_ROOT}/vendor/bin/ocioconfig/OpenColorIOConfigs/aces_1.2/config.ocio`.
+
+If the project settings for a particular host have their own OCIO config **enabled** and set to at least one path, and that path exists, it overrides the global OCIO config for that host.
+
+**For example**
+
+Project nuke-specific OCIO config: `project_settings/nuke/imageio/ocio_config`
+
+If the config path is defined for a particular shot target with the following path inputs:
+1. `{root[work]}/{project[name]}/{hierarchy}/{asset}/config/aces.ocio`
+2. `{root[work]}/{project[name]}/{hierarchy}/config/aces.ocio`
+
+then the path resolving procedure will look at the 1st path first; if it does not exist it will try the 2nd, and if even that does not exist it will fall back to the global default.
+
+### Using File rules
+File rules are inspired by the [OCIO v2 configuration](https://opencolorio.readthedocs.io/en/latest/guides/authoring/rules.html). Each rule has a unique name which can be overridden by host-specific _File rules_ (example: `project_settings/nuke/imageio/file_rules/rules`).
+
+The _input pattern_ matching uses REGEX expression syntax (try [regexr.com](https://regexr.com/)). The matching rules are intended to be used during publishing or loading of a representation. Since the publishing procedure runs before the integrator formats the publish template path, make sure the pattern also matches the work render path.
+
+:::warning Colorspace name input
+The **colorspace name** value is a raw string input and no validation is run after saving the project settings. We recommend opening the specified `config.ocio` file and copy-pasting the exact colorspace names.
+:::
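+
+As a quick sanity check of an _input pattern_ before saving it, the regex can be tested against a work render path outside of OpenPype. A minimal sketch; the pattern and file names below are made-up examples, not shipped defaults:
+
+```bash
+# grep -E prints the line when the REGEX matches, nothing when it doesn't
+echo "sh010_beauty.v001.1001.exr" | grep -E '.*beauty.*\.exr$'   # matches
+echo "sh010_beauty.v001.1001.mov" | grep -E '.*beauty.*\.exr$'   # no match
+```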

## Profile filters

Many of the settings are using a concept of **Profile filters**

@@ -135,6 +168,12 @@ Profile may generate multiple outputs from a single input. Each output must define a unique filename suffix.
- set alpha to `0` to not use this option at all (in most cases the background stays black)
- other than `0` alpha will draw the color as background

+- **`Additional filtering`**
+  - Profile filtering defines which group of output definitions is used, but output definitions may require more specific filters on their own.
+  - They may filter by subset name (regex can be used) or by publish families. Publish families are more complex, as they require knowledge of the code base.
+  - Filtering by custom tags -> this is used to target output definitions from other extractors using settings (at this moment only the Nuke bake extractor can target using custom tags).
+    - Nuke extractor settings path: `project_settings/nuke/publish/ExtractReviewDataMov/outputs/baking/add_custom_tags`
+  - Filtering by input length. The input may be a video, a sequence or a single image. It is possible that `.mp4` should be created only when the input is a video or a sequence, and a review `.png` when the input is a single frame. In some cases the output should be created whether the input is single frame or multi frame.

### IntegrateAssetNew

@@ -156,7 +195,7 @@ Applicable context filters:

#### Subset grouping profiles

-Published subsets might be grouped together for cleaner and easier selection in **[Loader](artist_tools.md#subset-groups)**
+Published subsets might be grouped together for cleaner and easier selection in the **[Subset Manager](artist_tools_subset_manager)**

The group name is chosen with the use of [profile filtering](#profile-filters)

@@ -173,7 +212,7 @@ Applicable context filters:

Settings for OpenPype tools.

## Creator
-Settings related to [Creator tool](artist_tools.md#details).
+Settings related to the [Creator tool](artist_tools_creator).

### Subset name profiles
![global_tools_creator_subset_template](assets/global_tools_creator_subset_template.png)

@@ -211,4 +250,4 @@ All settings related to Workfile tool.
### Open last workfile at launch
This feature allows you to define a rule for each task/host, or to toggle the feature globally for all tasks, as they are visible in the picture.
-![global_tools_workfile_open_last_version](assets/global_tools_workfile_open_last_version.png)
\ No newline at end of file
+![global_tools_workfile_open_last_version](assets/global_tools_workfile_open_last_version.png)
diff --git a/website/docs/system_introduction.md b/website/docs/system_introduction.md
index 71c5d64aa8..05627b5359 100644
--- a/website/docs/system_introduction.md
+++ b/website/docs/system_introduction.md
@@ -17,7 +17,7 @@ various usage scenarios.

You can find a detailed breakdown of technical requirements [here](dev_requirements), but in general OpenPype should be able to operate in most studios fairly quickly. The main obstacles are usually related to workflows and habits that
-might now be fully compatible with what OpenPype is expecting or enforcing.
+might not be fully compatible with what OpenPype is expecting or enforcing.

It is recommended to go through the artist [key concepts](artist_concepts) to get an idea about the basics. Keep in mind that if you run into any workflows that are not supported, it's usually just because we haven't hit that particular case, and it can most likely be added upon request.

@@ -48,24 +48,3 @@ to the table
- Some DCCs do not support using environment variables in file paths. This will make it very hard to maintain full multiplatform compatibility as well as variable storage roots.
- Relying on a VPN connection and using it to work directly off network storage will be painfully slow.
- - -## Repositories - -### [OpenPype](https://github.com/pypeclub/pype) - -This is where vast majority of the code that works with your data lives. It acts -as Avalon-Config, if we're speaking in avalon terms. - -Avalon gives us the ability to work with a certain host, say Maya, in a standardized manner, but OpenPype defines **how** we work with all the data, allows most of the behavior to be configured on a very granular level and provides a comprehensive build and installation tools for it. - -Thanks to that, we are able to maintain one codebase for vast majority of the features across all our clients deployments while keeping the option to tailor the pipeline to each individual studio. - -### [Avalon-core](https://github.com/pypeclub/avalon-core) - -Avalon-core is the heart of OpenPype. It provides the base functionality including key GUIs (albeit expanded and modified by us), database connection, standards for data structures, working with entities and some universal tools. - -Avalon is being actively developed and maintained by a community of studios and TDs from around the world, with Pype Club team being an active contributor as well. - -Due to the extensive work we've done on OpenPype and the need to react quickly to production needs, we -maintain our own fork of avalon-core, which is kept up to date with upstream changes as much as possible. diff --git a/website/docs/upgrade_notes.md b/website/docs/upgrade_notes.md deleted file mode 100644 index 8231cf997d..0000000000 --- a/website/docs/upgrade_notes.md +++ /dev/null @@ -1,165 +0,0 @@ ---- -id: update_notes -title: Update Notes -sidebar_label: Update Notes ---- - - - -## **Updating to 2.13.0** ## - -### MongoDB - -**Must** - -Due to changes in how tasks are stored in the database (we added task types and possibility of more arbitrary data.), we must take a few precautions when updating. -1. Make sure that ftrack event server with sync to avalon is NOT running during the update. -2. Any project that is to be worked on with 2.13 must be synced from ftrack to avalon with the updated sync to avalon action, or using and updated event server sync to avalon event. - -If 2.12 event servers runs when trying to update the project sync with 2.13, it will override any changes. - -### Nuke Studio / hiero - -Make sure to re-generate pype tags and replace any `task` tags on your shots with the new ones. This will allow you to make multiple tasks of the same type, but with different task name at the same time. - -### Nuke - -Due to a minor update to nuke write node, artists will be prompted to update their write nodes before being able to publish any old shots. There is a "repair" action for this in the publisher, so it doesn't have to be done manually. - - - - -## **Updating to 2.12.0** ## - -### Apps and tools - -**Must** - -run Create/Update Custom attributes action (to update custom attributes group) -check if studio has set custom intent values and move values to ~/config/presets/global/intent.json - -**Optional** - -Set true/false on application and tools by studio usage (eliminate app list in Ftrack and time for registering Ftrack ations) - - - - -## **Updating to 2.11.0** ## - -### Maya in deadline - -We added or own maya deadline plugin to make render management easier. It operates the same as standard mayaBatch in deadline, but allow us to separate Pype sumitted jobs from standard submitter. 
You'll need to follow this guide to update this [install pype deadline](https://pype.club/docs/admin_hosts#pype-dealine-supplement-code) - - - - -## **Updating to 2.9.0** ## - -### Review and Burnin PRESETS - -This release introduces a major update to working with review and burnin presets. They can now be much more granular and can target extremely specific usecases. The change is backwards compatible with previous format of review and burnin presets, however we highly recommend updating all the presets to the new format. Documentation on what this looks like can be found on pype main [documentation page](https://pype.club/docs/admin_presets_plugins#publishjson). - -### Multiroot and storages - -With the support of multiroot projects, we removed the old `storage.json` from configuration and replaced it with simpler `config/anatomy/roots.json`. This is a required change, but only needs to be done once per studio during the update to 2.9.0. [Read More](https://pype.club/docs/next/admin_config#roots) - - - - -## **Updating to 2.7.0** ## - -### Master Versions -To activate `master` version workflow you need to activate `integrateMasterVersion` plugin in the `config/presets/plugins/global/publish.json` - -``` -"IntegrateMasterVersion": {"enabled": true}, -``` - -### Ftrack - -Make sure that `intent` attributes in ftrack is set correctly. It should follow this setup unless you have your custom values -``` -{ - "label": "Intent", - "key": "intent", - "type": "enumerator", - "entity_type": "assetversion", - "group": "avalon", - "config": { - "multiselect": false, - "data": [ - {"test": "Test"}, - {"wip": "WIP"}, - {"final": "Final"} - ] - } -``` - - - - -## **Updating to 2.6.0** ## - -### Dev vs Prod - -If you want to differentiate between dev and prod deployments of pype, you need to add `config.ini` file to `pype-setup/pypeapp` folder with content. - -``` -[Default] -dev=true -``` - -### Ftrack - -You will have to log in to ftrack in pype after the update. You should be automatically prompted with the ftrack login window when you launch 2.6 release for the first time. - -Event server has to be restarted after the update to enable the ability to control it via action. - -### Presets - -There is a major change in the way how burnin presets are being stored. We simplified the preset format, however that means the currently running production configs need to be tweaked to match the new format. 
- -:::note Example of converting burnin preset from 2.5 to 2.6 - -2.5 burnin preset - -``` -"burnins":{ - "TOP_LEFT": { - "function": "text", - "text": "{dd}/{mm}/{yyyy}" - }, - "TOP_CENTERED": { - "function": "text", - "text": "" - }, - "TOP_RIGHT": { - "function": "text", - "text": "v{version:0>3}" - }, - "BOTTOM_LEFT": { - "function": "text", - "text": "{frame_start}-{current_frame}-{frame_end}" - }, - "BOTTOM_CENTERED": { - "function": "text", - "text": "{asset}" - }, - "BOTTOM_RIGHT": { - "function": "frame_numbers", - "text": "{username}" - } -``` - -2.6 burnin preset -``` -"burnins":{ - "TOP_LEFT": "{dd}/{mm}/{yyyy}", - "TOP_CENTER": "", - "TOP_RIGHT": "v{version:0>3}" - "BOTTOM_LEFT": "{frame_start}-{current_frame}-{frame_end}", - "BOTTOM_CENTERED": "{asset}", - "BOTTOM_RIGHT": "{username}" -} -``` diff --git a/website/sidebars.js b/website/sidebars.js index 16af1e1151..93887e00f6 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -8,7 +8,24 @@ module.exports = { "artist_getting_started", "artist_concepts", "artist_publish", - "artist_tools", + { + type: "category", + collapsed: true, + label: "Tools", + link: {type: 'doc', id: 'artist_tools'}, + items: [ + "artist_tools_context_manager", + "artist_tools_creator", + "artist_tools_loader", + "artist_tools_library_loader", + "artist_tools_publisher", + "artist_tools_inventory", + "artist_tools_workfiles", + "artist_tools_look_assigner", + "artist_tools_subset_manager", + "artist_tools_sync_queue" + ], + }, "artist_install" ], }, @@ -19,8 +36,21 @@ module.exports = { items: [ "artist_hosts_hiero", "artist_hosts_nuke_tut", - "artist_hosts_maya", + { + type: "category", + label: "Maya", + items: [ + "artist_hosts_maya", + "artist_hosts_maya_multiverse", + "artist_hosts_maya_yeti", + "artist_hosts_maya_xgen", + "artist_hosts_maya_arnold", + "artist_hosts_maya_vray", + "artist_hosts_maya_redshift", + ], + }, "artist_hosts_blender", + "artist_hosts_3dsmax", "artist_hosts_harmony", "artist_hosts_houdini", "artist_hosts_aftereffects", @@ -28,6 +58,7 @@ module.exports = { "artist_hosts_photoshop", "artist_hosts_tvpaint", "artist_hosts_unreal", + "artist_kitsu", { type: "category", label: "Ftrack", @@ -56,6 +87,7 @@ module.exports = { type: "category", label: "Configuration", items: [ + "admin_environment", "admin_settings", "admin_settings_system", "admin_settings_project_anatomy", @@ -75,6 +107,7 @@ module.exports = { label: "Modules", items: [ "module_ftrack", + "module_kitsu", "module_site_sync", "module_deadline", "module_muster", @@ -87,18 +120,17 @@ module.exports = { label: "Integrations", items: [ "admin_hosts_blender", + "admin_hosts_hiero", + "admin_hosts_houdini", "admin_hosts_maya", + "admin_hosts_nuke", "admin_hosts_resolve", "admin_hosts_harmony", "admin_hosts_aftereffects", "admin_hosts_tvpaint" ], }, - { - type: "category", - label: "Releases", - items: ["changelog", "update_notes"], - }, + "admin_releases", { type: "category", collapsed: false, @@ -131,11 +163,22 @@ module.exports = { ], }, ], - Dev: [ + Dev: [ "dev_introduction", "dev_requirements", "dev_build", "dev_testing", - "dev_contribute" - ] + "dev_contribute", + "dev_settings", + { + type: "category", + label: "Hosts integrations", + items: [ + "dev_host_implementation", + "dev_publishing" + ] + }, + "dev_deadline", + "dev_colorspace" + ] }; diff --git a/website/src/css/custom.css b/website/src/css/custom.css index e8dd86256b..58c9305bc7 100644 --- a/website/src/css/custom.css +++ b/website/src/css/custom.css @@ -196,12 +196,12 @@ html[data-theme='dark'] 
.header-github-link::before {
  padding: 20px
}

-.showcase .client {
+.showcase .studio {
  display: flex;
  justify-content: space-between;
}

-.showcase .client img {
+.showcase .studio img {
  max-height: 110px;
  padding: 20px;
  max-width: 160px;
diff --git a/website/src/pages/features.js b/website/src/pages/features.js
index d5c036eb89..8f3a085784 100644
--- a/website/src/pages/features.js
+++ b/website/src/pages/features.js
@@ -15,32 +15,32 @@ const key_features = [
    label: "Workfiles",
    description: "Save and load workfiles in progress. Change the context inside of the application.",
-   docs: "/docs/artist_tools#workfiles",
+   docs: "/docs/artist_tools_workfiles",
  },
  {
    label: "Creator",
    description: "Universal GUI for defining content for publishing from your DCC app.",
-   docs: "/docs/artist_tools#creator",
+   docs: "/docs/artist_tools_creator",
  },
  {
    label: "Loader",
    description: "Universal GUI for loading published assets into your DCC app.",
-   docs: "/docs/artist_tools#loader",
+   docs: "/docs/artist_tools_loader",
  },
  {
    label: "Publisher",
    description: "Universal GUI for validating and publishing content from your DCC app.",
    image: "",
-   docs: "/docs/artist_tools#publisher",
+   docs: "/docs/artist_tools_publisher",
  },
  {
    label: "Scene manager",
    description: "Universal GUI for managing versions of assets loaded into your working scene.",
-   docs: "docs/artist_tools#inventory",
+   docs: "/docs/artist_tools_inventory",
  },
  {
    label: "Project manager",
@@ -52,7 +52,7 @@ const key_features = [
    label: "Library Loader",
    description: "A loader GUI that allows you to load content from a dedicated cross-project asset library",
-   docs: "docs/artist_tools#library-loader",
+   docs: "/docs/artist_tools_library_loader",
    image: "",
  },
  {
diff --git a/website/src/pages/index.js b/website/src/pages/index.js
index 791b309bbc..52302ec285 100644
--- a/website/src/pages/index.js
+++ b/website/src/pages/index.js
@@ -65,9 +65,17 @@ const collab = [
    image: '/img/clothcat.png',
    infoLink: 'https://www.clothcatanimation.com/'
  }, {
-   title: 'Ellipse Studio',
-   image: '/img/ellipse-studio.png',
-   infoLink: 'http://www.dargaudmedia.com'
+   title: 'Ellipse Animation',
+   image: '/img/ellipse_animation.svg',
+   infoLink: 'http://www.ellipseanimation.com'
+ }, {
+   title: 'J Cube Inc',
+   image: '/img/jcube_logo_bw.png',
+   infoLink: 'https://j-cube.jp'
+ }, {
+   title: 'Normaal Animation',
+   image: '/img/logo_normaal.png',
+   infoLink: 'https://j-cube.jp'
  }
];

@@ -144,7 +152,37 @@ const studios = [
    title: "Ember Light",
    image: "/img/EmberLight_black.png",
    infoLink: "https://emberlight.se/",
- }
+ },
+ {
+   title: "IGG Canada",
+   image: "/img/igg-logo.png",
+   infoLink: "https://www.igg.com/",
+ },
+ {
+   title: "Agora Studio",
+   image: "/img/agora_studio.png",
+   infoLink: "https://agora.studio/",
+ },
+ {
+   title: "Lucan Visuals",
+   image: "/img/lucan_Logo_On_White-HR.png",
+   infoLink: "https://www.lucan.tv/",
+ },
+ {
+   title: "No Ghost",
+   image: "/img/noghost.png",
+   infoLink: "https://www.noghost.co.uk/",
+ },
+ {
+   title: "Static VFX",
+   image: "/img/staticvfx.png",
+   infoLink: "http://www.staticvfx.com/",
+ },
+ {
+   title: "Method n Madness",
+   image: "/img/methodmadness.png",
+   infoLink: "https://www.methodnmadness.com/",
+ }
];

function Service({imageUrl, title, description}) {
  );
}

-function Studio rename:
-function Client({title, image, infoLink}) {
+function Studio({title, image, infoLink}) {
  const imgUrl = useBaseUrl(image);
  return (
-
+
  );
}

@@ -224,7 +262,7 @@ function Home() {
Get Support
- +

OpenPYPE is developed, maintained and supported by PYPE.club

@@ -285,7 +323,7 @@ function Home() {
- +
-

In development by us or OpenPype community.

+

Planned or in development by us and the OpenPype community.

@@ -441,7 +494,7 @@ function Home() {

Studios using openPype

{studios.map((props, idx) => (
- <Client key={idx} {...props} />
+ <Studio key={idx} {...props} />
))}
diff --git a/website/static/img/NoGhost_Logo_black.svg b/website/static/img/NoGhost_Logo_black.svg new file mode 100644 index 0000000000..b499b1621f --- /dev/null +++ b/website/static/img/NoGhost_Logo_black.svg @@ -0,0 +1,31 @@ + + + + + + + + + + + + + diff --git a/website/static/img/agora_studio.png b/website/static/img/agora_studio.png new file mode 100644 index 0000000000..48b07b8775 Binary files /dev/null and b/website/static/img/agora_studio.png differ diff --git a/website/static/img/app_flame.png b/website/static/img/app_flame.png index ba9b69e45f..188153e573 100644 Binary files a/website/static/img/app_flame.png and b/website/static/img/app_flame.png differ diff --git a/website/static/img/app_hibob.png b/website/static/img/app_hibob.png new file mode 100644 index 0000000000..91dd8d3f6b Binary files /dev/null and b/website/static/img/app_hibob.png differ diff --git a/website/static/img/app_hiero.png b/website/static/img/app_hiero.png index ea33f10214..ef27f58bee 100644 Binary files a/website/static/img/app_hiero.png and b/website/static/img/app_hiero.png differ diff --git a/website/static/img/app_multiverse.png b/website/static/img/app_multiverse.png new file mode 100644 index 0000000000..c0d80e4f1b Binary files /dev/null and b/website/static/img/app_multiverse.png differ diff --git a/website/static/img/app_nuke.png b/website/static/img/app_nuke.png index 4b3797af7a..1465da8ce8 100644 Binary files a/website/static/img/app_nuke.png and b/website/static/img/app_nuke.png differ diff --git a/website/static/img/app_nukestudio.png b/website/static/img/app_nukestudio.png new file mode 100644 index 0000000000..dfc2ad5a97 Binary files /dev/null and b/website/static/img/app_nukestudio.png differ diff --git a/website/static/img/app_royalrender.png b/website/static/img/app_royalrender.png new file mode 100644 index 0000000000..0e49519227 Binary files /dev/null and b/website/static/img/app_royalrender.png differ diff --git a/website/static/img/ellipse_animation.svg b/website/static/img/ellipse_animation.svg new file mode 100644 index 0000000000..c1caaa6726 --- /dev/null +++ b/website/static/img/ellipse_animation.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/website/static/img/igg-logo.png b/website/static/img/igg-logo.png new file mode 100644 index 0000000000..9fc7a7f84f Binary files /dev/null and b/website/static/img/igg-logo.png differ diff --git a/website/static/img/jcube_logo_bw.png b/website/static/img/jcube_logo_bw.png new file mode 100644 index 0000000000..76b897a865 Binary files /dev/null and b/website/static/img/jcube_logo_bw.png differ diff --git a/website/static/img/logo_normaal.png b/website/static/img/logo_normaal.png new file mode 100644 index 0000000000..711847c9f2 Binary files /dev/null and b/website/static/img/logo_normaal.png differ diff --git a/website/static/img/lucan_Logo_On_White-HR.png b/website/static/img/lucan_Logo_On_White-HR.png new file mode 100644 index 0000000000..c86030e1e7 Binary files /dev/null and b/website/static/img/lucan_Logo_On_White-HR.png differ diff --git a/website/static/img/methodmadness.png b/website/static/img/methodmadness.png new file mode 100644 index 0000000000..9dd0681d4a Binary files /dev/null and b/website/static/img/methodmadness.png differ diff --git a/website/static/img/noghost.png b/website/static/img/noghost.png new file mode 100644 index 0000000000..febaedcae8 Binary files /dev/null and b/website/static/img/noghost.png differ diff --git a/website/static/img/staticvfx.png b/website/static/img/staticvfx.png new file mode 100644 index 
0000000000..41efd7f120 Binary files /dev/null and b/website/static/img/staticvfx.png differ diff --git a/website/yarn.lock b/website/yarn.lock index 7f677aaed7..559c58f931 100644 --- a/website/yarn.lock +++ b/website/yarn.lock @@ -1543,15 +1543,37 @@ dependencies: "@hapi/hoek" "^9.0.0" +"@jridgewell/gen-mapping@^0.3.0": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" + integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/resolve-uri@^3.0.3": - version "3.0.5" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.0.5.tgz#68eb521368db76d040a6315cdb24bf2483037b9c" - integrity sha512-VPeQ7+wH0itvQxnG+lIzWgkysKIr3L9sslimFW55rHMdGu/qCQ5z5h9zq4gI8uBtqkpHhsF4Z/OwExufUCThew== + version "3.1.0" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78" + integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w== + +"@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + +"@jridgewell/source-map@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb" + integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw== + dependencies: + "@jridgewell/gen-mapping" "^0.3.0" + "@jridgewell/trace-mapping" "^0.3.9" "@jridgewell/sourcemap-codec@^1.4.10": - version "1.4.11" - resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.11.tgz#771a1d8d744eeb71b6adb35808e1a6c7b9b8c8ec" - integrity sha512-Fg32GrJo61m+VqYSdRSjRXMjQ06j8YIYfcTqndLYVAaHmroZHLJZCydsWBOTDqXS2v+mjxohBWEMfg97GXmYQg== + version "1.4.14" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24" + integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw== "@jridgewell/trace-mapping@^0.3.0": version "0.3.4" @@ -1561,6 +1583,14 @@ "@jridgewell/resolve-uri" "^3.0.3" "@jridgewell/sourcemap-codec" "^1.4.10" +"@jridgewell/trace-mapping@^0.3.9": + version "0.3.14" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.14.tgz#b231a081d8f66796e475ad588a1ef473112701ed" + integrity sha512-bJWEfQ9lPTvm3SneWwRFVLzrh6nhjwqw7TUFFBEMzwvg7t7PCDenf2lDwqo4NQXzdpgBXyFgDWnQA+2vkruksQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@mdx-js/mdx@1.6.22", "@mdx-js/mdx@^1.6.21": version "1.6.22" resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.22.tgz#8a723157bf90e78f17dc0f27995398e6c731f1ba" @@ -2140,10 +2170,10 @@ acorn@^6.1.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6" integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ== -acorn@^8.0.4, acorn@^8.4.1: - version "8.7.0" - resolved 
"https://registry.yarnpkg.com/acorn/-/acorn-8.7.0.tgz#90951fde0f8f09df93549481e5fc141445b791cf" - integrity sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ== +acorn@^8.0.4, acorn@^8.4.1, acorn@^8.5.0: + version "8.7.1" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.1.tgz#0197122c843d1bf6d0a5e83220a788f278f63c30" + integrity sha512-Xx54uLJQZ19lKygFXOWsscKUbsBZW0CPykPhVQdhIeIwrbPmJzqeASDInc8nKBnp/JT6igTs82qPXz069H8I/A== address@^1.0.1, address@^1.1.2: version "1.1.2" @@ -2311,9 +2341,9 @@ asap@~2.0.3: integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= async@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" - integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + version "2.6.4" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" + integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== dependencies: lodash "^4.17.14" @@ -4243,9 +4273,9 @@ htmlparser2@^6.1.0: entities "^2.0.0" http-cache-semantics@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" - integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + version "4.1.1" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a" + integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== http-deceiver@^1.2.7: version "1.2.7" @@ -4710,9 +4740,9 @@ json-schema-traverse@^1.0.0: integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug== json5@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" - integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== + version "1.0.2" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.2.tgz#63d98d60f21b313b77c4d6da18bfa69d80e1d593" + integrity sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA== dependencies: minimist "^1.2.0" @@ -4782,9 +4812,9 @@ loader-runner@^4.2.0: integrity sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw== loader-utils@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.0.tgz#c579b5e34cb34b1a74edc6c1fb36bfa371d5a613" - integrity sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA== + version "1.4.2" + resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.4.2.tgz#29a957f3a63973883eb684f10ffd3d151fec01a3" + integrity sha512-I5d00Pd/jwMD2QCduo657+YM/6L3KZu++pmX9VFncxaxvHcru9jx1lBaFft+r4Mt2jK0Yhp41XlRAihzPxHNCg== dependencies: big.js "^5.2.2" emojis-list "^3.0.0" @@ -5125,9 +5155,9 @@ minimatch@^3.0.4: brace-expansion "^1.1.7" minimist@^1.2.0, minimist@^1.2.5: - version "1.2.5" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" - integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== + version "1.2.7" + resolved 
"https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18" + integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g== mkdirp@^0.5.5: version "0.5.5" @@ -5207,9 +5237,9 @@ node-fetch@2.6.7: whatwg-url "^5.0.0" node-forge@^1.2.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.2.1.tgz#82794919071ef2eb5c509293325cec8afd0fd53c" - integrity sha512-Fcvtbb+zBcZXbTTVwqGA5W+MKBj56UjVRevvchv5XrcyXbmNdesfZL37nlcWOfpgHhgmxApw3tQbTr4CqNmX4w== + version "1.3.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.0.tgz#37a874ea723855f37db091e6c186e5b67a01d4b2" + integrity sha512-08ARB91bUi6zNKzVmaj3QO7cr397uiDT2nJ63cHjyNtCTWIgvS47j3eT0WfzUwS9+6Z5YshRaoasFkXCKrIYbA== node-releases@^2.0.1: version "2.0.2" @@ -6838,11 +6868,6 @@ source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -source-map@~0.7.2: - version "0.7.3" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" - integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== - sourcemap-codec@^1.4.4: version "1.4.8" resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4" @@ -7048,12 +7073,13 @@ terser-webpack-plugin@^5.1.3, terser-webpack-plugin@^5.2.4: terser "^5.7.2" terser@^5.10.0, terser@^5.7.2: - version "5.10.0" - resolved "https://registry.yarnpkg.com/terser/-/terser-5.10.0.tgz#b86390809c0389105eb0a0b62397563096ddafcc" - integrity sha512-AMmF99DMfEDiRJfxfY5jj5wNH/bYO09cniSqhfoyxc8sFoYIgkJy86G04UoZU5VjlpnplVu0K6Tx6E9b5+DlHA== + version "5.14.2" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.14.2.tgz#9ac9f22b06994d736174f4091aa368db896f1c10" + integrity sha512-oL0rGeM/WFQCUd0y2QrWxYnq7tfSuKBiqTjRPWrRgB46WD/kiwHwF8T23z78H6Q6kGCuuHcPB+KULHRdxvVGQA== dependencies: + "@jridgewell/source-map" "^0.3.2" + acorn "^8.5.0" commander "^2.20.0" - source-map "~0.7.2" source-map-support "~0.5.20" text-table@^0.2.0: @@ -7154,9 +7180,9 @@ typedarray-to-buffer@^3.1.5: is-typedarray "^1.0.0" ua-parser-js@^0.7.30: - version "0.7.31" - resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.31.tgz#649a656b191dffab4f21d5e053e27ca17cbff5c6" - integrity sha512-qLK/Xe9E2uzmYI3qLeOmI0tEOt+TBBQyUIAh4aAgU05FVYzeZrKUdkAZfBNVGRaHVgV0TDkdEngJSw/SyQchkQ== + version "0.7.33" + resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.33.tgz#1d04acb4ccef9293df6f70f2c3d22f3030d8b532" + integrity sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw== unherit@^1.0.4: version "1.1.3"